From bec34fc8d7eec716c3f2136dc78c0d7eee050d7f Mon Sep 17 00:00:00 2001 From: Maxim Khitrov Date: Fri, 18 Apr 2014 20:31:57 -0400 Subject: [PATCH 001/515] Import flowcontrol package from Google Code --- flowcontrol/flowcontrol.go | 267 +++++++++++++++++++++++++++++++++++++ flowcontrol/io.go | 133 ++++++++++++++++++ flowcontrol/io_test.go | 146 ++++++++++++++++++++ flowcontrol/util.go | 67 ++++++++++ 4 files changed, 613 insertions(+) create mode 100644 flowcontrol/flowcontrol.go create mode 100644 flowcontrol/io.go create mode 100644 flowcontrol/io_test.go create mode 100644 flowcontrol/util.go diff --git a/flowcontrol/flowcontrol.go b/flowcontrol/flowcontrol.go new file mode 100644 index 000000000..40db5d89e --- /dev/null +++ b/flowcontrol/flowcontrol.go @@ -0,0 +1,267 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowcontrol provides the tools for monitoring and limiting the +// transfer rate of an arbitrary data stream. +package flowcontrol + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. 
Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. 
All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. 
If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. +func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. 
+func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/flowcontrol/io.go b/flowcontrol/io.go new file mode 100644 index 000000000..12a753ddf --- /dev/null +++ b/flowcontrol/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowcontrol + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. 
+var ErrLimit = errors.New("flowcontrol: transfer rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. 
+func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. 
+func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/flowcontrol/io_test.go b/flowcontrol/io_test.go new file mode 100644 index 000000000..318069366 --- /dev/null +++ b/flowcontrol/io_test.go @@ -0,0 +1,146 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowcontrol + +import ( + "bytes" + "reflect" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + 
t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + } + for i, s := range status { + if !reflect.DeepEqual(&s, &want[i]) { + t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _400ms { + t.Fatalf("w.Write(b[20:]) returned ahead of 
time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, + Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, + } + for i, s := range status { + if !reflect.DeepEqual(&s, &want[i]) { + t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} diff --git a/flowcontrol/util.go b/flowcontrol/util.go new file mode 100644 index 000000000..91efd8815 --- /dev/null +++ b/flowcontrol/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowcontrol + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return time.Unix(0, int64(czero+c)) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). 
+func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} From 10e10fd81dfce924fe792e730f08bf7d28d488cf Mon Sep 17 00:00:00 2001 From: Maxim Khitrov Date: Fri, 18 Apr 2014 20:32:46 -0400 Subject: [PATCH 002/515] Rename 'flowcontrol' directory --- {flowcontrol => flowrate}/flowcontrol.go | 0 {flowcontrol => flowrate}/io.go | 0 {flowcontrol => flowrate}/io_test.go | 0 {flowcontrol => flowrate}/util.go | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename {flowcontrol => flowrate}/flowcontrol.go (100%) rename {flowcontrol => flowrate}/io.go (100%) rename {flowcontrol => flowrate}/io_test.go (100%) rename {flowcontrol => flowrate}/util.go (100%) diff --git a/flowcontrol/flowcontrol.go b/flowrate/flowcontrol.go similarity index 100% rename from flowcontrol/flowcontrol.go rename to flowrate/flowcontrol.go diff --git a/flowcontrol/io.go b/flowrate/io.go similarity index 100% rename from flowcontrol/io.go rename to flowrate/io.go diff --git a/flowcontrol/io_test.go b/flowrate/io_test.go similarity index 100% rename from flowcontrol/io_test.go rename to flowrate/io_test.go diff --git a/flowcontrol/util.go b/flowrate/util.go similarity index 100% rename from flowcontrol/util.go rename to flowrate/util.go From 
6a20af87667a64a2bd5d212abb67de93ba4b0ee4 Mon Sep 17 00:00:00 2001 From: Maxim Khitrov Date: Fri, 18 Apr 2014 20:41:01 -0400 Subject: [PATCH 003/515] Rename package to 'flowrate' --- flowrate/{flowcontrol.go => flowrate.go} | 6 +++--- flowrate/io.go | 4 ++-- flowrate/io_test.go | 2 +- flowrate/util.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) rename flowrate/{flowcontrol.go => flowrate.go} (98%) diff --git a/flowrate/flowcontrol.go b/flowrate/flowrate.go similarity index 98% rename from flowrate/flowcontrol.go rename to flowrate/flowrate.go index 40db5d89e..1b727721e 100644 --- a/flowrate/flowcontrol.go +++ b/flowrate/flowrate.go @@ -2,9 +2,9 @@ // Written by Maxim Khitrov (November 2012) // -// Package flowcontrol provides the tools for monitoring and limiting the -// transfer rate of an arbitrary data stream. -package flowcontrol +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate import ( "math" diff --git a/flowrate/io.go b/flowrate/io.go index 12a753ddf..fbe090972 100644 --- a/flowrate/io.go +++ b/flowrate/io.go @@ -2,7 +2,7 @@ // Written by Maxim Khitrov (November 2012) // -package flowcontrol +package flowrate import ( "errors" @@ -11,7 +11,7 @@ import ( // ErrLimit is returned by the Writer when a non-blocking write is short due to // the transfer rate limit. -var ErrLimit = errors.New("flowcontrol: transfer rate limit exceeded") +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") // Limiter is implemented by the Reader and Writer to provide a consistent // interface for monitoring and controlling data transfer. 
diff --git a/flowrate/io_test.go b/flowrate/io_test.go index 318069366..fa7f4b4ae 100644 --- a/flowrate/io_test.go +++ b/flowrate/io_test.go @@ -2,7 +2,7 @@ // Written by Maxim Khitrov (November 2012) // -package flowcontrol +package flowrate import ( "bytes" diff --git a/flowrate/util.go b/flowrate/util.go index 91efd8815..4caac583f 100644 --- a/flowrate/util.go +++ b/flowrate/util.go @@ -2,7 +2,7 @@ // Written by Maxim Khitrov (November 2012) // -package flowcontrol +package flowrate import ( "math" From 3524264adda6c527b002e4f2e7ab27e38cf27324 Mon Sep 17 00:00:00 2001 From: Maxim Khitrov Date: Fri, 18 Apr 2014 20:43:33 -0400 Subject: [PATCH 004/515] Add LICENSE and README files --- LICENSE | 29 +++++++++++++++++++++++++++++ README | 10 ++++++++++ 2 files changed, 39 insertions(+) create mode 100644 LICENSE create mode 100644 README diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..1b914e3ed --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + + * Neither the name of the go-imap project nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README b/README new file mode 100644 index 000000000..db428090c --- /dev/null +++ b/README @@ -0,0 +1,10 @@ +Data Flow Rate Control +====================== + +To download and install this package run: + +go get github.com/mxk/go-flowrate/flowrate + +The documentation is available at: + +http://godoc.org/github.com/mxk/go-flowrate/flowrate From cca7078d478f8520f85629ad7c68962d31ed7682 Mon Sep 17 00:00:00 2001 From: Maxim Khitrov Date: Fri, 18 Apr 2014 21:45:27 -0400 Subject: [PATCH 005/515] Fix project name in LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 1b914e3ed..e9f9f628b 100644 --- a/LICENSE +++ b/LICENSE @@ -12,7 +12,7 @@ met: documentation and/or other materials provided with the distribution. - * Neither the name of the go-imap project nor the names of its + * Neither the name of the go-flowrate project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
From 16372365c47cc9387d40325ea51ecef63e949c75 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 21 Oct 2015 12:15:19 -0700 Subject: [PATCH 006/515] First commit --- array.go | 5 + async.go | 15 +++ bit_array.go | 275 ++++++++++++++++++++++++++++++++++++++++++++++ bit_array_test.go | 120 ++++++++++++++++++++ byteslice.go | 44 ++++++++ cmap.go | 62 +++++++++++ colors.go | 84 ++++++++++++++ errors.go | 45 ++++++++ heap.go | 103 +++++++++++++++++ int.go | 55 ++++++++++ io.go | 75 +++++++++++++ math.go | 157 ++++++++++++++++++++++++++ os.go | 225 +++++++++++++++++++++++++++++++++++++ random.go | 145 ++++++++++++++++++++++++ repeat_timer.go | 72 ++++++++++++ service.go | 154 ++++++++++++++++++++++++++ string.go | 24 ++++ test/assert.go | 14 +++ test/mutate.go | 28 +++++ throttle_timer.go | 57 ++++++++++ word.go | 91 +++++++++++++++ 21 files changed, 1850 insertions(+) create mode 100644 array.go create mode 100644 async.go create mode 100644 bit_array.go create mode 100644 bit_array_test.go create mode 100644 byteslice.go create mode 100644 cmap.go create mode 100644 colors.go create mode 100644 errors.go create mode 100644 heap.go create mode 100644 int.go create mode 100644 io.go create mode 100644 math.go create mode 100644 os.go create mode 100644 random.go create mode 100644 repeat_timer.go create mode 100644 service.go create mode 100644 string.go create mode 100644 test/assert.go create mode 100644 test/mutate.go create mode 100644 throttle_timer.go create mode 100644 word.go diff --git a/array.go b/array.go new file mode 100644 index 000000000..adedc42be --- /dev/null +++ b/array.go @@ -0,0 +1,5 @@ +package common + +func Arr(items ...interface{}) []interface{} { + return items +} diff --git a/async.go b/async.go new file mode 100644 index 000000000..1d302c344 --- /dev/null +++ b/async.go @@ -0,0 +1,15 @@ +package common + +import "sync" + +func Parallel(tasks ...func()) { + var wg sync.WaitGroup + wg.Add(len(tasks)) + for _, task := range tasks { + go func(task 
func()) { + task() + wg.Done() + }(task) + } + wg.Wait() +} diff --git a/bit_array.go b/bit_array.go new file mode 100644 index 000000000..dc006f0eb --- /dev/null +++ b/bit_array.go @@ -0,0 +1,275 @@ +package common + +import ( + "fmt" + "math/rand" + "strings" + "sync" +) + +type BitArray struct { + mtx sync.Mutex + Bits int `json:"bits"` // NOTE: persisted via reflect, must be exported + Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported +} + +// There is no BitArray whose Size is 0. Use nil instead. +func NewBitArray(bits int) *BitArray { + if bits == 0 { + return nil + } + return &BitArray{ + Bits: bits, + Elems: make([]uint64, (bits+63)/64), + } +} + +func (bA *BitArray) Size() int { + if bA == nil { + return 0 + } + return bA.Bits +} + +// NOTE: behavior is undefined if i >= bA.Bits +func (bA *BitArray) GetIndex(i int) bool { + if bA == nil { + return false + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.getIndex(i) +} + +func (bA *BitArray) getIndex(i int) bool { + if i >= bA.Bits { + return false + } + return bA.Elems[i/64]&(uint64(1)< 0 +} + +// NOTE: behavior is undefined if i >= bA.Bits +func (bA *BitArray) SetIndex(i int, v bool) bool { + if bA == nil { + return false + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.setIndex(i, v) +} + +func (bA *BitArray) setIndex(i int, v bool) bool { + if i >= bA.Bits { + return false + } + if v { + bA.Elems[i/64] |= (uint64(1) << uint(i%64)) + } else { + bA.Elems[i/64] &= ^(uint64(1) << uint(i%64)) + } + return true +} + +func (bA *BitArray) Copy() *BitArray { + if bA == nil { + return nil + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.copy() +} + +func (bA *BitArray) copy() *BitArray { + c := make([]uint64, len(bA.Elems)) + copy(c, bA.Elems) + return &BitArray{ + Bits: bA.Bits, + Elems: c, + } +} + +func (bA *BitArray) copyBits(bits int) *BitArray { + c := make([]uint64, (bits+63)/64) + copy(c, bA.Elems) + return &BitArray{ + Bits: bits, + Elems: c, + } +} + +// 
Returns a BitArray of larger bits size. +func (bA *BitArray) Or(o *BitArray) *BitArray { + if bA == nil { + o.Copy() + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) + for i := 0; i < len(c.Elems); i++ { + c.Elems[i] |= o.Elems[i] + } + return c +} + +// Returns a BitArray of smaller bit size. +func (bA *BitArray) And(o *BitArray) *BitArray { + if bA == nil { + return nil + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.and(o) +} + +func (bA *BitArray) and(o *BitArray) *BitArray { + c := bA.copyBits(MinInt(bA.Bits, o.Bits)) + for i := 0; i < len(c.Elems); i++ { + c.Elems[i] &= o.Elems[i] + } + return c +} + +func (bA *BitArray) Not() *BitArray { + if bA == nil { + return nil // Degenerate + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + c := bA.copy() + for i := 0; i < len(c.Elems); i++ { + c.Elems[i] = ^c.Elems[i] + } + return c +} + +func (bA *BitArray) Sub(o *BitArray) *BitArray { + if bA == nil { + return nil + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + if bA.Bits > o.Bits { + c := bA.copy() + for i := 0; i < len(o.Elems)-1; i++ { + c.Elems[i] &= ^c.Elems[i] + } + i := len(o.Elems) - 1 + if i >= 0 { + for idx := i * 64; idx < o.Bits; idx++ { + // NOTE: each individual GetIndex() call to o is safe. 
+ c.setIndex(idx, c.getIndex(idx) && !o.GetIndex(idx)) + } + } + return c + } else { + return bA.and(o.Not()) // Note degenerate case where o == nil + } +} + +func (bA *BitArray) IsFull() bool { + if bA == nil { + return true + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + + // Check all elements except the last + for _, elem := range bA.Elems[:len(bA.Elems)-1] { + if (^elem) != 0 { + return false + } + } + + // Check that the last element has (lastElemBits) 1's + lastElemBits := (bA.Bits+63)%64 + 1 + lastElem := bA.Elems[len(bA.Elems)-1] + return (lastElem+1)&((uint64(1)< 0 { + randBitStart := rand.Intn(64) + for j := 0; j < 64; j++ { + bitIdx := ((j + randBitStart) % 64) + if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { + return 64*elemIdx + bitIdx, true + } + } + PanicSanity("should not happen") + } + } else { + // Special case for last elem, to ignore straggler bits + elemBits := bA.Bits % 64 + if elemBits == 0 { + elemBits = 64 + } + randBitStart := rand.Intn(elemBits) + for j := 0; j < elemBits; j++ { + bitIdx := ((j + randBitStart) % elemBits) + if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { + return 64*elemIdx + bitIdx, true + } + } + } + } + return 0, false +} + +func (bA *BitArray) String() string { + if bA == nil { + return "nil-BitArray" + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.stringIndented("") +} + +func (bA *BitArray) StringIndented(indent string) string { + if bA == nil { + return "nil-BitArray" + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.stringIndented(indent) +} + +func (bA *BitArray) stringIndented(indent string) string { + + lines := []string{} + bits := "" + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += "X" + } else { + bits += "_" + } + if i%100 == 99 { + lines = append(lines, bits) + bits = "" + } + if i%10 == 9 { + bits += " " + } + if i%50 == 49 { + bits += " " + } + } + if len(bits) > 0 { + lines = append(lines, bits) + } + return fmt.Sprintf("BA{%v:%v}", bA.Bits, 
strings.Join(lines, indent)) +} diff --git a/bit_array_test.go b/bit_array_test.go new file mode 100644 index 000000000..93274aab0 --- /dev/null +++ b/bit_array_test.go @@ -0,0 +1,120 @@ +package common + +import ( + "testing" +) + +func randBitArray(bits int) (*BitArray, []byte) { + src := RandBytes((bits + 7) / 8) + bA := NewBitArray(bits) + for i := 0; i < len(src); i++ { + for j := 0; j < 8; j++ { + if i*8+j >= bits { + return bA, src + } + setBit := src[i]&(1< 0 + bA.SetIndex(i*8+j, setBit) + } + } + return bA, src +} + +func TestAnd(t *testing.T) { + + bA1, _ := randBitArray(51) + bA2, _ := randBitArray(31) + bA3 := bA1.And(bA2) + + if bA3.Bits != 31 { + t.Error("Expected min bits", bA3.Bits) + } + if len(bA3.Elems) != len(bA2.Elems) { + t.Error("Expected min elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) && bA2.GetIndex(i) + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) + } + } +} + +func TestOr(t *testing.T) { + + bA1, _ := randBitArray(51) + bA2, _ := randBitArray(31) + bA3 := bA1.Or(bA2) + + if bA3.Bits != 51 { + t.Error("Expected max bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected max elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) || bA2.GetIndex(i) + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) + } + } +} + +func TestSub1(t *testing.T) { + + bA1, _ := randBitArray(31) + bA2, _ := randBitArray(51) + bA3 := bA1.Sub(bA2) + + if bA3.Bits != bA1.Bits { + t.Error("Expected bA1 bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected bA1 elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) + if bA2.GetIndex(i) { + expected = false + } + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) + } + } +} + +func 
TestSub2(t *testing.T) { + + bA1, _ := randBitArray(51) + bA2, _ := randBitArray(31) + bA3 := bA1.Sub(bA2) + + if bA3.Bits != bA1.Bits { + t.Error("Expected bA1 bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected bA1 elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) + if i < bA2.Bits && bA2.GetIndex(i) { + expected = false + } + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3") + } + } +} + +func TestPickRandom(t *testing.T) { + for idx := 0; idx < 123; idx++ { + bA1 := NewBitArray(123) + bA1.SetIndex(idx, true) + index, ok := bA1.PickRandom() + if !ok { + t.Fatal("Expected to pick element but got none") + } + if index != idx { + t.Fatalf("Expected to pick element at %v but got wrong index", idx) + } + } +} diff --git a/byteslice.go b/byteslice.go new file mode 100644 index 000000000..be828f065 --- /dev/null +++ b/byteslice.go @@ -0,0 +1,44 @@ +package common + +import ( + "bytes" +) + +func Fingerprint(slice []byte) []byte { + fingerprint := make([]byte, 6) + copy(fingerprint, slice) + return fingerprint +} + +func IsZeros(slice []byte) bool { + for _, byt := range slice { + if byt != byte(0) { + return false + } + } + return true +} + +func RightPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[0:len(slice)], slice) + return padded +} + +func LeftPadBytes(slice []byte, l int) []byte { + if l < len(slice) { + return slice + } + padded := make([]byte, l) + copy(padded[l-len(slice):], slice) + return padded +} + +func TrimmedString(b []byte) string { + trimSet := string([]byte{0}) + return string(bytes.TrimLeft(b, trimSet)) + +} diff --git a/cmap.go b/cmap.go new file mode 100644 index 000000000..5de6fa2fa --- /dev/null +++ b/cmap.go @@ -0,0 +1,62 @@ +package common + +import "sync" + +// CMap is a goroutine-safe map +type CMap struct { + m map[string]interface{} + l sync.Mutex +} + +func NewCMap() *CMap { + return &CMap{ + m: 
make(map[string]interface{}, 0), + } +} + +func (cm *CMap) Set(key string, value interface{}) { + cm.l.Lock() + defer cm.l.Unlock() + cm.m[key] = value +} + +func (cm *CMap) Get(key string) interface{} { + cm.l.Lock() + defer cm.l.Unlock() + return cm.m[key] +} + +func (cm *CMap) Has(key string) bool { + cm.l.Lock() + defer cm.l.Unlock() + _, ok := cm.m[key] + return ok +} + +func (cm *CMap) Delete(key string) { + cm.l.Lock() + defer cm.l.Unlock() + delete(cm.m, key) +} + +func (cm *CMap) Size() int { + cm.l.Lock() + defer cm.l.Unlock() + return len(cm.m) +} + +func (cm *CMap) Clear() { + cm.l.Lock() + defer cm.l.Unlock() + cm.m = make(map[string]interface{}, 0) +} + +func (cm *CMap) Values() []interface{} { + cm.l.Lock() + defer cm.l.Unlock() + items := []interface{}{} + for _, v := range cm.m { + items = append(items, v) + } + return items +} diff --git a/colors.go b/colors.go new file mode 100644 index 000000000..776b22e2e --- /dev/null +++ b/colors.go @@ -0,0 +1,84 @@ +package common + +import ( + "fmt" + "strings" +) + +const ( + ANSIReset = "\x1b[0m" + ANSIBright = "\x1b[1m" + ANSIDim = "\x1b[2m" + ANSIUnderscore = "\x1b[4m" + ANSIBlink = "\x1b[5m" + ANSIReverse = "\x1b[7m" + ANSIHidden = "\x1b[8m" + + ANSIFgBlack = "\x1b[30m" + ANSIFgRed = "\x1b[31m" + ANSIFgGreen = "\x1b[32m" + ANSIFgYellow = "\x1b[33m" + ANSIFgBlue = "\x1b[34m" + ANSIFgMagenta = "\x1b[35m" + ANSIFgCyan = "\x1b[36m" + ANSIFgWhite = "\x1b[37m" + + ANSIBgBlack = "\x1b[40m" + ANSIBgRed = "\x1b[41m" + ANSIBgGreen = "\x1b[42m" + ANSIBgYellow = "\x1b[43m" + ANSIBgBlue = "\x1b[44m" + ANSIBgMagenta = "\x1b[45m" + ANSIBgCyan = "\x1b[46m" + ANSIBgWhite = "\x1b[47m" +) + +// color the string s with color 'color' +// unless s is already colored +func treat(s string, color string) string { + if len(s) > 2 && s[:2] == "\x1b[" { + return s + } else { + return color + s + ANSIReset + } +} + +func treatAll(color string, args ...interface{}) string { + var parts []string + for _, arg := range args { + parts 
= append(parts, treat(fmt.Sprintf("%v", arg), color)) + } + return strings.Join(parts, "") +} + +func Black(args ...interface{}) string { + return treatAll(ANSIFgBlack, args...) +} + +func Red(args ...interface{}) string { + return treatAll(ANSIFgRed, args...) +} + +func Green(args ...interface{}) string { + return treatAll(ANSIFgGreen, args...) +} + +func Yellow(args ...interface{}) string { + return treatAll(ANSIFgYellow, args...) +} + +func Blue(args ...interface{}) string { + return treatAll(ANSIFgBlue, args...) +} + +func Magenta(args ...interface{}) string { + return treatAll(ANSIFgMagenta, args...) +} + +func Cyan(args ...interface{}) string { + return treatAll(ANSIFgCyan, args...) +} + +func White(args ...interface{}) string { + return treatAll(ANSIFgWhite, args...) +} diff --git a/errors.go b/errors.go new file mode 100644 index 000000000..e168a75b7 --- /dev/null +++ b/errors.go @@ -0,0 +1,45 @@ +package common + +import ( + "fmt" +) + +type StackError struct { + Err interface{} + Stack []byte +} + +func (se StackError) String() string { + return fmt.Sprintf("Error: %v\nStack: %s", se.Err, se.Stack) +} + +func (se StackError) Error() string { + return se.String() +} + +//-------------------------------------------------------------------------------------------------- +// panic wrappers + +// A panic resulting from a sanity check means there is a programmer error +// and some gaurantee is not satisfied. +func PanicSanity(v interface{}) { + panic(Fmt("Paniced on a Sanity Check: %v", v)) +} + +// A panic here means something has gone horribly wrong, in the form of data corruption or +// failure of the operating system. In a correct/healthy system, these should never fire. +// If they do, it's indicative of a much more serious problem. +func PanicCrisis(v interface{}) { + panic(Fmt("Paniced on a Crisis: %v", v)) +} + +// Indicates a failure of consensus. Someone was malicious or something has +// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode +func PanicConsensus(v interface{}) { + panic(Fmt("Paniced on a Consensus Failure: %v", v)) +} + +// For those times when we're not sure if we should panic +func PanicQ(v interface{}) { + panic(Fmt("Paniced questionably: %v", v)) +} diff --git a/heap.go b/heap.go new file mode 100644 index 000000000..4a96d7aaa --- /dev/null +++ b/heap.go @@ -0,0 +1,103 @@ +package common + +import ( + "container/heap" +) + +type Comparable interface { + Less(o interface{}) bool +} + +//----------------------------------------------------------------------------- + +/* +Example usage: + h := NewHeap() + + h.Push(String("msg1"), 1) + h.Push(String("msg3"), 3) + h.Push(String("msg2"), 2) + + fmt.Println(h.Pop()) + fmt.Println(h.Pop()) + fmt.Println(h.Pop()) +*/ + +type Heap struct { + pq priorityQueue +} + +func NewHeap() *Heap { + return &Heap{pq: make([]*pqItem, 0)} +} + +func (h *Heap) Len() int64 { + return int64(len(h.pq)) +} + +func (h *Heap) Push(value interface{}, priority Comparable) { + heap.Push(&h.pq, &pqItem{value: value, priority: priority}) +} + +func (h *Heap) Peek() interface{} { + if len(h.pq) == 0 { + return nil + } + return h.pq[0].value +} + +func (h *Heap) Update(value interface{}, priority Comparable) { + h.pq.Update(h.pq[0], value, priority) +} + +func (h *Heap) Pop() interface{} { + item := heap.Pop(&h.pq).(*pqItem) + return item.value +} + +//----------------------------------------------------------------------------- + +/////////////////////// +// From: http://golang.org/pkg/container/heap/#example__priorityQueue + +type pqItem struct { + value interface{} + priority Comparable + index int +} + +type priorityQueue []*pqItem + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + return pq[i].priority.Less(pq[j].priority) +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq 
*priorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*pqItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { + item.value = value + item.priority = priority + heap.Fix(pq, item.index) +} diff --git a/int.go b/int.go new file mode 100644 index 000000000..50e86a072 --- /dev/null +++ b/int.go @@ -0,0 +1,55 @@ +package common + +import ( + "encoding/binary" + "sort" +) + +// Sort for []uint64 + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Uint64Slice) Sort() { sort.Sort(p) } + +func SearchUint64s(a []uint64, x uint64) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } + +//----------------------------------------------------------------------------- + +func PutUint64LE(dest []byte, i uint64) { + binary.LittleEndian.PutUint64(dest, i) +} + +func GetUint64LE(src []byte) uint64 { + return binary.LittleEndian.Uint64(src) +} + +func PutUint64BE(dest []byte, i uint64) { + binary.BigEndian.PutUint64(dest, i) +} + +func GetUint64BE(src []byte) uint64 { + return binary.BigEndian.Uint64(src) +} + +func PutInt64LE(dest []byte, i int64) { + binary.LittleEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64LE(src []byte) int64 { + return int64(binary.LittleEndian.Uint64(src)) +} + +func PutInt64BE(dest []byte, i int64) { + binary.BigEndian.PutUint64(dest, uint64(i)) +} + +func GetInt64BE(src []byte) int64 { + return int64(binary.BigEndian.Uint64(src)) +} diff --git a/io.go b/io.go new file mode 100644 index 000000000..378c19fc6 --- /dev/null +++ 
b/io.go @@ -0,0 +1,75 @@ +package common + +import ( + "bytes" + "errors" + "io" +) + +type PrefixedReader struct { + Prefix []byte + reader io.Reader +} + +func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { + return &PrefixedReader{prefix, reader} +} + +func (pr *PrefixedReader) Read(p []byte) (n int, err error) { + if len(pr.Prefix) > 0 { + read := copy(p, pr.Prefix) + pr.Prefix = pr.Prefix[read:] + return read, nil + } else { + return pr.reader.Read(p) + } +} + +// NOTE: Not goroutine safe +type BufferCloser struct { + bytes.Buffer + Closed bool +} + +func NewBufferCloser(buf []byte) *BufferCloser { + return &BufferCloser{ + *bytes.NewBuffer(buf), + false, + } +} + +func (bc *BufferCloser) Close() error { + if bc.Closed { + return errors.New("BufferCloser already closed") + } + bc.Closed = true + return nil +} + +func (bc *BufferCloser) Write(p []byte) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.Write(p) +} + +func (bc *BufferCloser) WriteByte(c byte) error { + if bc.Closed { + return errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteByte(c) +} + +func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteRune(r) +} + +func (bc *BufferCloser) WriteString(s string) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteString(s) +} diff --git a/math.go b/math.go new file mode 100644 index 000000000..b037d1a71 --- /dev/null +++ b/math.go @@ -0,0 +1,157 @@ +package common + +func MaxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func MaxUint8(a, b uint8) uint8 { + if a > b { + return a + } + return b +} + +func MaxInt16(a, b int16) int16 { + if a > b { + return a + } + return b +} + +func MaxUint16(a, b uint16) uint16 { + if a > b { + 
return a + } + return b +} + +func MaxInt32(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func MaxUint32(a, b uint32) uint32 { + if a > b { + return a + } + return b +} + +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MaxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +func MaxUint(a, b uint) uint { + if a > b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func MinInt8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func MinUint8(a, b uint8) uint8 { + if a < b { + return a + } + return b +} + +func MinInt16(a, b int16) int16 { + if a < b { + return a + } + return b +} + +func MinUint16(a, b uint16) uint16 { + if a < b { + return a + } + return b +} + +func MinInt32(a, b int32) int32 { + if a < b { + return a + } + return b +} + +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func MinUint64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +func MinUint(a, b uint) uint { + if a < b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func ExpUint64(a, b uint64) uint64 { + accum := uint64(1) + for b > 0 { + if b&1 == 1 { + accum *= a + } + a *= a + b >>= 1 + } + return accum +} diff --git a/os.go b/os.go new file mode 100644 index 000000000..170c6f82a --- /dev/null +++ b/os.go @@ -0,0 +1,225 @@ +package common + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/signal" + "strings" + "sync" + "time" +) + +var ( + GoPath = os.Getenv("GOPATH") +) + +func TrapSignal(cb func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + signal.Notify(c, os.Kill) 
+ go func() { + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + if cb != nil { + cb() + } + os.Exit(1) + } + }() + select {} +} + +func Exit(s string) { + fmt.Printf(s + "\n") + os.Exit(1) +} + +func EnsureDir(dir string) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + err := os.MkdirAll(dir, 0700) + if err != nil { + return fmt.Errorf("Could not create directory %v. %v", dir, err) + } + } + return nil +} + +func FileExists(filePath string) bool { + _, err := os.Stat(filePath) + return !os.IsNotExist(err) +} + +func ReadFile(filePath string) ([]byte, error) { + return ioutil.ReadFile(filePath) +} + +func MustReadFile(filePath string) []byte { + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + Exit(Fmt("MustReadFile failed: %v", err)) + return nil + } + return fileBytes +} + +func WriteFile(filePath string, contents []byte) error { + err := ioutil.WriteFile(filePath, contents, 0600) + if err != nil { + return err + } + // fmt.Printf("File written to %v.\n", filePath) + return nil +} + +func MustWriteFile(filePath string, contents []byte) { + err := WriteFile(filePath, contents) + if err != nil { + Exit(Fmt("MustWriteFile failed: %v", err)) + } +} + +// Writes to newBytes to filePath. +// Guaranteed not to lose *both* oldBytes and newBytes, +// (assuming that the OS is perfect) +func WriteFileAtomic(filePath string, newBytes []byte) error { + // If a file already exists there, copy to filePath+".bak" (overwrite anything) + if _, err := os.Stat(filePath); !os.IsNotExist(err) { + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + return fmt.Errorf("Could not read file %v. %v", filePath, err) + } + err = ioutil.WriteFile(filePath+".bak", fileBytes, 0600) + if err != nil { + return fmt.Errorf("Could not write file %v. %v", filePath+".bak", err) + } + } + // Write newBytes to filePath.new + err := ioutil.WriteFile(filePath+".new", newBytes, 0600) + if err != nil { + return fmt.Errorf("Could not write file %v. 
%v", filePath+".new", err) + } + // Move filePath.new to filePath + err = os.Rename(filePath+".new", filePath) + return err +} + +//-------------------------------------------------------------------------------- + +/* AutoFile usage + +// Create/Append to ./autofile_test +af, err := OpenAutoFile("autofile_test") +if err != nil { + panic(err) +} + +// Stream of writes. +// During this time, the file may be moved e.g. by logRotate. +for i := 0; i < 60; i++ { + af.Write([]byte(Fmt("LOOP(%v)", i))) + time.Sleep(time.Second) +} + +// Close the AutoFile +err = af.Close() +if err != nil { + panic(err) +} +*/ + +const autoFileOpenDuration = 1000 * time.Millisecond + +// Automatically closes and re-opens file for writing. +// This is useful for using a log file with the logrotate tool. +type AutoFile struct { + Path string + ticker *time.Ticker + mtx sync.Mutex + file *os.File +} + +func OpenAutoFile(path string) (af *AutoFile, err error) { + af = &AutoFile{ + Path: path, + ticker: time.NewTicker(autoFileOpenDuration), + } + if err = af.openFile(); err != nil { + return + } + go af.processTicks() + return +} + +func (af *AutoFile) Close() error { + af.ticker.Stop() + af.mtx.Lock() + err := af.closeFile() + af.mtx.Unlock() + return err +} + +func (af *AutoFile) processTicks() { + for { + _, ok := <-af.ticker.C + if !ok { + return // Done. 
+ } + af.mtx.Lock() + af.closeFile() + af.mtx.Unlock() + } +} + +func (af *AutoFile) closeFile() (err error) { + file := af.file + if file == nil { + return nil + } + af.file = nil + return file.Close() +} + +func (af *AutoFile) Write(b []byte) (n int, err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + if af.file == nil { + if err = af.openFile(); err != nil { + return + } + } + return af.file.Write(b) +} + +func (af *AutoFile) openFile() error { + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return err + } + af.file = file + return nil +} + +func Tempfile(prefix string) (*os.File, string) { + file, err := ioutil.TempFile("", prefix) + if err != nil { + PanicCrisis(err) + } + return file, file.Name() +} + +func Prompt(prompt string, defaultValue string) (string, error) { + fmt.Print(prompt) + reader := bufio.NewReader(os.Stdin) + line, err := reader.ReadString('\n') + if err != nil { + return defaultValue, err + } else { + line = strings.TrimSpace(line) + if line == "" { + return defaultValue, nil + } + return line, nil + } +} diff --git a/random.go b/random.go new file mode 100644 index 000000000..645601154 --- /dev/null +++ b/random.go @@ -0,0 +1,145 @@ +package common + +import ( + crand "crypto/rand" + "encoding/hex" + "math/rand" + "time" +) + +const ( + strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters +) + +func init() { + // Seed math/rand with "secure" int64 + b := CRandBytes(8) + var seed uint64 + for i := 0; i < 8; i++ { + seed |= uint64(b[i]) + seed <<= 8 + } + rand.Seed(int64(seed)) +} + +// Constructs an alphanumeric string of given length. 
+func RandStr(length int) string { + chars := []byte{} +MAIN_LOOP: + for { + val := rand.Int63() + for i := 0; i < 10; i++ { + v := int(val & 0x3f) // rightmost 6 bits + if v >= 62 { // only 62 characters in strChars + val >>= 6 + continue + } else { + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 + } + } + } + + return string(chars) +} + +func RandUint16() uint16 { + return uint16(rand.Uint32() & (1<<16 - 1)) +} + +func RandUint32() uint32 { + return rand.Uint32() +} + +func RandUint64() uint64 { + return uint64(rand.Uint32())<<32 + uint64(rand.Uint32()) +} + +func RandUint() uint { + return uint(rand.Int()) +} + +func RandInt16() int16 { + return int16(rand.Uint32() & (1<<16 - 1)) +} + +func RandInt32() int32 { + return int32(rand.Uint32()) +} + +func RandInt64() int64 { + return int64(rand.Uint32())<<32 + int64(rand.Uint32()) +} + +func RandInt() int { + return rand.Int() +} + +// Distributed pseudo-exponentially to test for various cases +func RandUint16Exp() uint16 { + bits := rand.Uint32() % 16 + if bits == 0 { + return 0 + } + n := uint16(1 << (bits - 1)) + n += uint16(rand.Int31()) & ((1 << (bits - 1)) - 1) + return n +} + +// Distributed pseudo-exponentially to test for various cases +func RandUint32Exp() uint32 { + bits := rand.Uint32() % 32 + if bits == 0 { + return 0 + } + n := uint32(1 << (bits - 1)) + n += uint32(rand.Int31()) & ((1 << (bits - 1)) - 1) + return n +} + +// Distributed pseudo-exponentially to test for various cases +func RandUint64Exp() uint64 { + bits := rand.Uint32() % 64 + if bits == 0 { + return 0 + } + n := uint64(1 << (bits - 1)) + n += uint64(rand.Int63()) & ((1 << (bits - 1)) - 1) + return n +} + +func RandFloat32() float32 { + return rand.Float32() +} + +func RandTime() time.Time { + return time.Unix(int64(RandUint64Exp()), 0) +} + +func RandBytes(n int) []byte { + bs := make([]byte, n) + for i := 0; i < n; i++ { + bs[i] = byte(rand.Intn(256)) + } + return bs +} + 
+//----------------------------------------------------------------------------- +// CRand* methods are crypto safe. + +func CRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} + +// RandHex(24) gives 96 bits of randomness, strong enough for most purposes. +func CRandHex(numDigits int) string { + return hex.EncodeToString(CRandBytes(numDigits / 2)) +} diff --git a/repeat_timer.go b/repeat_timer.go new file mode 100644 index 000000000..e2aa18ea8 --- /dev/null +++ b/repeat_timer.go @@ -0,0 +1,72 @@ +package common + +import "time" +import "sync" + +/* +RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. +It's good for keeping connections alive. +A RepeatTimer must be Stop()'d or it will keep a goroutine alive. +*/ +type RepeatTimer struct { + Ch chan time.Time + + mtx sync.Mutex + name string + ticker *time.Ticker + quit chan struct{} + dur time.Duration +} + +func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { + var t = &RepeatTimer{ + Ch: make(chan time.Time), + ticker: time.NewTicker(dur), + quit: make(chan struct{}), + name: name, + dur: dur, + } + go t.fireRoutine(t.ticker) + return t +} + +func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { + for { + select { + case t_ := <-ticker.C: + t.Ch <- t_ + case <-t.quit: + return + } + } +} + +// Wait the duration again before firing. +func (t *RepeatTimer) Reset() { + t.Stop() + + t.mtx.Lock() // Lock + defer t.mtx.Unlock() + + t.ticker = time.NewTicker(t.dur) + t.quit = make(chan struct{}) + go t.fireRoutine(t.ticker) +} + +// For ease of .Stop()'ing services before .Start()'ing them, +// we ignore .Stop()'s on nil RepeatTimers. 
+func (t *RepeatTimer) Stop() bool { + if t == nil { + return false + } + t.mtx.Lock() // Lock + defer t.mtx.Unlock() + + exists := t.ticker != nil + if exists { + t.ticker.Stop() + t.ticker = nil + close(t.quit) + } + return exists +} diff --git a/service.go b/service.go new file mode 100644 index 000000000..05b2adedd --- /dev/null +++ b/service.go @@ -0,0 +1,154 @@ +/* + +Classical-inheritance-style service declarations. +Services can be started, then stopped. +Users can override the OnStart/OnStop methods. +These methods are guaranteed to be called at most once. +Caller must ensure that Start() and Stop() are not called concurrently. +It is ok to call Stop() without calling Start() first. +Services cannot be re-started unless otherwise documented. + +Typical usage: + +type FooService struct { + BaseService + // private fields +} + +func NewFooService() *FooService { + fs := &FooService{ + // init + } + fs.BaseService = *NewBaseService(log, "FooService", fs) + return fs +} + +func (fs *FooService) OnStart() error { + fs.BaseService.OnStart() // Always call the overridden method. + // initialize private fields + // start subroutines, etc. +} + +func (fs *FooService) OnStop() error { + fs.BaseService.OnStop() // Always call the overridden method. + // close/destroy private fields + // stop subroutines, etc. 
+} + +*/ +package common + +import "sync/atomic" +import "github.com/tendermint/tendermint/Godeps/_workspace/src/github.com/tendermint/log15" + +type Service interface { + Start() (bool, error) + OnStart() error + + Stop() bool + OnStop() + + IsRunning() bool + + String() string +} + +type BaseService struct { + log log15.Logger + name string + started uint32 // atomic + stopped uint32 // atomic + + // The "subclass" of BaseService + impl Service +} + +func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { + return &BaseService{ + log: log, + name: name, + impl: impl, + } +} + +// Implements Servce +func (bs *BaseService) Start() (bool, error) { + if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { + if atomic.LoadUint32(&bs.stopped) == 1 { + if bs.log != nil { + bs.log.Warn(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + } + return false, nil + } else { + if bs.log != nil { + bs.log.Notice(Fmt("Starting %v", bs.name), "impl", bs.impl) + } + } + err := bs.impl.OnStart() + return true, err + } else { + if bs.log != nil { + bs.log.Info(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) + } + return false, nil + } +} + +// Implements Service +func (bs *BaseService) OnStart() error { return nil } + +// Implements Service +func (bs *BaseService) Stop() bool { + if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { + if bs.log != nil { + bs.log.Notice(Fmt("Stopping %v", bs.name), "impl", bs.impl) + } + bs.impl.OnStop() + return true + } else { + if bs.log != nil { + bs.log.Notice(Fmt("Not stopping %v", bs.name), "impl", bs.impl) + } + return false + } +} + +// Implements Service +func (bs *BaseService) OnStop() {} + +// Implements Service +func (bs *BaseService) IsRunning() bool { + return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 +} + +// Implements Servce +func (bs *BaseService) String() string { + return bs.name +} + +//---------------------------------------- + +type 
QuitService struct { + BaseService + Quit chan struct{} +} + +func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { + return &QuitService{ + BaseService: *NewBaseService(log, name, impl), + Quit: nil, + } +} + +// NOTE: when overriding OnStart, must call .QuitService.OnStart(). +func (qs *QuitService) OnStart() error { + qs.Quit = make(chan struct{}) + return nil +} + +// NOTE: when overriding OnStop, must call .QuitService.OnStop(). +func (qs *QuitService) OnStop() { + if qs.Quit != nil { + close(qs.Quit) + } +} diff --git a/string.go b/string.go new file mode 100644 index 000000000..a4d221b74 --- /dev/null +++ b/string.go @@ -0,0 +1,24 @@ +package common + +import ( + "fmt" + "strings" +) + +var Fmt = fmt.Sprintf + +func RightPadString(s string, totalLength int) string { + remaining := totalLength - len(s) + if remaining > 0 { + s = s + strings.Repeat(" ", remaining) + } + return s +} + +func LeftPadString(s string, totalLength int) string { + remaining := totalLength - len(s) + if remaining > 0 { + s = strings.Repeat(" ", remaining) + s + } + return s +} diff --git a/test/assert.go b/test/assert.go new file mode 100644 index 000000000..a6ffed0ce --- /dev/null +++ b/test/assert.go @@ -0,0 +1,14 @@ +package test + +import ( + "testing" +) + +func AssertPanics(t *testing.T, msg string, f func()) { + defer func() { + if err := recover(); err == nil { + t.Errorf("Should have panic'd, but didn't: %v", msg) + } + }() + f() +} diff --git a/test/mutate.go b/test/mutate.go new file mode 100644 index 000000000..39bf90557 --- /dev/null +++ b/test/mutate.go @@ -0,0 +1,28 @@ +package test + +import ( + . 
"github.com/tendermint/tendermint/common" +) + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an empty bytez") + } + + // Copy bytez + mBytez := make([]byte, len(bytez)) + copy(mBytez, bytez) + bytez = mBytez + + // Try a random mutation + switch RandInt() % 2 { + case 0: // Mutate a single byte + bytez[RandInt()%len(bytez)] += byte(RandInt()%255 + 1) + case 1: // Remove an arbitrary byte + pos := RandInt() % len(bytez) + bytez = append(bytez[:pos], bytez[pos+1:]...) + } + return bytez +} diff --git a/throttle_timer.go b/throttle_timer.go new file mode 100644 index 000000000..0b40a60c2 --- /dev/null +++ b/throttle_timer.go @@ -0,0 +1,57 @@ +package common + +import ( + "sync/atomic" + "time" +) + +/* +ThrottleTimer fires an event at most "dur" after each .Set() call. +If a short burst of .Set() calls happens, ThrottleTimer fires once. +If a long continuous burst of .Set() calls happens, ThrottleTimer fires +at most once every "dur". 
+*/ +type ThrottleTimer struct { + Name string + Ch chan struct{} + quit chan struct{} + dur time.Duration + timer *time.Timer + isSet uint32 +} + +func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { + var ch = make(chan struct{}) + var quit = make(chan struct{}) + var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + t.timer = time.AfterFunc(dur, t.fireRoutine) + t.timer.Stop() + return t +} + +func (t *ThrottleTimer) fireRoutine() { + select { + case t.Ch <- struct{}{}: + atomic.StoreUint32(&t.isSet, 0) + case <-t.quit: + // do nothing + default: + t.timer.Reset(t.dur) + } +} + +func (t *ThrottleTimer) Set() { + if atomic.CompareAndSwapUint32(&t.isSet, 0, 1) { + t.timer.Reset(t.dur) + } +} + +// For ease of .Stop()'ing services before .Start()'ing them, +// we ignore .Stop()'s on nil ThrottleTimers +func (t *ThrottleTimer) Stop() bool { + if t == nil { + return false + } + close(t.quit) + return t.timer.Stop() +} diff --git a/word.go b/word.go new file mode 100644 index 000000000..4072482b8 --- /dev/null +++ b/word.go @@ -0,0 +1,91 @@ +package common + +import ( + "bytes" + "sort" +) + +var ( + Zero256 = Word256{0} + One256 = Word256{1} +) + +type Word256 [32]byte + +func (w Word256) String() string { return string(w[:]) } +func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } +func (w Word256) Copy() Word256 { return w } +func (w Word256) Bytes() []byte { return w[:] } // copied. 
+func (w Word256) Prefix(n int) []byte { return w[:n] } +func (w Word256) Postfix(n int) []byte { return w[32-n:] } +func (w Word256) IsZero() bool { + accum := byte(0) + for _, byt := range w { + accum |= byt + } + return accum == 0 +} +func (w Word256) Compare(other Word256) int { + return bytes.Compare(w[:], other[:]) +} + +func Uint64ToWord256(i uint64) Word256 { + buf := [8]byte{} + PutUint64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func Int64ToWord256(i int64) Word256 { + buf := [8]byte{} + PutInt64BE(buf[:], i) + return LeftPadWord256(buf[:]) +} + +func RightPadWord256(bz []byte) (word Word256) { + copy(word[:], bz) + return +} + +func LeftPadWord256(bz []byte) (word Word256) { + copy(word[32-len(bz):], bz) + return +} + +func Uint64FromWord256(word Word256) uint64 { + buf := word.Postfix(8) + return GetUint64BE(buf) +} + +func Int64FromWord256(word Word256) int64 { + buf := word.Postfix(8) + return GetInt64BE(buf) +} + +//------------------------------------- + +type Tuple256 struct { + First Word256 + Second Word256 +} + +func (tuple Tuple256) Compare(other Tuple256) int { + firstCompare := tuple.First.Compare(other.First) + if firstCompare == 0 { + return tuple.Second.Compare(other.Second) + } else { + return firstCompare + } +} + +func Tuple256Split(t Tuple256) (Word256, Word256) { + return t.First, t.Second +} + +type Tuple256Slice []Tuple256 + +func (p Tuple256Slice) Len() int { return len(p) } +func (p Tuple256Slice) Less(i, j int) bool { + return p[i].Compare(p[j]) < 0 +} +func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Tuple256Slice) Sort() { sort.Sort(p) } From cfeac977fd6b5e9ed5d59863ac81374e02652582 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 21 Oct 2015 18:19:14 -0700 Subject: [PATCH 007/515] first commit --- LICENSE.md | 206 +++++++++++++++++++++++++++++++++++++++++++++++++++++ config.go | 14 ++++ log.go | 64 +++++++++++++++++ 3 files changed, 284 insertions(+) create mode 100644 LICENSE.md create 
mode 100644 config.go create mode 100644 log.go diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..ee1d394da --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,206 @@ +Tendermint Go-Logger +Copyright (C) 2015 Tendermint + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>. + +//-------------------------------------------------------------------------------- + +GNU GENERAL PUBLIC LICENSE + +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
+ +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
+b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
+b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. +d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
+A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
+ +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. 
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. 
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS diff --git a/config.go b/config.go new file mode 100644 index 000000000..4083152a0 --- /dev/null +++ b/config.go @@ -0,0 +1,14 @@ +package logger + +import ( + cfg "github.com/tendermint/go-config" +) + +var config cfg.Config = nil + +func init() { + cfg.OnConfig(func(newConfig cfg.Config) { + config = newConfig + Reset() // reset log root upon config change. + }) +} diff --git a/log.go b/log.go new file mode 100644 index 000000000..104f7ccd8 --- /dev/null +++ b/log.go @@ -0,0 +1,64 @@ +package logger + +import ( + "os" + + . "github.com/tendermint/common" + "github.com/tendermint/log15" +) + +var rootHandler log15.Handler + +func init() { + Reset() +} + +// You might want to call this after resetting tendermint/go-config. +func Reset() { + + var logLevel string = "debug" + if config != nil { + logLevel = config.GetString("log_level") + } + + // stdout handler + //handlers := []log15.Handler{} + stdoutHandler := log15.LvlFilterHandler( + getLevel(logLevel), + log15.StreamHandler(os.Stdout, log15.TerminalFormat()), + ) + //handlers = append(handlers, stdoutHandler) + + // Set rootHandler. + //rootHandler = log15.MultiHandler(handlers...) + rootHandler = stdoutHandler + + // By setting handlers on the root, we handle events from all loggers. + log15.Root().SetHandler(rootHandler) +} + +// See binary/log for an example of usage. +func RootHandler() log15.Handler { + return rootHandler +} + +func New(ctx ...interface{}) log15.Logger { + return log15.Root().New(ctx...) 
+}
+
+func getLevel(lvlString string) log15.Lvl {
+	lvl, err := log15.LvlFromString(lvlString)
+	if err != nil {
+		Exit(Fmt("Invalid log level %v: %v", lvlString, err))
+	}
+	return lvl
+}
+
+//----------------------------------------
+// Exported from log15
+
+var LvlFilterHandler = log15.LvlFilterHandler
+var LvlDebug = log15.LvlDebug
+var LvlInfo = log15.LvlInfo
+var LvlWarn = log15.LvlWarn
+var LvlError = log15.LvlError

From 6b54e7b8d945347f8fd1bdf83a2e9843561abbc5 Mon Sep 17 00:00:00 2001
From: Jae Kwon
Date: Wed, 21 Oct 2015 12:26:50 -0700
Subject: [PATCH 008/515] Added License

---
 LICENSE.md     | 206 +++++++++++++++++++++++++++++++++++++++++++++++++
 service.go     |   7 +-
 test/mutate.go |   2 +-
 3 files changed, 212 insertions(+), 3 deletions(-)
 create mode 100644 LICENSE.md

diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 000000000..aaf0cf06d
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,206 @@
+Tendermint Go-Common
+Copyright (C) 2015 Tendermint
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+//--------------------------------------------------------------------------------
+
+GNU GENERAL PUBLIC LICENSE
+
+Version 3, 29 June 2007
+
+Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+Preamble
+
+The GNU General Public License is a free, copyleft license for software and other kinds of works. 
+ +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. 
For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. 
The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
+ +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. 
+You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
+A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. +b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
+d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
+ +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
+ +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. 
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. 
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS diff --git a/service.go b/service.go index 05b2adedd..ca923b1d5 100644 --- a/service.go +++ b/service.go @@ -38,8 +38,11 @@ func (fs *FooService) OnStop() error { */ package common -import "sync/atomic" -import "github.com/tendermint/tendermint/Godeps/_workspace/src/github.com/tendermint/log15" +import ( + "sync/atomic" + + "github.com/tendermint/log15" +) type Service interface { Start() (bool, error) diff --git a/test/mutate.go b/test/mutate.go index 39bf90557..629e9f865 100644 --- a/test/mutate.go +++ b/test/mutate.go @@ -1,7 +1,7 @@ package test import ( - . "github.com/tendermint/tendermint/common" + . "github.com/tendermint/go-common" ) // Contract: !bytes.Equal(input, output) && len(input) >= len(output) From 28d39f8726c76b163e881c3d05dad227c93200ae Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 22 Oct 2015 12:31:02 -0700 Subject: [PATCH 009/515] ... 
--- LICENSE.md | 206 ++++++++++++++++++++++++++++++++++++++++++++++++++++ config.go | 13 ++++ db.go | 50 +++++++++++++ level_db.go | 83 +++++++++++++++++++++ mem_db.go | 44 +++++++++++ 5 files changed, 396 insertions(+) create mode 100644 LICENSE.md create mode 100644 config.go create mode 100644 db.go create mode 100644 level_db.go create mode 100644 mem_db.go diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..25c3191e9 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,206 @@ +Tendermint Go-DB +Copyright (C) 2015 Tendermint + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +//-------------------------------------------------------------------------------- + +GNU GENERAL PUBLIC LICENSE + +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. 
We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. 
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
+ +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. 
+You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. +b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. +d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
+ +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
+ +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. +If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. +If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS diff --git a/config.go b/config.go new file mode 100644 index 000000000..da66c2158 --- /dev/null +++ b/config.go @@ -0,0 +1,13 @@ +package db + +import ( + cfg "github.com/tendermint/go-config" +) + +var config cfg.Config = nil + +func init() { + cfg.OnConfig(func(newConfig cfg.Config) { + config = newConfig + }) +} diff --git a/db.go b/db.go new file mode 100644 index 000000000..2d9c3d2b1 --- /dev/null +++ b/db.go @@ -0,0 +1,50 @@ +package db + +import ( + "path" + + . 
"github.com/tendermint/go-common" +) + +type DB interface { + Get([]byte) []byte + Set([]byte, []byte) + SetSync([]byte, []byte) + Delete([]byte) + DeleteSync([]byte) + Close() + + // For debugging + Print() +} + +//----------------------------------------------------------------------------- + +// Database types +const DBBackendMemDB = "memdb" +const DBBackendLevelDB = "leveldb" + +var dbs = NewCMap() + +func GetDB(name string) DB { + db := dbs.Get(name) + if db != nil { + return db.(DB) + } + switch config.GetString("db_backend") { + case DBBackendMemDB: + db := NewMemDB() + dbs.Set(name, db) + return db + case DBBackendLevelDB: + db, err := NewLevelDB(path.Join(config.GetString("db_dir"), name+".db")) + if err != nil { + PanicCrisis(err) + } + dbs.Set(name, db) + return db + default: + PanicSanity(Fmt("Unknown DB backend: %v", config.GetString("db_backend"))) + } + return nil +} diff --git a/level_db.go b/level_db.go new file mode 100644 index 000000000..dee57a321 --- /dev/null +++ b/level_db.go @@ -0,0 +1,83 @@ +package db + +import ( + "fmt" + "path" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + + . 
"github.com/tendermint/go-common" +) + +type LevelDB struct { + db *leveldb.DB +} + +func NewLevelDB(name string) (*LevelDB, error) { + dbPath := path.Join(name) + db, err := leveldb.OpenFile(dbPath, nil) + if err != nil { + return nil, err + } + database := &LevelDB{db: db} + return database, nil +} + +func (db *LevelDB) Get(key []byte) []byte { + res, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return nil + } else { + PanicCrisis(err) + } + } + return res +} + +func (db *LevelDB) Set(key []byte, value []byte) { + err := db.db.Put(key, value, nil) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB) SetSync(key []byte, value []byte) { + err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB) Delete(key []byte) { + err := db.db.Delete(key, nil) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB) DeleteSync(key []byte) { + err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB) DB() *leveldb.DB { + return db.db +} + +func (db *LevelDB) Close() { + db.db.Close() +} + +func (db *LevelDB) Print() { + iter := db.db.NewIterator(nil, nil) + for iter.Next() { + key := iter.Key() + value := iter.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} diff --git a/mem_db.go b/mem_db.go new file mode 100644 index 000000000..b7d8918d4 --- /dev/null +++ b/mem_db.go @@ -0,0 +1,44 @@ +package db + +import ( + "fmt" +) + +type MemDB struct { + db map[string][]byte +} + +func NewMemDB() *MemDB { + database := &MemDB{db: make(map[string][]byte)} + return database +} + +func (db *MemDB) Get(key []byte) []byte { + return db.db[string(key)] +} + +func (db *MemDB) Set(key []byte, value []byte) { + db.db[string(key)] = value +} + +func (db *MemDB) SetSync(key []byte, value []byte) { + db.db[string(key)] = value +} + +func (db *MemDB) Delete(key []byte) { + delete(db.db, string(key)) +} + 
+func (db *MemDB) DeleteSync(key []byte) { + delete(db.db, string(key)) +} + +func (db *MemDB) Close() { + db = nil +} + +func (db *MemDB) Print() { + for key, value := range db.db { + fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) + } +} From b072ed317354e6b507d6abde4c0cfbb516f31ab5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 22 Oct 2015 12:33:53 -0700 Subject: [PATCH 010/515] ... --- log.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/log.go b/log.go index 104f7ccd8..e616d0ac8 100644 --- a/log.go +++ b/log.go @@ -3,7 +3,7 @@ package logger import ( "os" - . "github.com/tendermint/common" + . "github.com/tendermint/go-common" "github.com/tendermint/log15" ) From db8763068922fd08bc9cd863ceeb6f5b60ddae7d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 5 Nov 2015 10:54:58 -0800 Subject: [PATCH 011/515] initial commit --- LICENSE.md | 206 +++++++++++++++++++++++++++++++++++++++++++++++++++++ process.go | 75 +++++++++++++++++++ 2 files changed, 281 insertions(+) create mode 100644 LICENSE.md create mode 100644 process.go diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..3f811edc1 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,206 @@ +Tendermint Go-Process +Copyright (C) 2015 Tendermint + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+ +//-------------------------------------------------------------------------------- + +GNU GENERAL PUBLIC LICENSE + +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
+ +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. 
Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. 
+ +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. 
+All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
+ +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
+b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. +d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
+A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
+ +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. 
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. 
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS diff --git a/process.go b/process.go new file mode 100644 index 000000000..88497a98a --- /dev/null +++ b/process.go @@ -0,0 +1,75 @@ +package process + +import ( + "fmt" + "io" + "os" + "os/exec" + "time" +) + +type Process struct { + Label string + ExecPath string + Args []string + Pid int + StartTime time.Time + EndTime time.Time + Cmd *exec.Cmd `json:"-"` + ExitState *os.ProcessState `json:"-"` + InputFile io.Reader `json:"-"` + OutputFile io.WriteCloser `json:"-"` + WaitCh chan struct{} `json:"-"` +} + +// execPath: command name +// args: args to command. (should not include name) +func StartProcess(label string, execPath string, args []string, inFile io.Reader, outFile io.WriteCloser) (*Process, error) { + cmd := exec.Command(execPath, args...) 
+ cmd.Stdout = outFile + cmd.Stderr = outFile + cmd.Stdin = inFile + if err := cmd.Start(); err != nil { + return nil, err + } + proc := &Process{ + Label: label, + ExecPath: execPath, + Args: args, + Pid: cmd.Process.Pid, + StartTime: time.Now(), + Cmd: cmd, + ExitState: nil, + InputFile: inFile, + OutputFile: outFile, + WaitCh: make(chan struct{}), + } + go func() { + err := proc.Cmd.Wait() + if err != nil { + fmt.Printf("Process exit: %v\n", err) + if exitError, ok := err.(*exec.ExitError); ok { + proc.ExitState = exitError.ProcessState + } + } + proc.ExitState = proc.Cmd.ProcessState + proc.EndTime = time.Now() // TODO make this goroutine-safe + err = proc.OutputFile.Close() + if err != nil { + fmt.Printf("Error closing output file for %v: %v\n", proc.Label, err) + } + close(proc.WaitCh) + }() + return proc, nil +} + +func (proc *Process) StopProcess() error { + defer proc.OutputFile.Close() + if kill { + fmt.Printf("Killing process %v\n", proc.Cmd.Process) + return proc.Cmd.Process.Kill() + } else { + fmt.Printf("Stopping process %v\n", proc.Cmd.Process) + return proc.Cmd.Process.Signal(os.Interrupt) + } +} From 70b9a368caee165dd0c0754448f9bcb664014bbf Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 5 Nov 2015 14:21:33 -0800 Subject: [PATCH 012/515] ... 
--- process.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process.go b/process.go index 88497a98a..5560aaa43 100644 --- a/process.go +++ b/process.go @@ -63,7 +63,7 @@ func StartProcess(label string, execPath string, args []string, inFile io.Reader return proc, nil } -func (proc *Process) StopProcess() error { +func (proc *Process) StopProcess(kill bool) error { defer proc.OutputFile.Close() if kill { fmt.Printf("Killing process %v\n", proc.Cmd.Process) From 0424228e97d524295cc6e00b5cc69aa29c2c570e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 5 Nov 2015 15:00:25 -0800 Subject: [PATCH 013/515] Added net.go Connect() --- net.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 net.go diff --git a/net.go b/net.go new file mode 100644 index 000000000..2f9c9c8c2 --- /dev/null +++ b/net.go @@ -0,0 +1,14 @@ +package common + +import ( + "net" + "strings" +) + +// protoAddr: e.g. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +func Connect(protoAddr string) (net.Conn, error) { + parts := strings.SplitN(protoAddr, "://", 2) + proto, address := parts[0], parts[1] + conn, err := net.Dial(proto, address) + return conn, err +} From c65e9e7d911725efd915956824d374cd7c61dc41 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 25 Nov 2015 16:50:28 -0800 Subject: [PATCH 014/515] Added CList --- clist.go | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 clist.go diff --git a/clist.go b/clist.go new file mode 100644 index 000000000..90e662a33 --- /dev/null +++ b/clist.go @@ -0,0 +1,152 @@ +package common + +/* +The purpose of CList is to provide a goroutine-safe linked-list. +NOTE: Not all methods of container/list are (yet) implemented. +*/ + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// CElement is an element of a linked-list +// Traversal from a CElement are goroutine-safe. 
+type CElement struct { + next unsafe.Pointer + wg *sync.WaitGroup + Value interface{} +} + +// Blocking implementation of Next(). +// If return is nil, this element was removed from the list. +func (e *CElement) NextWait() *CElement { + e.wg.Wait() + return e.Next() +} + +func (e *CElement) Next() *CElement { + next := atomic.LoadPointer(&e.next) + if next == nil { + return nil + } + return (*CElement)(next) +} + +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +type CList struct { + mtx sync.Mutex + wg *sync.WaitGroup + head *CElement // first element + tail *CElement // last element + len int // list length +} + +func (l *CList) Init() *CList { + l.mtx.Lock() + defer l.mtx.Unlock() + l.wg = waitGroup1() + l.head = nil + l.tail = nil + l.len = 0 + return l +} + +func NewCList() *CList { return new(CList).Init() } + +func (l *CList) Len() int { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.len +} + +func (l *CList) Front() *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.head +} + +func (l *CList) FrontWait() *CElement { + for { + l.mtx.Lock() + head := l.head + wg := l.wg + l.mtx.Unlock() + if head == nil { + wg.Wait() + } else { + return head + } + } +} + +func (l *CList) Back() *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.tail +} + +func (l *CList) BackWait() *CElement { + for { + l.mtx.Lock() + tail := l.tail + wg := l.wg + l.mtx.Unlock() + if tail == nil { + wg.Wait() + } else { + return tail + } + } +} + +func (l *CList) PushBack(v interface{}) *CElement { + e := &CElement{ + next: nil, + wg: waitGroup1(), + Value: v, + } + l.mtx.Lock() + defer l.mtx.Unlock() + l.len += 1 + if l.tail == nil { + l.head = e + l.tail = e + l.wg.Done() + return e + } else { + oldTail := l.tail + atomic.StorePointer(&oldTail.next, unsafe.Pointer(e)) + l.tail = e + oldTail.wg.Done() + return e + } + return e +} + +func (l *CList) RemoveFront() interface{} { + 
l.mtx.Lock() + defer l.mtx.Unlock() + if l.head == nil { + return nil + } + oldFront := l.head + next := (*CElement)(oldFront.next) + l.head = next + if next == nil { + l.tail = nil + l.wg = waitGroup1() + } + l.len -= 1 + atomic.StorePointer(&oldFront.next, unsafe.Pointer(nil)) + return oldFront.Value +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} From 76189fde0d98cab9fd9368d35836950cfd20f13f Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 27 Nov 2015 13:04:24 -0800 Subject: [PATCH 015/515] Add Remove() implementation, CList is doubly-linked --- clist.go | 166 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 134 insertions(+), 32 deletions(-) diff --git a/clist.go b/clist.go index 90e662a33..29d74acad 100644 --- a/clist.go +++ b/clist.go @@ -2,6 +2,8 @@ package common /* The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. NOTE: Not all methods of container/list are (yet) implemented. */ @@ -14,24 +16,88 @@ import ( // CElement is an element of a linked-list // Traversal from a CElement are goroutine-safe. type CElement struct { - next unsafe.Pointer - wg *sync.WaitGroup - Value interface{} + next unsafe.Pointer + nextWg *sync.WaitGroup + prev unsafe.Pointer + prevWg *sync.WaitGroup + removed uint32 + Value interface{} } // Blocking implementation of Next(). -// If return is nil, this element was removed from the list. +// May return nil iff CElement was tail and got removed. func (e *CElement) NextWait() *CElement { - e.wg.Wait() - return e.Next() + for { + e.nextWg.Wait() + next := e.Next() + if next == nil { + if e.Removed() { + return nil + } else { + continue + } + } else { + return next + } + } } +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. 
+func (e *CElement) PrevWait() *CElement { + for { + e.prevWg.Wait() + prev := e.Prev() + if prev == nil { + if e.Removed() { + return nil + } else { + continue + } + } else { + return prev + } + } +} + +// Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { - next := atomic.LoadPointer(&e.next) - if next == nil { - return nil + return (*CElement)(atomic.LoadPointer(&e.next)) +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Prev() *CElement { + return (*CElement)(atomic.LoadPointer(&e.prev)) +} + +func (e *CElement) Removed() bool { + return atomic.LoadUint32(&(e.removed)) > 0 +} + +func (e *CElement) setNextAtomic(next *CElement) { + oldNext := (*CElement)(atomic.LoadPointer(&e.next)) + if next == nil && oldNext != nil { + e.nextWg.Add(1) // NOTE: There is still a race condition for waiters, so we for-loop. + } + atomic.StorePointer(&(e.next), unsafe.Pointer(next)) + if next != nil && oldNext == nil { + e.nextWg.Done() + } +} + +func (e *CElement) setPrevAtomic(prev *CElement) { + oldPrev := (*CElement)(atomic.LoadPointer(&e.prev)) + if prev == nil && oldPrev != nil { + e.prevWg.Add(1) // NOTE: There is still a race condition for waiters, so we for-loop. + } + atomic.StorePointer(&(e.prev), unsafe.Pointer(prev)) + if prev != nil && oldPrev == nil { + e.prevWg.Done() } - return (*CElement)(next) +} + +func (e *CElement) setRemovedAtomic() { + atomic.StoreUint32(&(e.removed), 1) } // CList represents a linked list. 
@@ -104,45 +170,81 @@ func (l *CList) BackWait() *CElement { } func (l *CList) PushBack(v interface{}) *CElement { - e := &CElement{ - next: nil, - wg: waitGroup1(), - Value: v, - } l.mtx.Lock() defer l.mtx.Unlock() + + // Construct a new element + e := &CElement{ + prev: nil, + prevWg: waitGroup1(), + next: nil, + nextWg: waitGroup1(), + Value: v, + } + + // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { + l.wg.Done() + } l.len += 1 + + // Modify the tail if l.tail == nil { l.head = e l.tail = e - l.wg.Done() - return e } else { - oldTail := l.tail - atomic.StorePointer(&oldTail.next, unsafe.Pointer(e)) + l.tail.setNextAtomic(e) + e.setPrevAtomic(l.tail) l.tail = e - oldTail.wg.Done() - return e } + return e } -func (l *CList) RemoveFront() interface{} { +// NOTE: As per the contract of CList, removed elements cannot be added back. +func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() defer l.mtx.Unlock() - if l.head == nil { - return nil + + prev := e.Prev() + next := e.Next() + + if l.head == nil || l.tail == nil { + PanicSanity("Remove(e) on empty CList") } - oldFront := l.head - next := (*CElement)(oldFront.next) - l.head = next - if next == nil { - l.tail = nil - l.wg = waitGroup1() + if prev == nil && l.head != e { + PanicSanity("Remove(e) with false head") + } + if next == nil && l.tail != e { + PanicSanity("Remove(e) with false tail") + } + + // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { + l.wg.Add(1) } l.len -= 1 - atomic.StorePointer(&oldFront.next, unsafe.Pointer(nil)) - return oldFront.Value + + // Modify e.prev and e.next and connect + if prev != nil { + prev.setNextAtomic(next) + } + if next != nil { + next.setPrevAtomic(prev) + } + + // Mark e as removed so NextWait/PrevWait can return. + e.setRemovedAtomic() + + // Set .Done() on e, otherwise waiters will wait forever. 
+ if prev == nil { + e.prevWg.Done() + } + if next == nil { + e.nextWg.Done() + } + + return e.Value } func waitGroup1() (wg *sync.WaitGroup) { From 80c7e26bee3e126897f091a94b86c6a8647c8c7e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 27 Nov 2015 14:09:15 -0800 Subject: [PATCH 016/515] Fix memory-leak in CList; Patched with DetachPrev/DetachHead --- clist.go | 53 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/clist.go b/clist.go index 29d74acad..507464bc4 100644 --- a/clist.go +++ b/clist.go @@ -5,6 +5,8 @@ The purpose of CList is to provide a goroutine-safe linked-list. This list can be traversed concurrently by any number of goroutines. However, removed CElements cannot be added back. NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to NukePrev or NukeNext consistently +to ensure garbage collection of removed elements. */ import ( @@ -74,25 +76,45 @@ func (e *CElement) Removed() bool { return atomic.LoadUint32(&(e.removed)) > 0 } +func (e *CElement) DetachNext() { + e.setNextAtomic(nil) + e.nextWg.Done() +} + +func (e *CElement) DetachPrev() { + e.setPrevAtomic(nil) + e.prevWg.Done() +} + func (e *CElement) setNextAtomic(next *CElement) { - oldNext := (*CElement)(atomic.LoadPointer(&e.next)) - if next == nil && oldNext != nil { - e.nextWg.Add(1) // NOTE: There is still a race condition for waiters, so we for-loop. 
- } - atomic.StorePointer(&(e.next), unsafe.Pointer(next)) - if next != nil && oldNext == nil { - e.nextWg.Done() + for { + oldNext := atomic.LoadPointer(&e.next) + if !atomic.CompareAndSwapPointer(&(e.next), oldNext, unsafe.Pointer(next)) { + continue + } + if next == nil && oldNext != nil { // We for-loop in NextWait() so race is ok + e.nextWg.Add(1) + } + if next != nil && oldNext == nil { + e.nextWg.Done() + } + return } } func (e *CElement) setPrevAtomic(prev *CElement) { - oldPrev := (*CElement)(atomic.LoadPointer(&e.prev)) - if prev == nil && oldPrev != nil { - e.prevWg.Add(1) // NOTE: There is still a race condition for waiters, so we for-loop. - } - atomic.StorePointer(&(e.prev), unsafe.Pointer(prev)) - if prev != nil && oldPrev == nil { - e.prevWg.Done() + for { + oldPrev := atomic.LoadPointer(&e.prev) + if !atomic.CompareAndSwapPointer(&(e.prev), oldPrev, unsafe.Pointer(prev)) { + continue + } + if prev == nil && oldPrev != nil { // We for-loop in PrevWait() so race is ok + e.prevWg.Add(1) + } + if prev != nil && oldPrev == nil { + e.prevWg.Done() + } + return } } @@ -100,6 +122,8 @@ func (e *CElement) setRemovedAtomic() { atomic.StoreUint32(&(e.removed), 1) } +//-------------------------------------------------------------------------------- + // CList represents a linked list. // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. @@ -201,6 +225,7 @@ func (l *CList) PushBack(v interface{}) *CElement { return e } +// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. // NOTE: As per the contract of CList, removed elements cannot be added back. 
func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() From d939cd964b4413d3028a86cea95bdc5b05497038 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 29 Nov 2015 21:22:55 -0800 Subject: [PATCH 017/515] First commit of CList --- LICENSE.md | 206 ++++++++++++++++++++++++++++++++++++ clist.go | 285 ++++++++++++++++++++++++++++++++++++++++++++++++++ clist_test.go | 92 ++++++++++++++++ 3 files changed, 583 insertions(+) create mode 100644 LICENSE.md create mode 100644 clist.go create mode 100644 clist_test.go diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..57cfcf41c --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,206 @@ +Tendermint Go-CList +Copyright (C) 2015 Tendermint + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +//-------------------------------------------------------------------------------- + +GNU GENERAL PUBLIC LICENSE + +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
+ +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. 
+ +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
+ +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. 
+You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. +b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. +d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
+ +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
+ +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. +If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. +If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS diff --git a/clist.go b/clist.go new file mode 100644 index 000000000..5295dd995 --- /dev/null +++ b/clist.go @@ -0,0 +1,285 @@ +package clist + +/* +The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. +NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to DetachPrev or DetachNext consistently +to ensure garbage collection of removed elements. +*/ + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// CElement is an element of a linked-list +// Traversal from a CElement are goroutine-safe. 
+type CElement struct { + prev unsafe.Pointer + prevWg *sync.WaitGroup + next unsafe.Pointer + nextWg *sync.WaitGroup + removed uint32 + Value interface{} +} + +// Blocking implementation of Next(). +// May return nil iff CElement was tail and got removed. +func (e *CElement) NextWait() *CElement { + for { + e.nextWg.Wait() + next := e.Next() + if next == nil { + if e.Removed() { + return nil + } else { + continue + } + } else { + return next + } + } +} + +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. +func (e *CElement) PrevWait() *CElement { + for { + e.prevWg.Wait() + prev := e.Prev() + if prev == nil { + if e.Removed() { + return nil + } else { + continue + } + } else { + return prev + } + } +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Next() *CElement { + return (*CElement)(atomic.LoadPointer(&e.next)) +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Prev() *CElement { + return (*CElement)(atomic.LoadPointer(&e.prev)) +} + +func (e *CElement) Removed() bool { + return atomic.LoadUint32(&(e.removed)) > 0 +} + +func (e *CElement) DetachNext() { + if !e.Removed() { + panic("DetachNext() must be called after Remove(e)") + } + atomic.StorePointer(&e.next, nil) +} + +func (e *CElement) DetachPrev() { + if !e.Removed() { + panic("DetachPrev() must be called after Remove(e)") + } + atomic.StorePointer(&e.prev, nil) +} + +func (e *CElement) setNextAtomic(next *CElement) { + for { + oldNext := atomic.LoadPointer(&e.next) + if !atomic.CompareAndSwapPointer(&(e.next), oldNext, unsafe.Pointer(next)) { + continue + } + if next == nil && oldNext != nil { // We for-loop in NextWait() so race is ok + e.nextWg.Add(1) + } + if next != nil && oldNext == nil { + e.nextWg.Done() + } + return + } +} + +func (e *CElement) setPrevAtomic(prev *CElement) { + for { + oldPrev := atomic.LoadPointer(&e.prev) + if !atomic.CompareAndSwapPointer(&(e.prev), oldPrev, unsafe.Pointer(prev)) { + 
continue + } + if prev == nil && oldPrev != nil { // We for-loop in PrevWait() so race is ok + e.prevWg.Add(1) + } + if prev != nil && oldPrev == nil { + e.prevWg.Done() + } + return + } +} + +func (e *CElement) setRemovedAtomic() { + atomic.StoreUint32(&(e.removed), 1) +} + +//-------------------------------------------------------------------------------- + +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +type CList struct { + mtx sync.Mutex + wg *sync.WaitGroup + head *CElement // first element + tail *CElement // last element + len int // list length +} + +func (l *CList) Init() *CList { + l.mtx.Lock() + defer l.mtx.Unlock() + l.wg = waitGroup1() + l.head = nil + l.tail = nil + l.len = 0 + return l +} + +func New() *CList { return new(CList).Init() } + +func (l *CList) Len() int { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.len +} + +func (l *CList) Front() *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.head +} + +func (l *CList) FrontWait() *CElement { + for { + l.mtx.Lock() + head := l.head + wg := l.wg + l.mtx.Unlock() + if head == nil { + wg.Wait() + } else { + return head + } + } +} + +func (l *CList) Back() *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + return l.tail +} + +func (l *CList) BackWait() *CElement { + for { + l.mtx.Lock() + tail := l.tail + wg := l.wg + l.mtx.Unlock() + if tail == nil { + wg.Wait() + } else { + return tail + } + } +} + +func (l *CList) PushBack(v interface{}) *CElement { + l.mtx.Lock() + defer l.mtx.Unlock() + + // Construct a new element + e := &CElement{ + prev: nil, + prevWg: waitGroup1(), + next: nil, + nextWg: waitGroup1(), + Value: v, + } + + // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { + l.wg.Done() + } + l.len += 1 + + // Modify the tail + if l.tail == nil { + l.head = e + l.tail = e + } else { + l.tail.setNextAtomic(e) + e.setPrevAtomic(l.tail) + l.tail = e + } + + return e +} + +// CONTRACT: 
Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. +// NOTE: As per the contract of CList, removed elements cannot be added back. +func (l *CList) Remove(e *CElement) interface{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + prev := e.Prev() + next := e.Next() + + if l.head == nil || l.tail == nil { + panic("Remove(e) on empty CList") + } + if prev == nil && l.head != e { + panic("Remove(e) with false head") + } + if next == nil && l.tail != e { + panic("Remove(e) with false tail") + } + + // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { + l.wg.Add(1) + } + l.len -= 1 + + // Connect next/prev and set head/tail + if prev == nil { + l.head = next + } else { + prev.setNextAtomic(next) + } + if next == nil { + l.tail = prev + } else { + next.setPrevAtomic(prev) + } + + // Set .Done() on e, otherwise waiters will wait forever. + e.setRemovedAtomic() + if prev == nil { + e.prevWg.Done() + } + if next == nil { + e.nextWg.Done() + } + + return e.Value +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/clist_test.go b/clist_test.go new file mode 100644 index 000000000..98f94d59a --- /dev/null +++ b/clist_test.go @@ -0,0 +1,92 @@ +package clist + +import ( + "runtime" + "testing" + "time" +) + +func TestSmall(t *testing.T) { + l := New() + el1 := l.PushBack(1) + el2 := l.PushBack(2) + el3 := l.PushBack(3) + if l.Len() != 3 { + t.Error("Expected len 3, got ", l.Len()) + } + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r1 := l.Remove(el1) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r2 := l.Remove(el2) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r3 := l.Remove(el3) + + if r1 != 1 { + t.Error("Expected 1, got ", r1) + } + if r2 != 2 { + t.Error("Expected 
2, got ", r2) + } + if r3 != 3 { + t.Error("Expected 3, got ", r3) + } + if l.Len() != 0 { + t.Error("Expected len 0, got ", l.Len()) + } + +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +func TestGCFifo(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := 0 + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. + type value struct { + Int int + } + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + gcCount += 1 + }) + } + + for el := l.Front(); el != nil; { + l.Remove(el) + //oldEl := el + el = el.Next() + //oldEl.DetachPrev() + //oldEl.DetachNext() + } + + runtime.GC() + time.Sleep(time.Second * 3) + + if gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + gcCount) + } +} From dbd04eadeb414e614810810620a2d95b2f744ef1 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 29 Nov 2015 21:23:33 -0800 Subject: [PATCH 018/515] Remove CList, moved to go-clist --- clist.go | 279 ------------------------------------------------------- 1 file changed, 279 deletions(-) delete mode 100644 clist.go diff --git a/clist.go b/clist.go deleted file mode 100644 index 507464bc4..000000000 --- a/clist.go +++ /dev/null @@ -1,279 +0,0 @@ -package common - -/* -The purpose of CList is to provide a goroutine-safe linked-list. -This list can be traversed concurrently by any number of goroutines. -However, removed CElements cannot be added back. -NOTE: Not all methods of container/list are (yet) implemented. -NOTE: Removed elements need to NukePrev or NukeNext consistently -to ensure garbage collection of removed elements. -*/ - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// CElement is an element of a linked-list -// Traversal from a CElement are goroutine-safe. 
-type CElement struct { - next unsafe.Pointer - nextWg *sync.WaitGroup - prev unsafe.Pointer - prevWg *sync.WaitGroup - removed uint32 - Value interface{} -} - -// Blocking implementation of Next(). -// May return nil iff CElement was tail and got removed. -func (e *CElement) NextWait() *CElement { - for { - e.nextWg.Wait() - next := e.Next() - if next == nil { - if e.Removed() { - return nil - } else { - continue - } - } else { - return next - } - } -} - -// Blocking implementation of Prev(). -// May return nil iff CElement was head and got removed. -func (e *CElement) PrevWait() *CElement { - for { - e.prevWg.Wait() - prev := e.Prev() - if prev == nil { - if e.Removed() { - return nil - } else { - continue - } - } else { - return prev - } - } -} - -// Nonblocking, may return nil if at the end. -func (e *CElement) Next() *CElement { - return (*CElement)(atomic.LoadPointer(&e.next)) -} - -// Nonblocking, may return nil if at the end. -func (e *CElement) Prev() *CElement { - return (*CElement)(atomic.LoadPointer(&e.prev)) -} - -func (e *CElement) Removed() bool { - return atomic.LoadUint32(&(e.removed)) > 0 -} - -func (e *CElement) DetachNext() { - e.setNextAtomic(nil) - e.nextWg.Done() -} - -func (e *CElement) DetachPrev() { - e.setPrevAtomic(nil) - e.prevWg.Done() -} - -func (e *CElement) setNextAtomic(next *CElement) { - for { - oldNext := atomic.LoadPointer(&e.next) - if !atomic.CompareAndSwapPointer(&(e.next), oldNext, unsafe.Pointer(next)) { - continue - } - if next == nil && oldNext != nil { // We for-loop in NextWait() so race is ok - e.nextWg.Add(1) - } - if next != nil && oldNext == nil { - e.nextWg.Done() - } - return - } -} - -func (e *CElement) setPrevAtomic(prev *CElement) { - for { - oldPrev := atomic.LoadPointer(&e.prev) - if !atomic.CompareAndSwapPointer(&(e.prev), oldPrev, unsafe.Pointer(prev)) { - continue - } - if prev == nil && oldPrev != nil { // We for-loop in PrevWait() so race is ok - e.prevWg.Add(1) - } - if prev != nil && oldPrev == nil { 
- e.prevWg.Done() - } - return - } -} - -func (e *CElement) setRemovedAtomic() { - atomic.StoreUint32(&(e.removed), 1) -} - -//-------------------------------------------------------------------------------- - -// CList represents a linked list. -// The zero value for CList is an empty list ready to use. -// Operations are goroutine-safe. -type CList struct { - mtx sync.Mutex - wg *sync.WaitGroup - head *CElement // first element - tail *CElement // last element - len int // list length -} - -func (l *CList) Init() *CList { - l.mtx.Lock() - defer l.mtx.Unlock() - l.wg = waitGroup1() - l.head = nil - l.tail = nil - l.len = 0 - return l -} - -func NewCList() *CList { return new(CList).Init() } - -func (l *CList) Len() int { - l.mtx.Lock() - defer l.mtx.Unlock() - return l.len -} - -func (l *CList) Front() *CElement { - l.mtx.Lock() - defer l.mtx.Unlock() - return l.head -} - -func (l *CList) FrontWait() *CElement { - for { - l.mtx.Lock() - head := l.head - wg := l.wg - l.mtx.Unlock() - if head == nil { - wg.Wait() - } else { - return head - } - } -} - -func (l *CList) Back() *CElement { - l.mtx.Lock() - defer l.mtx.Unlock() - return l.tail -} - -func (l *CList) BackWait() *CElement { - for { - l.mtx.Lock() - tail := l.tail - wg := l.wg - l.mtx.Unlock() - if tail == nil { - wg.Wait() - } else { - return tail - } - } -} - -func (l *CList) PushBack(v interface{}) *CElement { - l.mtx.Lock() - defer l.mtx.Unlock() - - // Construct a new element - e := &CElement{ - prev: nil, - prevWg: waitGroup1(), - next: nil, - nextWg: waitGroup1(), - Value: v, - } - - // Release waiters on FrontWait/BackWait maybe - if l.len == 0 { - l.wg.Done() - } - l.len += 1 - - // Modify the tail - if l.tail == nil { - l.head = e - l.tail = e - } else { - l.tail.setNextAtomic(e) - e.setPrevAtomic(l.tail) - l.tail = e - } - - return e -} - -// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. 
-// NOTE: As per the contract of CList, removed elements cannot be added back. -func (l *CList) Remove(e *CElement) interface{} { - l.mtx.Lock() - defer l.mtx.Unlock() - - prev := e.Prev() - next := e.Next() - - if l.head == nil || l.tail == nil { - PanicSanity("Remove(e) on empty CList") - } - if prev == nil && l.head != e { - PanicSanity("Remove(e) with false head") - } - if next == nil && l.tail != e { - PanicSanity("Remove(e) with false tail") - } - - // If we're removing the only item, make CList FrontWait/BackWait wait. - if l.len == 1 { - l.wg.Add(1) - } - l.len -= 1 - - // Modify e.prev and e.next and connect - if prev != nil { - prev.setNextAtomic(next) - } - if next != nil { - next.setPrevAtomic(prev) - } - - // Mark e as removed so NextWait/PrevWait can return. - e.setRemovedAtomic() - - // Set .Done() on e, otherwise waiters will wait forever. - if prev == nil { - e.prevWg.Done() - } - if next == nil { - e.nextWg.Done() - } - - return e.Value -} - -func waitGroup1() (wg *sync.WaitGroup) { - wg = &sync.WaitGroup{} - wg.Add(1) - return -} From e3d88d2d7964e5f6a98bf27267f34c2e9f1737d2 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 30 Nov 2015 12:59:05 -0800 Subject: [PATCH 019/515] Add ScanRightDeleteRandom test; Add GCRandom test --- clist_test.go | 121 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/clist_test.go b/clist_test.go index 98f94d59a..d5a3f8ed7 100644 --- a/clist_test.go +++ b/clist_test.go @@ -1,6 +1,8 @@ package clist import ( + "fmt" + "math/rand" "runtime" "testing" "time" @@ -90,3 +92,122 @@ func TestGCFifo(t *testing.T) { gcCount) } } + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +func TestGCRandom(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := 0 + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. 
+ type value struct { + Int int + } + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + gcCount += 1 + }) + } + + els := make([]*CElement, 0, numElements) + for el := l.Front(); el != nil; el = el.Next() { + els = append(els, el) + } + + for _, i := range rand.Perm(numElements) { + el := els[i] + l.Remove(el) + el = el.Next() + } + + runtime.GC() + time.Sleep(time.Second * 3) + + if gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + gcCount) + } +} + +func TestScanRightDeleteRandom(t *testing.T) { + + const numElements = 10000 + const numTimes = 10000000 + const numScanners = 10 + + l := New() + stop := make(chan struct{}) + + els := make([]*CElement, numElements, numElements) + for i := 0; i < numElements; i++ { + el := l.PushBack(i) + els[i] = el + } + + // Launch scanner routines that will rapidly iterate over elements. + for i := 0; i < numScanners; i++ { + go func(scannerID int) { + var el *CElement + restartCounter := 0 + counter := 0 + FOR_LOOP: + for { + select { + case <-stop: + fmt.Println("stopped") + break FOR_LOOP + default: + } + if el == nil { + el = l.FrontWait() + restartCounter += 1 + } + el = el.Next() + counter += 1 + } + fmt.Printf("Scanner %v restartCounter: %v counter: %v\n", scannerID, restartCounter, counter) + }(i) + } + + // Remove an element, push back an element. + for i := 0; i < numTimes; i++ { + // Pick an element to remove + rmElIdx := rand.Intn(len(els)) + rmEl := els[rmElIdx] + + // Remove it + l.Remove(rmEl) + //fmt.Print(".") + + // Insert a new element + newEl := l.PushBack(-1*i - 1) + els[rmElIdx] = newEl + + if i%100000 == 0 { + fmt.Printf("Pushed %vK elements so far...\n", i/1000) + } + + } + + // Stop scanners + close(stop) + time.Sleep(time.Second * 1) + + // And remove all the elements. 
+ for el := l.Front(); el != nil; el = el.Next() { + l.Remove(el) + } + if l.Len() != 0 { + t.Fatal("Failed to remove all elements from CList") + } +} From 4b6741ca3bac84fd40fabde2e34e321081e4cdbf Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 3 Dec 2015 23:44:24 -0800 Subject: [PATCH 020/515] EnsureDir() takes file mode --- os.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/os.go b/os.go index 170c6f82a..43fed1349 100644 --- a/os.go +++ b/os.go @@ -36,9 +36,9 @@ func Exit(s string) { os.Exit(1) } -func EnsureDir(dir string) error { +func EnsureDir(dir string, mode os.FileMode) error { if _, err := os.Stat(dir); os.IsNotExist(err) { - err := os.MkdirAll(dir, 0700) + err := os.MkdirAll(dir, mode) if err != nil { return fmt.Errorf("Could not create directory %v. %v", dir, err) } From 3b50efbe025fae9be5c7efa01ad1781ac4221e85 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 3 Dec 2015 23:56:50 -0800 Subject: [PATCH 021/515] WriteFile*() takes file mode --- os.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/os.go b/os.go index 43fed1349..1d0b538d2 100644 --- a/os.go +++ b/os.go @@ -64,8 +64,8 @@ func MustReadFile(filePath string) []byte { return fileBytes } -func WriteFile(filePath string, contents []byte) error { - err := ioutil.WriteFile(filePath, contents, 0600) +func WriteFile(filePath string, contents []byte, mode os.FileMode) error { + err := ioutil.WriteFile(filePath, contents, mode) if err != nil { return err } @@ -73,8 +73,8 @@ func WriteFile(filePath string, contents []byte) error { return nil } -func MustWriteFile(filePath string, contents []byte) { - err := WriteFile(filePath, contents) +func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { + err := WriteFile(filePath, contents, mode) if err != nil { Exit(Fmt("MustWriteFile failed: %v", err)) } @@ -83,20 +83,20 @@ func MustWriteFile(filePath string, contents []byte) { // Writes to newBytes to filePath. 
// Guaranteed not to lose *both* oldBytes and newBytes, // (assuming that the OS is perfect) -func WriteFileAtomic(filePath string, newBytes []byte) error { +func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { // If a file already exists there, copy to filePath+".bak" (overwrite anything) if _, err := os.Stat(filePath); !os.IsNotExist(err) { fileBytes, err := ioutil.ReadFile(filePath) if err != nil { return fmt.Errorf("Could not read file %v. %v", filePath, err) } - err = ioutil.WriteFile(filePath+".bak", fileBytes, 0600) + err = ioutil.WriteFile(filePath+".bak", fileBytes, mode) if err != nil { return fmt.Errorf("Could not write file %v. %v", filePath+".bak", err) } } // Write newBytes to filePath.new - err := ioutil.WriteFile(filePath+".new", newBytes, 0600) + err := ioutil.WriteFile(filePath+".new", newBytes, mode) if err != nil { return fmt.Errorf("Could not write file %v. %v", filePath+".new", err) } From acdbd88e88a5d75b97348273706ec4b10e673b90 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 9 Dec 2015 09:38:40 -0800 Subject: [PATCH 022/515] Add BypassHandler and NewBypass to bypass main filter --- log.go | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/log.go b/log.go index e616d0ac8..828619f86 100644 --- a/log.go +++ b/log.go @@ -7,7 +7,9 @@ import ( "github.com/tendermint/log15" ) -var rootHandler log15.Handler +//var rootHandler log15.Handler +var mainHandler log15.Handler +var bypassHandler log15.Handler func init() { Reset() @@ -21,31 +23,48 @@ func Reset() { logLevel = config.GetString("log_level") } - // stdout handler + // main handler //handlers := []log15.Handler{} - stdoutHandler := log15.LvlFilterHandler( + mainHandler = log15.LvlFilterHandler( getLevel(logLevel), log15.StreamHandler(os.Stdout, log15.TerminalFormat()), ) - //handlers = append(handlers, stdoutHandler) + //handlers = append(handlers, mainHandler) + + // bypass handler for not filtering on global 
logLevel. + bypassHandler = log15.StreamHandler(os.Stdout, log15.TerminalFormat()) + //handlers = append(handlers, bypassHandler) // Set rootHandler. //rootHandler = log15.MultiHandler(handlers...) - rootHandler = stdoutHandler // By setting handlers on the root, we handle events from all loggers. - log15.Root().SetHandler(rootHandler) + log15.Root().SetHandler(mainHandler) +} + +// See go-wire/log for an example of usage. +func MainHandler() log15.Handler { + return mainHandler } -// See binary/log for an example of usage. -func RootHandler() log15.Handler { - return rootHandler +func BypassHandler() log15.Handler { + return bypassHandler } func New(ctx ...interface{}) log15.Logger { + return NewMain(ctx...) +} + +func NewMain(ctx ...interface{}) log15.Logger { return log15.Root().New(ctx...) } +func NewBypass(ctx ...interface{}) log15.Logger { + bypass := log15.New(ctx...) + bypass.SetHandler(bypassHandler) + return bypass +} + func getLevel(lvlString string) log15.Lvl { lvl, err := log15.LvlFromString(lvlString) if err != nil { From f592570310c9512c9bdc78335927ffb15fd58975 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 23 Dec 2015 14:14:41 -0800 Subject: [PATCH 023/515] Change license to Apache2.0 --- LICENSE | 193 +++++++++++++++++++++++++++++++++++++++++++++++++ LICENSE.md | 206 ----------------------------------------------------- 2 files changed, 193 insertions(+), 206 deletions(-) create mode 100644 LICENSE delete mode 100644 LICENSE.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..8a142a71b --- /dev/null +++ b/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-Common +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index aaf0cf06d..000000000 --- a/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Common -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
- -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. 
- -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
- -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
- -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS From b24269fefc0c6fc29c4c1ad10b938b1614f6c6c7 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 23 Dec 2015 14:16:29 -0800 Subject: [PATCH 024/515] Change license to Apache2.0 --- LICENSE | 193 +++++++++++++++++++++++++++++++++++++++++++++++++ LICENSE.md | 206 ----------------------------------------------------- 2 files changed, 193 insertions(+), 206 deletions(-) create mode 100644 LICENSE delete mode 100644 LICENSE.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..b0c35f3b8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-Process +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index 3f811edc1..000000000 --- a/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Process -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
- -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. 
- -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
- -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
- -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS From 634527f5b60fd7c71ca811262493df2ad65ee0ca Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 23 Dec 2015 14:18:53 -0800 Subject: [PATCH 025/515] Change license to Apache2.0 --- LICENSE | 193 +++++++++++++++++++++++++++++++++++++++++++++++++ LICENSE.md | 206 ----------------------------------------------------- 2 files changed, 193 insertions(+), 206 deletions(-) create mode 100644 LICENSE delete mode 100644 LICENSE.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..1ec9bd42c --- /dev/null +++ b/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-CList +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index 57cfcf41c..000000000 --- a/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-CList -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
- -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. 
- -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
- -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
- -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS From 980f02a5001b46f02ab3fbb036531d4ea789d2bf Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 23 Dec 2015 14:19:50 -0800 Subject: [PATCH 026/515] Change license to Apache2.0 --- LICENSE | 193 +++++++++++++++++++++++++++++++++++++++++++++++++ LICENSE.md | 206 ----------------------------------------------------- 2 files changed, 193 insertions(+), 206 deletions(-) create mode 100644 LICENSE delete mode 100644 LICENSE.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..17ce702bf --- /dev/null +++ b/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-Logger +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index ee1d394da..000000000 --- a/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Logger -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
- -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. 
- -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
- -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
- -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. 
- -END OF TERMS AND CONDITIONS From ba01cfbb58d446673beff17e72883cb49c835fb9 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 2 Jan 2016 16:20:39 -0800 Subject: [PATCH 027/515] Remove spurious fmt.Print --- process.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/process.go b/process.go index 5560aaa43..cf2aa0289 100644 --- a/process.go +++ b/process.go @@ -47,7 +47,7 @@ func StartProcess(label string, execPath string, args []string, inFile io.Reader go func() { err := proc.Cmd.Wait() if err != nil { - fmt.Printf("Process exit: %v\n", err) + // fmt.Printf("Process exit: %v\n", err) if exitError, ok := err.(*exec.ExitError); ok { proc.ExitState = exitError.ProcessState } @@ -66,10 +66,10 @@ func StartProcess(label string, execPath string, args []string, inFile io.Reader func (proc *Process) StopProcess(kill bool) error { defer proc.OutputFile.Close() if kill { - fmt.Printf("Killing process %v\n", proc.Cmd.Process) + // fmt.Printf("Killing process %v\n", proc.Cmd.Process) return proc.Cmd.Process.Kill() } else { - fmt.Printf("Stopping process %v\n", proc.Cmd.Process) + // fmt.Printf("Stopping process %v\n", proc.Cmd.Process) return proc.Cmd.Process.Signal(os.Interrupt) } } From c58b1cbf8a9215a4bd71685db4a285f4e99747fe Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Jan 2016 08:12:10 -0800 Subject: [PATCH 028/515] Add ThrottleTimer.Unset --- throttle_timer.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/throttle_timer.go b/throttle_timer.go index 0b40a60c2..5442d0cd8 100644 --- a/throttle_timer.go +++ b/throttle_timer.go @@ -46,6 +46,10 @@ func (t *ThrottleTimer) Set() { } } +func (t *ThrottleTimer) Unset() { + t.timer.Stop() +} + // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil ThrottleTimers func (t *ThrottleTimer) Stop() bool { From 8dacd4172ef28a6601612f25385120bbc8d36f45 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 11 Jan 2016 17:12:38 -0800 Subject: [PATCH 029/515] Fix bug 
where Unset halts ThrottleTimer --- throttle_timer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/throttle_timer.go b/throttle_timer.go index 5442d0cd8..b19896d5d 100644 --- a/throttle_timer.go +++ b/throttle_timer.go @@ -47,6 +47,7 @@ func (t *ThrottleTimer) Set() { } func (t *ThrottleTimer) Unset() { + atomic.StoreUint32(&t.isSet, 0) t.timer.Stop() } From e85e2842a042e127ee84f375b43ee50e9fe50aff Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Jan 2016 16:13:26 -0500 Subject: [PATCH 030/515] Initial commit --- LICENSE | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 2 + 2 files changed, 203 insertions(+) create mode 100644 LICENSE create mode 100644 README.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 000000000..8c8492119 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# go-events +PubSub in Go From 089435d31b69e40e58ac2109bbfcd43c3120d44b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Jan 2016 16:21:43 -0500 Subject: [PATCH 031/515] move from tendermint/tendermint --- README.md | 2 +- event_cache.go | 41 +++++++++ events.go | 220 +++++++++++++++++++++++++++++++++++++++++++++++++ log.go | 7 ++ 4 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 event_cache.go create mode 100644 events.go create mode 100644 log.go diff --git a/README.md b/README.md index 8c8492119..737cbaaec 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,2 @@ # go-events -PubSub in Go +PubSub in Go with event caching. diff --git a/event_cache.go b/event_cache.go new file mode 100644 index 000000000..905f1096a --- /dev/null +++ b/event_cache.go @@ -0,0 +1,41 @@ +package events + +const ( + eventsBufferSize = 1000 +) + +// An EventCache buffers events for a Fireable +// All events are cached. 
Filtering happens on Flush +type EventCache struct { + evsw Fireable + events []eventInfo +} + +// Create a new EventCache with an EventSwitch as backend +func NewEventCache(evsw Fireable) *EventCache { + return &EventCache{ + evsw: evsw, + events: make([]eventInfo, eventsBufferSize), + } +} + +// a cached event +type eventInfo struct { + event string + data EventData +} + +// Cache an event to be fired upon finality. +func (evc *EventCache) FireEvent(event string, data EventData) { + // append to list + evc.events = append(evc.events, eventInfo{event, data}) +} + +// Fire events by running evsw.FireEvent on all cached events. Blocks. +// Clears cached events +func (evc *EventCache) Flush() { + for _, ei := range evc.events { + evc.evsw.FireEvent(ei.event, ei.data) + } + evc.events = make([]eventInfo, eventsBufferSize) +} diff --git a/events.go b/events.go new file mode 100644 index 000000000..24c53c5d5 --- /dev/null +++ b/events.go @@ -0,0 +1,220 @@ +package events + +import ( + "sync" + + . 
"github.com/tendermint/go-common" +) + +// Generic event data can be typed and registered with tendermint/go-wire +// via concrete implementation of this interface +type EventData interface { + AssertIsEventData() +} + +// reactors and other modules should export +// this interface to become eventable +type Eventable interface { + SetEventSwitch(evsw *EventSwitch) +} + +// an event switch or cache implements fireable +type Fireable interface { + FireEvent(event string, data EventData) +} + +type EventSwitch struct { + BaseService + + mtx sync.RWMutex + eventCells map[string]*eventCell + listeners map[string]*eventListener +} + +func NewEventSwitch() *EventSwitch { + evsw := &EventSwitch{} + evsw.BaseService = *NewBaseService(log, "EventSwitch", evsw) + return evsw +} + +func (evsw *EventSwitch) OnStart() error { + evsw.BaseService.OnStart() + evsw.eventCells = make(map[string]*eventCell) + evsw.listeners = make(map[string]*eventListener) + return nil +} + +func (evsw *EventSwitch) OnStop() { + evsw.BaseService.OnStop() + evsw.eventCells = nil + evsw.listeners = nil +} + +func (evsw *EventSwitch) AddListenerForEvent(listenerID, event string, cb eventCallback) { + // Get/Create eventCell and listener + evsw.mtx.Lock() + eventCell := evsw.eventCells[event] + if eventCell == nil { + eventCell = newEventCell() + evsw.eventCells[event] = eventCell + } + listener := evsw.listeners[listenerID] + if listener == nil { + listener = newEventListener(listenerID) + evsw.listeners[listenerID] = listener + } + evsw.mtx.Unlock() + + // Add event and listener + eventCell.AddListener(listenerID, cb) + listener.AddEvent(event) +} + +func (evsw *EventSwitch) RemoveListener(listenerID string) { + // Get and remove listener + evsw.mtx.RLock() + listener := evsw.listeners[listenerID] + delete(evsw.listeners, listenerID) + evsw.mtx.RUnlock() + + if listener == nil { + return + } + + // Remove callback for each event. 
+ listener.SetRemoved() + for _, event := range listener.GetEvents() { + evsw.RemoveListenerForEvent(event, listenerID) + } +} + +func (evsw *EventSwitch) RemoveListenerForEvent(event string, listenerID string) { + // Get eventCell + evsw.mtx.Lock() + eventCell := evsw.eventCells[event] + evsw.mtx.Unlock() + + if eventCell == nil { + return + } + + // Remove listenerID from eventCell + numListeners := eventCell.RemoveListener(listenerID) + + // Maybe garbage collect eventCell. + if numListeners == 0 { + // Lock again and double check. + evsw.mtx.Lock() // OUTER LOCK + eventCell.mtx.Lock() // INNER LOCK + if len(eventCell.listeners) == 0 { + delete(evsw.eventCells, event) + } + eventCell.mtx.Unlock() // INNER LOCK + evsw.mtx.Unlock() // OUTER LOCK + } +} + +func (evsw *EventSwitch) FireEvent(event string, data EventData) { + // Get the eventCell + evsw.mtx.RLock() + eventCell := evsw.eventCells[event] + evsw.mtx.RUnlock() + + if eventCell == nil { + return + } + + // Fire event for all listeners in eventCell + eventCell.FireEvent(data) +} + +func (evsw *EventSwitch) SubscribeToEvent(receiver, eventID string, chanCap int) chan interface{} { + // listen for new round + ch := make(chan interface{}, chanCap) + evsw.AddListenerForEvent(receiver, eventID, func(data EventData) { + // NOTE: in production, evsw callbacks should be nonblocking. + ch <- data + }) + return ch +} + +//----------------------------------------------------------------------------- + +// eventCell handles keeping track of listener callbacks for a given event. 
+type eventCell struct { + mtx sync.RWMutex + listeners map[string]eventCallback +} + +func newEventCell() *eventCell { + return &eventCell{ + listeners: make(map[string]eventCallback), + } +} + +func (cell *eventCell) AddListener(listenerID string, cb eventCallback) { + cell.mtx.Lock() + cell.listeners[listenerID] = cb + cell.mtx.Unlock() +} + +func (cell *eventCell) RemoveListener(listenerID string) int { + cell.mtx.Lock() + delete(cell.listeners, listenerID) + numListeners := len(cell.listeners) + cell.mtx.Unlock() + return numListeners +} + +func (cell *eventCell) FireEvent(data EventData) { + cell.mtx.RLock() + for _, listener := range cell.listeners { + listener(data) + } + cell.mtx.RUnlock() +} + +//----------------------------------------------------------------------------- + +type eventCallback func(data EventData) + +type eventListener struct { + id string + + mtx sync.RWMutex + removed bool + events []string +} + +func newEventListener(id string) *eventListener { + return &eventListener{ + id: id, + removed: false, + events: nil, + } +} + +func (evl *eventListener) AddEvent(event string) { + evl.mtx.Lock() + defer evl.mtx.Unlock() + + if evl.removed { + return + } + evl.events = append(evl.events, event) +} + +func (evl *eventListener) GetEvents() []string { + evl.mtx.RLock() + defer evl.mtx.RUnlock() + + events := make([]string, len(evl.events)) + copy(events, evl.events) + return events +} + +func (evl *eventListener) SetRemoved() { + evl.mtx.Lock() + defer evl.mtx.Unlock() + evl.removed = true +} diff --git a/log.go b/log.go new file mode 100644 index 000000000..525462294 --- /dev/null +++ b/log.go @@ -0,0 +1,7 @@ +package events + +import ( + "github.com/tendermint/go-logger" +) + +var log = logger.New("module", "events") From a5f26a8999f74c1a665c34841c08a6b111961402 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Jan 2016 18:09:55 -0500 Subject: [PATCH 032/515] EventResult --- events.go | 5 +++++ 1 file changed, 5 insertions(+) diff 
--git a/events.go b/events.go index 24c53c5d5..ce7391118 100644 --- a/events.go +++ b/events.go @@ -12,6 +12,11 @@ type EventData interface { AssertIsEventData() } +type EventResult struct { + Event string `json:"event"` + Data EventData `json:"data"` +} + // reactors and other modules should export // this interface to become eventable type Eventable interface { From e8ffe6bb4c43447fbc0cda2b4cf0967e4ba4796d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 14 Jan 2016 11:01:44 -0800 Subject: [PATCH 033/515] Remove AssertIsEventData; Remove EventResult --- events.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/events.go b/events.go index ce7391118..824144926 100644 --- a/events.go +++ b/events.go @@ -9,12 +9,7 @@ import ( // Generic event data can be typed and registered with tendermint/go-wire // via concrete implementation of this interface type EventData interface { - AssertIsEventData() -} - -type EventResult struct { - Event string `json:"event"` - Data EventData `json:"data"` + //AssertIsEventData() } // reactors and other modules should export From a7878f1d0d8eaebf15f87bc2df15f7a1088cce7f Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 17 Jan 2016 13:13:19 -0800 Subject: [PATCH 034/515] Add basic test for level_db --- README.md | 1 + level_db_test.go | 83 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 README.md create mode 100644 level_db_test.go diff --git a/README.md b/README.md new file mode 100644 index 000000000..ca5ab33f9 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +TODO: syndtr/goleveldb should be replaced with actual LevelDB instance diff --git a/level_db_test.go b/level_db_test.go new file mode 100644 index 000000000..2a8e9ac1e --- /dev/null +++ b/level_db_test.go @@ -0,0 +1,83 @@ +package db + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + + . 
"github.com/tendermint/go-common" +) + +func BenchmarkRandomReadsWrites(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewLevelDB(Fmt("test_%x", RandStr(12))) + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(RandInt()) % numItems) + internal[idx] += 1 + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %X for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + +func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} From 7b75ca7bb55aa25e9ef765eb8c0b69486b227357 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 18 Jan 2016 11:15:46 -0800 Subject: [PATCH 035/515] Remove SubscribeToEvent --- events.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/events.go b/events.go index 824144926..04c9a9b2e 100644 --- a/events.go +++ b/events.go @@ -128,16 +128,6 @@ func (evsw *EventSwitch) FireEvent(event string, data EventData) { eventCell.FireEvent(data) } -func (evsw 
*EventSwitch) SubscribeToEvent(receiver, eventID string, chanCap int) chan interface{} { - // listen for new round - ch := make(chan interface{}, chanCap) - evsw.AddListenerForEvent(receiver, eventID, func(data EventData) { - // NOTE: in production, evsw callbacks should be nonblocking. - ch <- data - }) - return ch -} - //----------------------------------------------------------------------------- // eventCell handles keeping track of listener callbacks for a given event. From 19f5a93cffe6df6734c3e7cf3896ba3cea546348 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 18 Jan 2016 13:19:11 -0800 Subject: [PATCH 036/515] Adjust Service log notice for duplicate stops --- service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service.go b/service.go index ca923b1d5..1d2303247 100644 --- a/service.go +++ b/service.go @@ -110,7 +110,7 @@ func (bs *BaseService) Stop() bool { return true } else { if bs.log != nil { - bs.log.Notice(Fmt("Not stopping %v", bs.name), "impl", bs.impl) + bs.log.Notice(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) } return false } From 02022e356aab724d17b8cc2e4e035ef49b6099af Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Feb 2016 16:19:46 -0500 Subject: [PATCH 037/515] make ignore already stopped service Debug --- service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service.go b/service.go index 1d2303247..7ecb1687c 100644 --- a/service.go +++ b/service.go @@ -110,7 +110,7 @@ func (bs *BaseService) Stop() bool { return true } else { if bs.log != nil { - bs.log.Notice(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + bs.log.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) } return false } From 9364accf1fcf5a2e7c1a4ee6a6d1b301516ed594 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 3 Feb 2016 02:04:00 -0500 Subject: [PATCH 038/515] service: start/stop logs are info, ignored are debug --- service.go 
| 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/service.go b/service.go index 7ecb1687c..0ea69e231 100644 --- a/service.go +++ b/service.go @@ -84,14 +84,14 @@ func (bs *BaseService) Start() (bool, error) { return false, nil } else { if bs.log != nil { - bs.log.Notice(Fmt("Starting %v", bs.name), "impl", bs.impl) + bs.log.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) } } err := bs.impl.OnStart() return true, err } else { if bs.log != nil { - bs.log.Info(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) + bs.log.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) } return false, nil } @@ -104,7 +104,7 @@ func (bs *BaseService) OnStart() error { return nil } func (bs *BaseService) Stop() bool { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { if bs.log != nil { - bs.log.Notice(Fmt("Stopping %v", bs.name), "impl", bs.impl) + bs.log.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) } bs.impl.OnStop() return true From 4901b71ade2b834ca0f4c2ca69edb96792dca05b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 5 Feb 2016 23:00:03 +0000 Subject: [PATCH 039/515] SetLogLevel --- log.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/log.go b/log.go index 828619f86..ac04b2032 100644 --- a/log.go +++ b/log.go @@ -15,14 +15,11 @@ func init() { Reset() } -// You might want to call this after resetting tendermint/go-config. -func Reset() { - - var logLevel string = "debug" - if config != nil { - logLevel = config.GetString("log_level") - } +func SetLogLevel(logLevel string) { + resetWithLogLevel(logLevel) +} +func resetWithLogLevel(logLevel string) { // main handler //handlers := []log15.Handler{} mainHandler = log15.LvlFilterHandler( @@ -42,6 +39,17 @@ func Reset() { log15.Root().SetHandler(mainHandler) } +// You might want to call this after resetting tendermint/go-config. 
+func Reset() { + + var logLevel string = "debug" + if config != nil { + logLevel = config.GetString("log_level") + } + + resetWithLogLevel(logLevel) +} + // See go-wire/log for an example of usage. func MainHandler() log15.Handler { return mainHandler From 1559ae1ac90c88b1373ff114c409399c5a1cedac Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 21 Feb 2016 00:14:08 -0800 Subject: [PATCH 040/515] Add BitArray.Bytes() --- bit_array.go | 15 +++++++++++++++ bit_array_test.go | 30 ++++++++++++++++++++++++++++++ int.go | 2 +- 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/bit_array.go b/bit_array.go index dc006f0eb..84f87fd06 100644 --- a/bit_array.go +++ b/bit_array.go @@ -1,6 +1,7 @@ package common import ( + "encoding/binary" "fmt" "math/rand" "strings" @@ -273,3 +274,17 @@ func (bA *BitArray) stringIndented(indent string) string { } return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent)) } + +func (bA *BitArray) Bytes() []byte { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + numBytes := (bA.Bits + 7) / 8 + bytes := make([]byte, numBytes) + for i := 0; i < len(bA.Elems); i++ { + elemBytes := [8]byte{} + binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i]) + copy(bytes[i*8:], elemBytes[:]) + } + return bytes +} diff --git a/bit_array_test.go b/bit_array_test.go index 93274aab0..d8cbfb059 100644 --- a/bit_array_test.go +++ b/bit_array_test.go @@ -1,6 +1,7 @@ package common import ( + "bytes" "testing" ) @@ -118,3 +119,32 @@ func TestPickRandom(t *testing.T) { } } } + +func TestBytes(t *testing.T) { + bA := NewBitArray(4) + bA.SetIndex(0, true) + check := func(bA *BitArray, bz []byte) { + if !bytes.Equal(bA.Bytes(), bz) { + panic(Fmt("Expected %X but got %X", bz, bA.Bytes())) + } + } + check(bA, []byte{0x01}) + bA.SetIndex(3, true) + check(bA, []byte{0x09}) + + bA = NewBitArray(9) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + + bA = 
NewBitArray(16) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + bA.SetIndex(9, true) + check(bA, []byte{0x80, 0x03}) +} diff --git a/int.go b/int.go index 50e86a072..756e38cda 100644 --- a/int.go +++ b/int.go @@ -20,7 +20,7 @@ func SearchUint64s(a []uint64, x uint64) int { func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } -//----------------------------------------------------------------------------- +//-------------------------------------------------------------------------------- func PutUint64LE(dest []byte, i uint64) { binary.LittleEndian.PutUint64(dest, i) From 84391b36d3f5960e691c688d06b768708f0fa2f3 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 6 Mar 2016 12:31:22 -0800 Subject: [PATCH 041/515] Conform to new go-config default config behavior --- log.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/log.go b/log.go index ac04b2032..4cf854b38 100644 --- a/log.go +++ b/log.go @@ -43,7 +43,7 @@ func resetWithLogLevel(logLevel string) { func Reset() { var logLevel string = "debug" - if config != nil { + if config != nil && config.IsSet("log_level") { logLevel = config.GetString("log_level") } From 461c3b9785b8911034b6a7da8fcf4c840b651df8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 13 Mar 2016 09:57:10 -0700 Subject: [PATCH 042/515] Remove CRand* from go-common --- random.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/random.go b/random.go index 645601154..73bd16356 100644 --- a/random.go +++ b/random.go @@ -2,7 +2,6 @@ package common import ( crand "crypto/rand" - "encoding/hex" "math/rand" "time" ) @@ -12,8 +11,7 @@ const ( ) func init() { - // Seed math/rand with "secure" int64 - b := CRandBytes(8) + b := cRandBytes(8) var seed uint64 for i := 0; i < 8; i++ { seed |= uint64(b[i]) @@ -127,10 +125,10 @@ func RandBytes(n int) []byte { return bs } 
-//----------------------------------------------------------------------------- -// CRand* methods are crypto safe. - -func CRandBytes(numBytes int) []byte { +// NOTE: This relies on the os's random number generator. +// For real security, we should salt that with some seed. +// See github.com/tendermint/go-crypto for a more secure reader. +func cRandBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) if err != nil { @@ -138,8 +136,3 @@ func CRandBytes(numBytes int) []byte { } return b } - -// RandHex(24) gives 96 bits of randomness, strong enough for most purposes. -func CRandHex(numDigits int) string { - return hex.EncodeToString(CRandBytes(numDigits / 2)) -} From dcfa46af1341d03b80d32e4901019d1668b978b9 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 18 Mar 2016 02:00:15 -0400 Subject: [PATCH 043/515] BitArray.IsEmpty() --- bit_array.go | 14 ++++++++++++++ bit_array_test.go | 16 ++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/bit_array.go b/bit_array.go index 84f87fd06..78cf840f3 100644 --- a/bit_array.go +++ b/bit_array.go @@ -167,6 +167,20 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { } } +func (bA *BitArray) IsEmpty() bool { + if bA == nil { + return true // should this be opposite? 
+ } + bA.mtx.Lock() + defer bA.mtx.Unlock() + for _, e := range bA.Elems { + if e > 0 { + return false + } + } + return true +} + func (bA *BitArray) IsFull() bool { if bA == nil { return true diff --git a/bit_array_test.go b/bit_array_test.go index d8cbfb059..1c72882c7 100644 --- a/bit_array_test.go +++ b/bit_array_test.go @@ -148,3 +148,19 @@ func TestBytes(t *testing.T) { bA.SetIndex(9, true) check(bA, []byte{0x80, 0x03}) } + +func TestEmptyFull(t *testing.T) { + ns := []int{47, 123} + for _, n := range ns { + bA := NewBitArray(n) + if !bA.IsEmpty() { + t.Fatal("Expected bit array to be empty") + } + for i := 0; i < n; i++ { + bA.SetIndex(i, true) + } + if !bA.IsFull() { + t.Fatal("Expected bit array to be full") + } + } +} From 31fdd21c7eaeed53e0ea7ca597fb1e960e2988a5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 8 May 2016 15:00:13 -0700 Subject: [PATCH 044/515] No global config --- config.go | 13 ------------- db.go | 16 ++++------------ 2 files changed, 4 insertions(+), 25 deletions(-) delete mode 100644 config.go diff --git a/config.go b/config.go deleted file mode 100644 index da66c2158..000000000 --- a/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package db - -import ( - cfg "github.com/tendermint/go-config" -) - -var config cfg.Config = nil - -func init() { - cfg.OnConfig(func(newConfig cfg.Config) { - config = newConfig - }) -} diff --git a/db.go b/db.go index 2d9c3d2b1..6bb1efae7 100644 --- a/db.go +++ b/db.go @@ -24,27 +24,19 @@ type DB interface { const DBBackendMemDB = "memdb" const DBBackendLevelDB = "leveldb" -var dbs = NewCMap() - -func GetDB(name string) DB { - db := dbs.Get(name) - if db != nil { - return db.(DB) - } - switch config.GetString("db_backend") { +func NewDB(name string, backend string, dir string) DB { + switch backend { case DBBackendMemDB: db := NewMemDB() - dbs.Set(name, db) return db case DBBackendLevelDB: - db, err := NewLevelDB(path.Join(config.GetString("db_dir"), name+".db")) + db, err := NewLevelDB(path.Join(dir, 
name+".db")) if err != nil { PanicCrisis(err) } - dbs.Set(name, db) return db default: - PanicSanity(Fmt("Unknown DB backend: %v", config.GetString("db_backend"))) + PanicSanity(Fmt("Unknown DB backend: %v", backend)) } return nil } From 529efe50eab1a8a9c111d55f4de4ecd95f482761 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 8 May 2016 15:00:33 -0700 Subject: [PATCH 045/515] No global config --- config.go | 14 -------------- log.go | 17 +---------------- 2 files changed, 1 insertion(+), 30 deletions(-) delete mode 100644 config.go diff --git a/config.go b/config.go deleted file mode 100644 index 4083152a0..000000000 --- a/config.go +++ /dev/null @@ -1,14 +0,0 @@ -package logger - -import ( - cfg "github.com/tendermint/go-config" -) - -var config cfg.Config = nil - -func init() { - cfg.OnConfig(func(newConfig cfg.Config) { - config = newConfig - Reset() // reset log root upon config change. - }) -} diff --git a/log.go b/log.go index 4cf854b38..5387d4cac 100644 --- a/log.go +++ b/log.go @@ -7,12 +7,11 @@ import ( "github.com/tendermint/log15" ) -//var rootHandler log15.Handler var mainHandler log15.Handler var bypassHandler log15.Handler func init() { - Reset() + resetWithLogLevel("debug") } func SetLogLevel(logLevel string) { @@ -32,24 +31,10 @@ func resetWithLogLevel(logLevel string) { bypassHandler = log15.StreamHandler(os.Stdout, log15.TerminalFormat()) //handlers = append(handlers, bypassHandler) - // Set rootHandler. - //rootHandler = log15.MultiHandler(handlers...) - // By setting handlers on the root, we handle events from all loggers. log15.Root().SetHandler(mainHandler) } -// You might want to call this after resetting tendermint/go-config. -func Reset() { - - var logLevel string = "debug" - if config != nil && config.IsSet("log_level") { - logLevel = config.GetString("log_level") - } - - resetWithLogLevel(logLevel) -} - // See go-wire/log for an example of usage. 
func MainHandler() log15.Handler { return mainHandler From acabc4d3bbb19800fd850e6f7a4bba8ebe21f3fc Mon Sep 17 00:00:00 2001 From: Benjamin Bollen Date: Fri, 17 Jun 2016 02:02:25 +0200 Subject: [PATCH 046/515] Introduce events_test.go with five unit tests asserting normal behaviour and a sixth unit test to probe race conditions on RemoveListener. --- events_test.go | 335 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 events_test.go diff --git a/events_test.go b/events_test.go new file mode 100644 index 000000000..ecfe6068c --- /dev/null +++ b/events_test.go @@ -0,0 +1,335 @@ +package events + +import ( + "fmt" + "math/rand" + "testing" + "time" +) + +// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single +// listener to an event, and sends a string "data". +func TestAddListenerForEventFireOnce(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + messages := make(chan EventData) + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + messages <- data + }) + go evsw.FireEvent("event", "data") + received := <-messages + if received != "data" { + t.Errorf("Message received does not match: %v", received) + } +} + +// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single +// listener to an event, and sends a thousand integers. 
+func TestAddListenerForEventFireMany(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum := make(chan uint64) + doneSending := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener for one event + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event", doneSending, uint64(1)) + checkSum := <-doneSending + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single +// listener to three different events and sends a thousand integers for each +// of the three events. +func TestAddListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event3", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1)) + go fireEvents(evsw, "event3", doneSending3, uint64(1)) + var checkSum uint64 = 0 + checkSum += 
<-doneSending1 + checkSum += <-doneSending2 + checkSum += <-doneSending3 + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch, +// subscribes a first listener to three events, and subscribes a second +// listener to two of those three events, and then sends a thousand integers +// for each of the three events. +func TestAddDifferentListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for listener1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for listener2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + checkSumEvent1 := <-doneSending1 + checkSumEvent2 := <-doneSending2 + checkSumEvent3 := <-doneSending3 + checkSum1 := 
checkSumEvent1 + checkSumEvent2 + checkSumEvent3 + checkSum2 := checkSumEvent2 + checkSumEvent3 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSum1 != eventSum1 || + checkSum2 != eventSum2 { + t.Errorf("Not all messages sent were received for different listeners to different events.\n") + } +} + +// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to +// two events, fires a thousand integers for the first event, then unsubscribes +// the listener and fires a thousand integers for the second event. +func TestAddAndRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for event1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for event2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + checkSumEvent1 := <-doneSending1 + // after sending all event1, unsubscribe for all events + evsw.RemoveListener("listener") + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + checkSumEvent2 := <-doneSending2 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSumEvent1 != eventSum1 || + // correct value asserted by preceding tests, suffices to be non-zero + checkSumEvent2 == uint64(0) || + eventSum2 != uint64(0) { + t.Errorf("Not all messages 
sent were received or unsubscription did not register.\n") + } +} + +// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two +// listeners to three events, and fires a thousand integers for each event. +// These two listeners serve as the baseline validation while other listeners +// are randomly subscribed and unsubscribed. +// More precisely it randomly subscribes new listeners (different from the first +// two listeners) to one of these three events. At the same time it starts +// randomly unsubscribing these additional listeners from all events they are +// at that point subscribed to. +// NOTE: it is important to run this test with race conditions tracking on, +// `go test -race`, to examine for possible race conditions. +func TestRemoveListenersAsync(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event1", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for event1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received 
events for event2 + go sumReceivedNumbers(numbers2, doneSum2) + addListenersStress := func() { + s1 := rand.NewSource(time.Now().UnixNano()) + r1 := rand.New(s1) + for k := uint16(0); k < 400; k++ { + listenerNumber := r1.Intn(100) + 3 + eventNumber := r1.Intn(3) + 1 + go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), + fmt.Sprintf("event%v", eventNumber), + func(_ EventData) {}) + } + } + removeListenersStress := func() { + s2 := rand.NewSource(time.Now().UnixNano()) + r2 := rand.New(s2) + for k := uint16(0); k < 80; k++ { + listenerNumber := r2.Intn(100) + 3 + go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) + } + } + addListenersStress() + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + removeListenersStress() + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + checkSumEvent1 := <-doneSending1 + checkSumEvent2 := <-doneSending2 + checkSumEvent3 := <-doneSending3 + checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSum != eventSum1 || + checkSum != eventSum2 { + t.Errorf("Not all messages sent were received.\n") + } +} + +//------------------------------------------------------------------------------ +// Helper functions + +// sumReceivedNumbers takes two channels and adds all numbers received +// until the receiving channel `numbers` is closed; it then sends the sum +// on `doneSum` and closes that channel. Expected to be run in a go-routine. +func sumReceivedNumbers(numbers, doneSum chan uint64) { + var sum uint64 = 0 + for { + j, more := <-numbers + sum += j + if !more { + doneSum <- sum + close(doneSum) + return + } + } +} + +// fireEvents takes an EventSwitch and fires a thousand integers under +// a given `event` with the integers mootonically increasing from `offset` +// to `offset` + 999. 
It additionally returns the addition of all integers +// sent on `doneChan` for assertion that all events have been sent, and enabling +// the test to assert all events have also been received. +func fireEvents(evsw *EventSwitch, event string, doneChan chan uint64, + offset uint64) { + var sentSum uint64 = 0 + for i := offset; i <= offset+uint64(999); i++ { + sentSum += i + evsw.FireEvent(event, i) + } + doneChan <- sentSum + close(doneChan) + return +} From c15bcd487f65464eb7dd3be550d4e65682d8998d Mon Sep 17 00:00:00 2001 From: Benjamin Bollen Date: Fri, 17 Jun 2016 13:47:15 +0200 Subject: [PATCH 047/515] fixes #1; events.go: claim full lock on delete listener --- events.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/events.go b/events.go index 04c9a9b2e..4c8604a25 100644 --- a/events.go +++ b/events.go @@ -74,13 +74,15 @@ func (evsw *EventSwitch) RemoveListener(listenerID string) { // Get and remove listener evsw.mtx.RLock() listener := evsw.listeners[listenerID] - delete(evsw.listeners, listenerID) evsw.mtx.RUnlock() - if listener == nil { return } + evsw.mtx.Lock() + delete(evsw.listeners, listenerID) + evsw.mtx.Unlock() + // Remove callback for each event. 
listener.SetRemoved() for _, event := range listener.GetEvents() { From cefb3a45c0bf3c493a04e9bcd9b1540528be59f2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 26 Jun 2016 00:45:56 -0400 Subject: [PATCH 048/515] expose LvlNotice --- log.go | 1 + 1 file changed, 1 insertion(+) diff --git a/log.go b/log.go index 5387d4cac..07c1e6a49 100644 --- a/log.go +++ b/log.go @@ -72,5 +72,6 @@ func getLevel(lvlString string) log15.Lvl { var LvlFilterHandler = log15.LvlFilterHandler var LvlDebug = log15.LvlDebug var LvlInfo = log15.LvlInfo +var LvlNotice = log15.LvlNotice var LvlWarn = log15.LvlWarn var LvlError = log15.LvlError From 3dabf304a16e42ce2db6a456ef508a75cdf17679 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 21 Jun 2016 14:35:00 -0400 Subject: [PATCH 049/515] fix race condition in repeat_timer --- repeat_timer.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/repeat_timer.go b/repeat_timer.go index e2aa18ea8..f027af3f3 100644 --- a/repeat_timer.go +++ b/repeat_timer.go @@ -15,6 +15,7 @@ type RepeatTimer struct { name string ticker *time.Ticker quit chan struct{} + done chan struct{} dur time.Duration } @@ -23,6 +24,7 @@ func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { Ch: make(chan time.Time), ticker: time.NewTicker(dur), quit: make(chan struct{}), + done: make(chan struct{}), name: name, dur: dur, } @@ -36,6 +38,8 @@ func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { case t_ := <-ticker.C: t.Ch <- t_ case <-t.quit: + // needed so we know when we can reset t.quit + t.done <- struct{}{} return } } @@ -64,9 +68,10 @@ func (t *RepeatTimer) Stop() bool { exists := t.ticker != nil if exists { - t.ticker.Stop() - t.ticker = nil + t.ticker.Stop() // does not close the channel close(t.quit) + <-t.done + t.ticker = nil } return exists } From b0c0631468391008440ad9b6d7611e3f04f09ed8 Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Wed, 13 Jul 2016 18:50:06 +0100 Subject: [PATCH 050/515] Spelling --- 
errors.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/errors.go b/errors.go index e168a75b7..3a1b09542 100644 --- a/errors.go +++ b/errors.go @@ -23,23 +23,23 @@ func (se StackError) Error() string { // A panic resulting from a sanity check means there is a programmer error // and some gaurantee is not satisfied. func PanicSanity(v interface{}) { - panic(Fmt("Paniced on a Sanity Check: %v", v)) + panic(Fmt("Panicked on a Sanity Check: %v", v)) } // A panic here means something has gone horribly wrong, in the form of data corruption or // failure of the operating system. In a correct/healthy system, these should never fire. // If they do, it's indicative of a much more serious problem. func PanicCrisis(v interface{}) { - panic(Fmt("Paniced on a Crisis: %v", v)) + panic(Fmt("Panicked on a Crisis: %v", v)) } // Indicates a failure of consensus. Someone was malicious or something has // gone horribly wrong. These should really boot us into an "emergency-recover" mode func PanicConsensus(v interface{}) { - panic(Fmt("Paniced on a Consensus Failure: %v", v)) + panic(Fmt("Panicked on a Consensus Failure: %v", v)) } // For those times when we're not sure if we should panic func PanicQ(v interface{}) { - panic(Fmt("Paniced questionably: %v", v)) + panic(Fmt("Panicked questionably: %v", v)) } From 3baa390bbaf7634251c42ad69a8682e7e3990552 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 23 Jul 2016 11:23:29 -0400 Subject: [PATCH 051/515] reduce numTimes in test --- clist_test.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/clist_test.go b/clist_test.go index d5a3f8ed7..ab5cf4b26 100644 --- a/clist_test.go +++ b/clist_test.go @@ -4,6 +4,7 @@ import ( "fmt" "math/rand" "runtime" + "sync/atomic" "testing" "time" ) @@ -54,11 +55,11 @@ func TestSmall(t *testing.T) { This test is quite hacky because it relies on SetFinalizer which isn't guaranteed to run at all. 
*/ -func TestGCFifo(t *testing.T) { +func _TestGCFifo(t *testing.T) { const numElements = 1000000 l := New() - gcCount := 0 + gcCount := new(uint64) // SetFinalizer doesn't work well with circular structures, // so we construct a trivial non-circular structure to @@ -66,13 +67,14 @@ func TestGCFifo(t *testing.T) { type value struct { Int int } + done := make(chan struct{}) for i := 0; i < numElements; i++ { v := new(value) v.Int = i l.PushBack(v) runtime.SetFinalizer(v, func(v *value) { - gcCount += 1 + atomic.AddUint64(gcCount, 1) }) } @@ -86,10 +88,13 @@ func TestGCFifo(t *testing.T) { runtime.GC() time.Sleep(time.Second * 3) + runtime.GC() + time.Sleep(time.Second * 3) + _ = done - if gcCount != numElements { + if *gcCount != numElements { t.Errorf("Expected gcCount to be %v, got %v", numElements, - gcCount) + *gcCount) } } @@ -97,7 +102,7 @@ func TestGCFifo(t *testing.T) { This test is quite hacky because it relies on SetFinalizer which isn't guaranteed to run at all. */ -func TestGCRandom(t *testing.T) { +func _TestGCRandom(t *testing.T) { const numElements = 1000000 l := New() @@ -142,7 +147,7 @@ func TestGCRandom(t *testing.T) { func TestScanRightDeleteRandom(t *testing.T) { const numElements = 10000 - const numTimes = 10000000 + const numTimes = 100000 const numScanners = 10 l := New() From 9dc4dc19609262c2d8d0ad74b445b2eac69c8a60 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 5 Sep 2016 18:26:43 -0700 Subject: [PATCH 052/515] Add BitArray.Update() --- bit_array.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/bit_array.go b/bit_array.go index 78cf840f3..5590fe61b 100644 --- a/bit_array.go +++ b/bit_array.go @@ -302,3 +302,16 @@ func (bA *BitArray) Bytes() []byte { } return bytes } + +// NOTE: other bitarray o is not locked when reading, +// so if necessary, caller must copy or lock o prior to calling Update. +// If bA is nil, does nothing. 
+func (bA *BitArray) Update(o *BitArray) { + if bA == nil { + return + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + + copy(bA.Elems, o.Elems) +} From 930880f5742d927ccd6fa1d751a5e449d97f8d17 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 11 Jul 2016 22:17:09 -0400 Subject: [PATCH 053/515] throttle_timer: fix race, use mtx instead of atomic --- throttle_timer.go | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/throttle_timer.go b/throttle_timer.go index b19896d5d..0966e913c 100644 --- a/throttle_timer.go +++ b/throttle_timer.go @@ -1,7 +1,7 @@ package common import ( - "sync/atomic" + "sync" "time" ) @@ -12,12 +12,14 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". */ type ThrottleTimer struct { - Name string - Ch chan struct{} - quit chan struct{} - dur time.Duration + Name string + Ch chan struct{} + quit chan struct{} + dur time.Duration + + mtx sync.Mutex timer *time.Timer - isSet uint32 + isSet bool } func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { @@ -30,9 +32,11 @@ func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { } func (t *ThrottleTimer) fireRoutine() { + t.mtx.Lock() + defer t.mtx.Unlock() select { case t.Ch <- struct{}{}: - atomic.StoreUint32(&t.isSet, 0) + t.isSet = false case <-t.quit: // do nothing default: @@ -41,13 +45,18 @@ func (t *ThrottleTimer) fireRoutine() { } func (t *ThrottleTimer) Set() { - if atomic.CompareAndSwapUint32(&t.isSet, 0, 1) { + t.mtx.Lock() + defer t.mtx.Unlock() + if !t.isSet { + t.isSet = true t.timer.Reset(t.dur) } } func (t *ThrottleTimer) Unset() { - atomic.StoreUint32(&t.isSet, 0) + t.mtx.Lock() + defer t.mtx.Unlock() + t.isSet = false t.timer.Stop() } @@ -58,5 +67,7 @@ func (t *ThrottleTimer) Stop() bool { return false } close(t.quit) + t.mtx.Lock() + defer t.mtx.Unlock() return t.timer.Stop() } From 1652dc8b3f7780079aa98c3ce20a83ee90b9758b Mon Sep 17 00:00:00 2001 From: 
Ethan Buchman Date: Mon, 10 Oct 2016 03:20:42 -0400 Subject: [PATCH 054/515] EventSwitch is an interface --- events.go | 37 +++++++++++++++++++++++-------------- events_test.go | 2 +- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/events.go b/events.go index 4c8604a25..9d4f2fa2e 100644 --- a/events.go +++ b/events.go @@ -15,7 +15,7 @@ type EventData interface { // reactors and other modules should export // this interface to become eventable type Eventable interface { - SetEventSwitch(evsw *EventSwitch) + SetEventSwitch(evsw EventSwitch) } // an event switch or cache implements fireable @@ -23,7 +23,16 @@ type Fireable interface { FireEvent(event string, data EventData) } -type EventSwitch struct { +type EventSwitch interface { + Service + Fireable + + AddListenerForEvent(listenerID, event string, cb EventCallback) + RemoveListenerForEvent(event string, listenerID string) + RemoveListener(listenerID string) +} + +type eventSwitch struct { BaseService mtx sync.RWMutex @@ -31,26 +40,26 @@ type EventSwitch struct { listeners map[string]*eventListener } -func NewEventSwitch() *EventSwitch { - evsw := &EventSwitch{} +func NewEventSwitch() EventSwitch { + evsw := &eventSwitch{} evsw.BaseService = *NewBaseService(log, "EventSwitch", evsw) return evsw } -func (evsw *EventSwitch) OnStart() error { +func (evsw *eventSwitch) OnStart() error { evsw.BaseService.OnStart() evsw.eventCells = make(map[string]*eventCell) evsw.listeners = make(map[string]*eventListener) return nil } -func (evsw *EventSwitch) OnStop() { +func (evsw *eventSwitch) OnStop() { evsw.BaseService.OnStop() evsw.eventCells = nil evsw.listeners = nil } -func (evsw *EventSwitch) AddListenerForEvent(listenerID, event string, cb eventCallback) { +func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) { // Get/Create eventCell and listener evsw.mtx.Lock() eventCell := evsw.eventCells[event] @@ -70,7 +79,7 @@ func (evsw *EventSwitch) AddListenerForEvent(listenerID, 
event string, cb eventC listener.AddEvent(event) } -func (evsw *EventSwitch) RemoveListener(listenerID string) { +func (evsw *eventSwitch) RemoveListener(listenerID string) { // Get and remove listener evsw.mtx.RLock() listener := evsw.listeners[listenerID] @@ -90,7 +99,7 @@ func (evsw *EventSwitch) RemoveListener(listenerID string) { } } -func (evsw *EventSwitch) RemoveListenerForEvent(event string, listenerID string) { +func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { // Get eventCell evsw.mtx.Lock() eventCell := evsw.eventCells[event] @@ -116,7 +125,7 @@ func (evsw *EventSwitch) RemoveListenerForEvent(event string, listenerID string) } } -func (evsw *EventSwitch) FireEvent(event string, data EventData) { +func (evsw *eventSwitch) FireEvent(event string, data EventData) { // Get the eventCell evsw.mtx.RLock() eventCell := evsw.eventCells[event] @@ -135,16 +144,16 @@ func (evsw *EventSwitch) FireEvent(event string, data EventData) { // eventCell handles keeping track of listener callbacks for a given event. 
type eventCell struct { mtx sync.RWMutex - listeners map[string]eventCallback + listeners map[string]EventCallback } func newEventCell() *eventCell { return &eventCell{ - listeners: make(map[string]eventCallback), + listeners: make(map[string]EventCallback), } } -func (cell *eventCell) AddListener(listenerID string, cb eventCallback) { +func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { cell.mtx.Lock() cell.listeners[listenerID] = cb cell.mtx.Unlock() @@ -168,7 +177,7 @@ func (cell *eventCell) FireEvent(data EventData) { //----------------------------------------------------------------------------- -type eventCallback func(data EventData) +type EventCallback func(data EventData) type eventListener struct { id string diff --git a/events_test.go b/events_test.go index ecfe6068c..8766a57d2 100644 --- a/events_test.go +++ b/events_test.go @@ -322,7 +322,7 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // to `offset` + 999. It additionally returns the addition of all integers // sent on `doneChan` for assertion that all events have been sent, and enabling // the test to assert all events have also been received. -func fireEvents(evsw *EventSwitch, event string, doneChan chan uint64, +func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, offset uint64) { var sentSum uint64 = 0 for i := offset; i <= offset+uint64(999); i++ { From 2e424ee663147cc9e731413641d185aa81dbefa7 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 11 Aug 2016 00:04:07 -0400 Subject: [PATCH 055/515] service: Reset() for restarts --- service.go | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/service.go b/service.go index 0ea69e231..86ef20ead 100644 --- a/service.go +++ b/service.go @@ -1,12 +1,13 @@ /* Classical-inheritance-style service declarations. -Services can be started, then stopped. +Services can be started, then stopped, then optionally restarted. 
Users can override the OnStart/OnStop methods. -These methods are guaranteed to be called at most once. +By default, these methods are guaranteed to be called at most once. +A call to Reset will panic, unless OnReset is overwritten, allowing OnStart/OnStop to be called again. Caller must ensure that Start() and Stop() are not called concurrently. It is ok to call Stop() without calling Start() first. -Services cannot be re-started unless otherwise documented. +Services cannot be re-started unless OnReset is overwritten to allow it. Typical usage: @@ -51,6 +52,9 @@ type Service interface { Stop() bool OnStop() + Reset() (bool, error) + OnReset() error + IsRunning() bool String() string @@ -119,6 +123,29 @@ func (bs *BaseService) Stop() bool { // Implements Service func (bs *BaseService) OnStop() {} +// Implements Service +func (bs *BaseService) Reset() (bool, error) { + if atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { + // whether or not we've started, we can reset + atomic.CompareAndSwapUint32(&bs.started, 1, 0) + + return true, bs.impl.OnReset() + } else { + if bs.log != nil { + bs.log.Debug(Fmt("Can't reset %v. 
Not stopped", bs.name), "impl", bs.impl) + } + return false, nil + } + // never happens + return false, nil +} + +// Implements Service +func (bs *BaseService) OnReset() error { + PanicSanity("The service cannot be reset") + return nil +} + // Implements Service func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 From 1a24e6e237a28354459281fe6a96419ed9e3e4fe Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 17 Oct 2016 16:15:57 -0700 Subject: [PATCH 056/515] Let SIGHUP close AutoFiles --- os.go | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++---- os_test.go | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 4 deletions(-) create mode 100644 os_test.go diff --git a/os.go b/os.go index 1d0b538d2..a273bec48 100644 --- a/os.go +++ b/os.go @@ -8,6 +8,7 @@ import ( "os/signal" "strings" "sync" + "syscall" "time" ) @@ -15,6 +16,10 @@ var ( GoPath = os.Getenv("GOPATH") ) +func init() { + initAFSIGHUPWatcher() +} + func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) @@ -134,6 +139,7 @@ const autoFileOpenDuration = 1000 * time.Millisecond // Automatically closes and re-opens file for writing. // This is useful for using a log file with the logrotate tool. 
type AutoFile struct { + ID string Path string ticker *time.Ticker mtx sync.Mutex @@ -142,6 +148,7 @@ type AutoFile struct { func OpenAutoFile(path string) (af *AutoFile, err error) { af = &AutoFile{ + ID: RandStr(12) + ":" + path, Path: path, ticker: time.NewTicker(autoFileOpenDuration), } @@ -149,14 +156,14 @@ func OpenAutoFile(path string) (af *AutoFile, err error) { return } go af.processTicks() + autoFileWatchers.addAutoFile(af) return } func (af *AutoFile) Close() error { af.ticker.Stop() - af.mtx.Lock() err := af.closeFile() - af.mtx.Unlock() + autoFileWatchers.removeAutoFile(af) return err } @@ -166,13 +173,14 @@ func (af *AutoFile) processTicks() { if !ok { return // Done. } - af.mtx.Lock() af.closeFile() - af.mtx.Unlock() } } func (af *AutoFile) closeFile() (err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + file := af.file if file == nil { return nil @@ -201,6 +209,56 @@ func (af *AutoFile) openFile() error { return nil } +//-------------------------------------------------------------------------------- + +var autoFileWatchers *afSIGHUPWatcher + +func initAFSIGHUPWatcher() { + autoFileWatchers = newAFSIGHUPWatcher() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + go func() { + for _ = range c { + autoFileWatchers.closeAll() + } + }() +} + +type afSIGHUPWatcher struct { + mtx sync.Mutex + autoFiles map[string]*AutoFile +} + +func newAFSIGHUPWatcher() *afSIGHUPWatcher { + return &afSIGHUPWatcher{ + autoFiles: make(map[string]*AutoFile, 10), + } +} + +func (afw *afSIGHUPWatcher) addAutoFile(af *AutoFile) { + afw.mtx.Lock() + afw.autoFiles[af.ID] = af + afw.mtx.Unlock() +} + +func (afw *afSIGHUPWatcher) removeAutoFile(af *AutoFile) { + afw.mtx.Lock() + delete(afw.autoFiles, af.ID) + afw.mtx.Unlock() +} + +func (afw *afSIGHUPWatcher) closeAll() { + afw.mtx.Lock() + for _, af := range afw.autoFiles { + af.closeFile() + } + afw.mtx.Unlock() +} + +//-------------------------------------------------------------------------------- + 
func Tempfile(prefix string) (*os.File, string) { file, err := ioutil.TempFile("", prefix) if err != nil { @@ -209,6 +267,8 @@ func Tempfile(prefix string) (*os.File, string) { return file, file.Name() } +//-------------------------------------------------------------------------------- + func Prompt(prompt string, defaultValue string) (string, error) { fmt.Print(prompt) reader := bufio.NewReader(os.Stdin) diff --git a/os_test.go b/os_test.go new file mode 100644 index 000000000..c0effdc2b --- /dev/null +++ b/os_test.go @@ -0,0 +1,64 @@ +package common + +import ( + "os" + "syscall" + "testing" +) + +func TestSIGHUP(t *testing.T) { + + // First, create an AutoFile writing to a tempfile dir + file, name := Tempfile("sighup_test") + err := file.Close() + if err != nil { + t.Fatalf("Error creating tempfile: %v", err) + } + // Here is the actual AutoFile + af, err := OpenAutoFile(name) + if err != nil { + t.Fatalf("Error creating autofile: %v", err) + } + + // Write to the file. + _, err = af.Write([]byte("Line 1\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 2\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + + // Send SIGHUP to self. + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + + // Move the file over + err = os.Rename(name, name+"_old") + if err != nil { + t.Fatalf("Error moving autofile: %v", err) + } + + // Write more to the file. 
+ _, err = af.Write([]byte("Line 3\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 4\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + err = af.Close() + if err != nil { + t.Fatalf("Error closing autofile") + } + + // Both files should exist + if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + t.Errorf("Unexpected body %s", body) + } + if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + t.Errorf("Unexpected body %s", body) + } + +} From 28b3d52948b1590be9e3d2e456780d5e41929aca Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 16:22:43 -0700 Subject: [PATCH 057/515] first commit --- README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..23799200c --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# go-autofile From 1859c4d5fe2a0cbb0071b010ef8f604bb397feca Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 16:23:19 -0700 Subject: [PATCH 058/515] First commit --- .gitignore | 2 + autofile.go | 116 ++++++++++++++ autofile_test.go | 73 +++++++++ group.go | 396 ++++++++++++++++++++++++++++++++++++++++++++++ group_test.go | 110 +++++++++++++ sighup_watcher.go | 63 ++++++++ 6 files changed, 760 insertions(+) create mode 100644 .gitignore create mode 100644 autofile.go create mode 100644 autofile_test.go create mode 100644 group.go create mode 100644 group_test.go create mode 100644 sighup_watcher.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..381931381 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +*.swp +*.swo diff --git a/autofile.go b/autofile.go new file mode 100644 index 000000000..ed9d549bf --- /dev/null +++ b/autofile.go @@ -0,0 +1,116 @@ +package autofile + +import ( + . 
"github.com/tendermint/go-common" + "os" + "sync" + "time" +) + +/* AutoFile usage + +// Create/Append to ./autofile_test +af, err := OpenAutoFile("autofile_test") +if err != nil { + panic(err) +} + +// Stream of writes. +// During this time, the file may be moved e.g. by logRotate. +for i := 0; i < 60; i++ { + af.Write([]byte(Fmt("LOOP(%v)", i))) + time.Sleep(time.Second) +} + +// Close the AutoFile +err = af.Close() +if err != nil { + panic(err) +} +*/ + +const autoFileOpenDuration = 1000 * time.Millisecond + +// Automatically closes and re-opens file for writing. +// This is useful for using a log file with the logrotate tool. +type AutoFile struct { + ID string + Path string + ticker *time.Ticker + mtx sync.Mutex + file *os.File +} + +func OpenAutoFile(path string) (af *AutoFile, err error) { + af = &AutoFile{ + ID: RandStr(12) + ":" + path, + Path: path, + ticker: time.NewTicker(autoFileOpenDuration), + } + if err = af.openFile(); err != nil { + return + } + go af.processTicks() + sighupWatchers.addAutoFile(af) + return +} + +func (af *AutoFile) Close() error { + af.ticker.Stop() + err := af.closeFile() + sighupWatchers.removeAutoFile(af) + return err +} + +func (af *AutoFile) processTicks() { + for { + _, ok := <-af.ticker.C + if !ok { + return // Done. 
+ } + af.closeFile() + } +} + +func (af *AutoFile) closeFile() (err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + file := af.file + if file == nil { + return nil + } + af.file = nil + return file.Close() +} + +func (af *AutoFile) Write(b []byte) (n int, err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + if af.file == nil { + if err = af.openFile(); err != nil { + return + } + } + return af.file.Write(b) +} + +func (af *AutoFile) openFile() error { + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return err + } + af.file = file + return nil +} + +func (af *AutoFile) Size() (int64, error) { + af.mtx.Lock() + defer af.mtx.Unlock() + stat, err := af.file.Stat() + if err != nil { + return -1, err + } + return stat.Size(), nil + +} diff --git a/autofile_test.go b/autofile_test.go new file mode 100644 index 000000000..243125ca6 --- /dev/null +++ b/autofile_test.go @@ -0,0 +1,73 @@ +package autofile + +import ( + . "github.com/tendermint/go-common" + "os" + "sync/atomic" + "syscall" + "testing" + "time" +) + +func TestSIGHUP(t *testing.T) { + + // First, create an AutoFile writing to a tempfile dir + file, name := Tempfile("sighup_test") + err := file.Close() + if err != nil { + t.Fatalf("Error creating tempfile: %v", err) + } + // Here is the actual AutoFile + af, err := OpenAutoFile(name) + if err != nil { + t.Fatalf("Error creating autofile: %v", err) + } + + // Write to the file. + _, err = af.Write([]byte("Line 1\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 2\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + + // Move the file over + err = os.Rename(name, name+"_old") + if err != nil { + t.Fatalf("Error moving autofile: %v", err) + } + + // Send SIGHUP to self. + oldSighupCounter := atomic.LoadInt32(&sighupCounter) + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + + // Wait a bit... 
signals are not handled synchronously. + for atomic.LoadInt32(&sighupCounter) == oldSighupCounter { + time.Sleep(time.Millisecond * 10) + } + + // Write more to the file. + _, err = af.Write([]byte("Line 3\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 4\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + err = af.Close() + if err != nil { + t.Fatalf("Error closing autofile") + } + + // Both files should exist + if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + t.Errorf("Unexpected body %s", body) + } + if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + t.Errorf("Unexpected body %s", body) + } + +} diff --git a/group.go b/group.go new file mode 100644 index 000000000..c0d199e1d --- /dev/null +++ b/group.go @@ -0,0 +1,396 @@ +package autofile + +import ( + "bufio" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" +) + +/* +You can open a Group to keep restrictions on an AutoFile, like +the maximum size of each chunk, and/or the total amount of bytes +stored in the group. 
+ +The Group can also be used to binary-search, and to read atomically +with respect to the Group's Head (the AutoFile being appended to) +*/ + +const groupCheckDuration = 1000 * time.Millisecond + +type Group struct { + ID string + Head *AutoFile // The head AutoFile to write to + Dir string // Directory that contains .Head + ticker *time.Ticker + mtx sync.Mutex + headSizeLimit int64 + totalSizeLimit int64 +} + +func OpenGroup(head *AutoFile) (g *Group, err error) { + dir := path.Dir(head.Path) + + g = &Group{ + ID: "group:" + head.ID, + Head: head, + Dir: dir, + ticker: time.NewTicker(groupCheckDuration), + } + go g.processTicks() + return +} + +func (g *Group) SetHeadSizeLimit(limit int64) { + g.mtx.Lock() + g.headSizeLimit = limit + g.mtx.Unlock() +} + +func (g *Group) HeadSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headSizeLimit +} + +func (g *Group) SetTotalSizeLimit(limit int64) { + g.mtx.Lock() + g.totalSizeLimit = limit + g.mtx.Unlock() +} + +func (g *Group) TotalSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.totalSizeLimit +} + +func (g *Group) Close() error { + g.ticker.Stop() + return nil +} + +func (g *Group) processTicks() { + for { + _, ok := <-g.ticker.C + if !ok { + return // Done. + } + // TODO Check head size limit + // TODO check total size limit + } +} + +// NOTE: for testing +func (g *Group) stopTicker() { + g.ticker.Stop() +} + +// NOTE: this function is called manually in tests. 
+func (g *Group) checkHeadSizeLimit() { + size, err := g.Head.Size() + if err != nil { + panic(err) + } + if size >= g.HeadSizeLimit() { + g.RotateFile() + } +} + +func (g *Group) checkTotalSizeLimit() { + // TODO enforce total size limit +} + +func (g *Group) RotateFile() { + g.mtx.Lock() + defer g.mtx.Unlock() + + gInfo := g.readGroupInfo() + dstPath := filePathForIndex(g.Head.Path, gInfo.MaxIndex+1) + err := os.Rename(g.Head.Path, dstPath) + if err != nil { + panic(err) + } + err = g.Head.closeFile() + if err != nil { + panic(err) + } +} + +func (g *Group) NewReader(index int) *GroupReader { + r := newGroupReader(g) + r.SetIndex(index) + return r +} + +// Returns -1 if line comes after, 0 if found, 1 if line comes before. +type SearchFunc func(line string) (int, error) + +// Searches for the right file in Group, +// then returns a GroupReader to start streaming lines +// CONTRACT: caller is responsible for closing GroupReader. +func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, error) { + gInfo := g.ReadGroupInfo() + minIndex, maxIndex := gInfo.MinIndex, gInfo.MaxIndex + curIndex := (minIndex + maxIndex + 1) / 2 + + for { + + // Base case, when there's only 1 choice left. + if minIndex == maxIndex { + r := g.NewReader(maxIndex) + err := scanUntil(r, prefix, cmp) + if err != nil { + r.Close() + return nil, err + } else { + return r, err + } + } + + // Read starting roughly at the middle file, + // until we find line that has prefix. + r := g.NewReader(curIndex) + foundIndex, line, err := scanFirst(r, prefix) + r.Close() + if err != nil { + return nil, err + } + + // Compare this line to our search query. 
+ val, err := cmp(line) + if err != nil { + return nil, err + } + if val < 0 { + // Line will come later + minIndex = foundIndex + } else if val == 0 { + // Stroke of luck, found the line + r := g.NewReader(foundIndex) + err := scanUntil(r, prefix, cmp) + if err != nil { + r.Close() + return nil, err + } else { + return r, err + } + } else { + // We passed it + maxIndex = curIndex - 1 + } + } + +} + +// Scans and returns the first line that starts with 'prefix' +func scanFirst(r *GroupReader, prefix string) (int, string, error) { + for { + line, err := r.ReadLine() + if err != nil { + return 0, "", err + } + if !strings.HasPrefix(line, prefix) { + continue + } + index := r.CurIndex() + return index, line, nil + } +} + +func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) error { + for { + line, err := r.ReadLine() + if err != nil { + return err + } + if !strings.HasPrefix(line, prefix) { + continue + } + val, err := cmp(line) + if err != nil { + return err + } + if val < 0 { + continue + } else { + r.PushLine(line) + return nil + } + } +} + +type GroupInfo struct { + MinIndex int + MaxIndex int + TotalSize int64 + HeadSize int64 +} + +// Returns info after scanning all files in g.Head's dir +func (g *Group) ReadGroupInfo() GroupInfo { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.readGroupInfo() +} + +// CONTRACT: caller should have called g.mtx.Lock +func (g *Group) readGroupInfo() GroupInfo { + groupDir := filepath.Dir(g.Head.Path) + headBase := filepath.Base(g.Head.Path) + var minIndex, maxIndex int = -1, -1 + var totalSize, headSize int64 = 0, 0 + + dir, err := os.Open(groupDir) + if err != nil { + panic(err) + } + fiz, err := dir.Readdir(0) + if err != nil { + panic(err) + } + + // For each file in the directory, filter by pattern + for _, fileInfo := range fiz { + if fileInfo.Name() == headBase { + fileSize := fileInfo.Size() + totalSize += fileSize + headSize = fileSize + continue + } else if strings.HasPrefix(fileInfo.Name(), headBase) { + 
fileSize := fileInfo.Size() + totalSize += fileSize + indexedFilePattern := regexp.MustCompile(`^.+\.([0-9]{3,})$`) + submatch := indexedFilePattern.FindSubmatch([]byte(fileInfo.Name())) + if len(submatch) != 0 { + // Matches + fileIndex, err := strconv.Atoi(string(submatch[1])) + if err != nil { + panic(err) + } + if maxIndex < fileIndex { + maxIndex = fileIndex + } + if minIndex == -1 || fileIndex < minIndex { + minIndex = fileIndex + } + } + } + } + + return GroupInfo{minIndex, maxIndex, totalSize, headSize} +} + +func filePathForIndex(headPath string, index int) string { + return fmt.Sprintf("%v.%03d", headPath, index) +} + +//-------------------------------------------------------------------------------- + +type GroupReader struct { + *Group + mtx sync.Mutex + curIndex int + curFile *os.File + curReader *bufio.Reader + curLine []byte +} + +func newGroupReader(g *Group) *GroupReader { + return &GroupReader{ + Group: g, + curIndex: -1, + curFile: nil, + curReader: nil, + curLine: nil, + } +} + +func (g *GroupReader) ReadLine() (string, error) { + g.mtx.Lock() + defer g.mtx.Unlock() + + // From PushLine + if g.curLine != nil { + line := string(g.curLine) + g.curLine = nil + return line, nil + } + + // Open file if not open yet + if g.curReader == nil { + err := g.openFile(0) + if err != nil { + return "", err + } + } + + // Iterate over files until line is found + for { + bytes, err := g.curReader.ReadBytes('\n') + if err != nil { + if err != io.EOF { + return string(bytes), err + } else { + // Open the next file + err := g.openFile(g.curIndex + 1) + if err != nil { + return "", err + } + } + } + } +} + +// CONTRACT: caller should hold g.mtx +func (g *GroupReader) openFile(index int) error { + + // Lock on Group to ensure that head doesn't move in the meanwhile. 
+ g.Group.mtx.Lock() + defer g.Group.mtx.Unlock() + + curFilePath := filePathForIndex(g.Head.Path, index) + curFile, err := os.Open(curFilePath) + if err != nil { + return err + } + curReader := bufio.NewReader(curFile) + + // Update g.cur* + g.curIndex = index + g.curFile = curFile + g.curReader = curReader + g.curLine = nil + return nil +} + +func (g *GroupReader) PushLine(line string) { + g.mtx.Lock() + defer g.mtx.Unlock() + + if g.curLine == nil { + g.curLine = []byte(line) + } else { + panic("PushLine failed, already have line") + } +} + +// Cursor's file index. +func (g *GroupReader) CurIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.curIndex +} + +func (g *GroupReader) SetIndex(index int) { + g.mtx.Lock() + defer g.mtx.Unlock() + g.openFile(index) +} diff --git a/group_test.go b/group_test.go new file mode 100644 index 000000000..8c1b7b6a8 --- /dev/null +++ b/group_test.go @@ -0,0 +1,110 @@ +package autofile + +import ( + "testing" + + . "github.com/tendermint/go-common" +) + +func createTestGroup(t *testing.T, headPath string) *Group { + autofile, err := OpenAutoFile(headPath) + if err != nil { + t.Fatal("Error opening AutoFile", headPath, err) + } + g, err := OpenGroup(autofile) + if err != nil { + t.Fatal("Error opening Group", err) + } + return g +} + +func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { + if gInfo.MinIndex != minIndex { + t.Errorf("GroupInfo MinIndex expected %v, got %v", minIndex, gInfo.MinIndex) + } + if gInfo.MaxIndex != maxIndex { + t.Errorf("GroupInfo MaxIndex expected %v, got %v", maxIndex, gInfo.MaxIndex) + } + if gInfo.TotalSize != totalSize { + t.Errorf("GroupInfo TotalSize expected %v, got %v", totalSize, gInfo.TotalSize) + } + if gInfo.HeadSize != headSize { + t.Errorf("GroupInfo HeadSize expected %v, got %v", headSize, gInfo.HeadSize) + } +} + +func TestCreateGroup(t *testing.T) { + testID := RandStr(12) + testDir := "_test_" + testID + err := 
EnsureDir(testDir, 0700) + if err != nil { + t.Fatal("Error creating dir", err) + } + + g := createTestGroup(t, testDir+"/myfile") + if g == nil { + t.Error("Failed to create Group") + } + g.SetHeadSizeLimit(1000 * 1000) + g.stopTicker() + + // At first, there are no files. + assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 0, 0) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + _, err := g.Head.Write([]byte(RandStr(999) + "\n")) + if err != nil { + t.Fatal("Error appending to head", err) + } + } + assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 999000, 999000) + + // Even calling checkHeadSizeLimit manually won't rotate it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 999000, 999000) + + // Write 1000 more bytes. + _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + if err != nil { + t.Fatal("Error appending to head", err) + } + + // Calling checkHeadSizeLimit this time rolls it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 1000000, 0) + + // Write 1000 more bytes. + _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + if err != nil { + t.Fatal("Error appending to head", err) + } + + // Calling checkHeadSizeLimit does nothing. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 1001000, 1000) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + _, err := g.Head.Write([]byte(RandStr(999) + "\n")) + if err != nil { + t.Fatal("Error appending to head", err) + } + } + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 2000000, 1000000) + + // Calling checkHeadSizeLimit rolls it again. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 0) + + // Write 1000 more bytes. + _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + if err != nil { + t.Fatal("Error appending to head", err) + } + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2001000, 1000) + + // Calling checkHeadSizeLimit does nothing. 
+ g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2001000, 1000) +} diff --git a/sighup_watcher.go b/sighup_watcher.go new file mode 100644 index 000000000..facc238d5 --- /dev/null +++ b/sighup_watcher.go @@ -0,0 +1,63 @@ +package autofile + +import ( + "os" + "os/signal" + "sync" + "sync/atomic" + "syscall" +) + +func init() { + initSighupWatcher() +} + +var sighupWatchers *SighupWatcher +var sighupCounter int32 // For testing + +func initSighupWatcher() { + sighupWatchers = newSighupWatcher() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + go func() { + for _ = range c { + sighupWatchers.closeAll() + atomic.AddInt32(&sighupCounter, 1) + } + }() +} + +// Watches for SIGHUP events and notifies registered AutoFiles +type SighupWatcher struct { + mtx sync.Mutex + autoFiles map[string]*AutoFile +} + +func newSighupWatcher() *SighupWatcher { + return &SighupWatcher{ + autoFiles: make(map[string]*AutoFile, 10), + } +} + +func (w *SighupWatcher) addAutoFile(af *AutoFile) { + w.mtx.Lock() + w.autoFiles[af.ID] = af + w.mtx.Unlock() +} + +// If AutoFile isn't registered or was already removed, does nothing. 
+func (w *SighupWatcher) removeAutoFile(af *AutoFile) { + w.mtx.Lock() + delete(w.autoFiles, af.ID) + w.mtx.Unlock() +} + +func (w *SighupWatcher) closeAll() { + w.mtx.Lock() + for _, af := range w.autoFiles { + af.closeFile() + } + w.mtx.Unlock() +} From c26b857900009ac81c78c1bc03f85e0c8e47818a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 21:50:07 -0700 Subject: [PATCH 059/515] Fix Search and add test --- group.go | 181 +++++++++++++++++++++++++++++++++---------------- group_test.go | 183 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 284 insertions(+), 80 deletions(-) diff --git a/group.go b/group.go index c0d199e1d..84aa8a228 100644 --- a/group.go +++ b/group.go @@ -24,6 +24,7 @@ with respect to the Group's Head (the AutoFile being appended to) */ const groupCheckDuration = 1000 * time.Millisecond +const defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB type Group struct { ID string @@ -33,17 +34,25 @@ type Group struct { mtx sync.Mutex headSizeLimit int64 totalSizeLimit int64 + minIndex int // Includes head + maxIndex int // Includes head, where Head will move to } func OpenGroup(head *AutoFile) (g *Group, err error) { dir := path.Dir(head.Path) g = &Group{ - ID: "group:" + head.ID, - Head: head, - Dir: dir, - ticker: time.NewTicker(groupCheckDuration), + ID: "group:" + head.ID, + Head: head, + Dir: dir, + ticker: time.NewTicker(groupCheckDuration), + headSizeLimit: defaultHeadSizeLimit, + minIndex: 0, + maxIndex: 0, } + gInfo := g.readGroupInfo() + g.minIndex = gInfo.MinIndex + g.maxIndex = gInfo.MaxIndex go g.processTicks() return } @@ -72,6 +81,12 @@ func (g *Group) TotalSizeLimit() int64 { return g.totalSizeLimit } +func (g *Group) MaxIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.maxIndex +} + func (g *Group) Close() error { g.ticker.Stop() return nil @@ -83,8 +98,8 @@ func (g *Group) processTicks() { if !ok { return // Done. 
} - // TODO Check head size limit - // TODO check total size limit + g.checkHeadSizeLimit() + g.checkTotalSizeLimit() } } @@ -112,8 +127,7 @@ func (g *Group) RotateFile() { g.mtx.Lock() defer g.mtx.Unlock() - gInfo := g.readGroupInfo() - dstPath := filePathForIndex(g.Head.Path, gInfo.MaxIndex+1) + dstPath := filePathForIndex(g.Head.Path, g.maxIndex) err := os.Rename(g.Head.Path, dstPath) if err != nil { panic(err) @@ -122,6 +136,7 @@ func (g *Group) RotateFile() { if err != nil { panic(err) } + g.maxIndex += 1 } func (g *Group) NewReader(index int) *GroupReader { @@ -135,23 +150,29 @@ type SearchFunc func(line string) (int, error) // Searches for the right file in Group, // then returns a GroupReader to start streaming lines +// Returns true if an exact match was found, otherwise returns +// the next greater line that starts with prefix. // CONTRACT: caller is responsible for closing GroupReader. -func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, error) { - gInfo := g.ReadGroupInfo() - minIndex, maxIndex := gInfo.MinIndex, gInfo.MaxIndex - curIndex := (minIndex + maxIndex + 1) / 2 +func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + // Now minIndex/maxIndex may change meanwhile, + // but it shouldn't be a big deal + // (maybe we'll want to limit scanUntil though) for { + curIndex := (minIndex + maxIndex + 1) / 2 // Base case, when there's only 1 choice left. 
if minIndex == maxIndex { r := g.NewReader(maxIndex) - err := scanUntil(r, prefix, cmp) + match, err := scanUntil(r, prefix, cmp) if err != nil { r.Close() - return nil, err + return nil, false, err } else { - return r, err + return r, match, err } } @@ -161,13 +182,13 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, error) { foundIndex, line, err := scanFirst(r, prefix) r.Close() if err != nil { - return nil, err + return nil, false, err } // Compare this line to our search query. val, err := cmp(line) if err != nil { - return nil, err + return nil, false, err } if val < 0 { // Line will come later @@ -175,12 +196,15 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, error) { } else if val == 0 { // Stroke of luck, found the line r := g.NewReader(foundIndex) - err := scanUntil(r, prefix, cmp) + match, err := scanUntil(r, prefix, cmp) + if !match { + panic("Expected match to be true") + } if err != nil { r.Close() - return nil, err + return nil, false, err } else { - return r, err + return r, true, err } } else { // We passed it @@ -205,24 +229,28 @@ func scanFirst(r *GroupReader, prefix string) (int, string, error) { } } -func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) error { +// Returns true iff an exact match was found. +func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { for { line, err := r.ReadLine() if err != nil { - return err + return false, err } if !strings.HasPrefix(line, prefix) { continue } val, err := cmp(line) if err != nil { - return err + return false, err } if val < 0 { continue + } else if val == 0 { + r.PushLine(line) + return true, nil } else { r.PushLine(line) - return nil + return false, nil } } } @@ -241,6 +269,7 @@ func (g *Group) ReadGroupInfo() GroupInfo { return g.readGroupInfo() } +// Index includes the head. 
// CONTRACT: caller should have called g.mtx.Lock func (g *Group) readGroupInfo() GroupInfo { groupDir := filepath.Dir(g.Head.Path) @@ -285,6 +314,15 @@ func (g *Group) readGroupInfo() GroupInfo { } } + // Now account for the head. + if minIndex == -1 { + // If there were no numbered files, + // then the head is index 0. + minIndex, maxIndex = 0, 0 + } else { + // Otherwise, the head file is 1 greater + maxIndex += 1 + } return GroupInfo{minIndex, maxIndex, totalSize, headSize} } @@ -306,27 +344,43 @@ type GroupReader struct { func newGroupReader(g *Group) *GroupReader { return &GroupReader{ Group: g, - curIndex: -1, + curIndex: 0, curFile: nil, curReader: nil, curLine: nil, } } -func (g *GroupReader) ReadLine() (string, error) { - g.mtx.Lock() - defer g.mtx.Unlock() +func (gr *GroupReader) Close() error { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + if gr.curReader != nil { + err := gr.curFile.Close() + gr.curIndex = 0 + gr.curReader = nil + gr.curFile = nil + gr.curLine = nil + return err + } else { + return nil + } +} + +func (gr *GroupReader) ReadLine() (string, error) { + gr.mtx.Lock() + defer gr.mtx.Unlock() // From PushLine - if g.curLine != nil { - line := string(g.curLine) - g.curLine = nil + if gr.curLine != nil { + line := string(gr.curLine) + gr.curLine = nil return line, nil } // Open file if not open yet - if g.curReader == nil { - err := g.openFile(0) + if gr.curReader == nil { + err := gr.openFile(gr.curIndex) if err != nil { return "", err } @@ -334,63 +388,74 @@ func (g *GroupReader) ReadLine() (string, error) { // Iterate over files until line is found for { - bytes, err := g.curReader.ReadBytes('\n') + bytes, err := gr.curReader.ReadBytes('\n') if err != nil { if err != io.EOF { return string(bytes), err } else { // Open the next file - err := g.openFile(g.curIndex + 1) + err := gr.openFile(gr.curIndex + 1) if err != nil { return "", err } + continue } } + return string(bytes), nil } } -// CONTRACT: caller should hold g.mtx -func (g *GroupReader) 
openFile(index int) error { +// IF index > gr.Group.maxIndex, returns io.EOF +// CONTRACT: caller should hold gr.mtx +func (gr *GroupReader) openFile(index int) error { // Lock on Group to ensure that head doesn't move in the meanwhile. - g.Group.mtx.Lock() - defer g.Group.mtx.Unlock() + gr.Group.mtx.Lock() + defer gr.Group.mtx.Unlock() + + var curFilePath string + if index == gr.Group.maxIndex { + curFilePath = gr.Head.Path + } else if index > gr.Group.maxIndex { + return io.EOF + } else { + curFilePath = filePathForIndex(gr.Head.Path, index) + } - curFilePath := filePathForIndex(g.Head.Path, index) curFile, err := os.Open(curFilePath) if err != nil { return err } curReader := bufio.NewReader(curFile) - // Update g.cur* - g.curIndex = index - g.curFile = curFile - g.curReader = curReader - g.curLine = nil + // Update gr.cur* + gr.curIndex = index + gr.curFile = curFile + gr.curReader = curReader + gr.curLine = nil return nil } -func (g *GroupReader) PushLine(line string) { - g.mtx.Lock() - defer g.mtx.Unlock() +func (gr *GroupReader) PushLine(line string) { + gr.mtx.Lock() + defer gr.mtx.Unlock() - if g.curLine == nil { - g.curLine = []byte(line) + if gr.curLine == nil { + gr.curLine = []byte(line) } else { panic("PushLine failed, already have line") } } // Cursor's file index. -func (g *GroupReader) CurIndex() int { - g.mtx.Lock() - defer g.mtx.Unlock() - return g.curIndex +func (gr *GroupReader) CurIndex() int { + gr.mtx.Lock() + defer gr.mtx.Unlock() + return gr.curIndex } -func (g *GroupReader) SetIndex(index int) { - g.mtx.Lock() - defer g.mtx.Unlock() - g.openFile(index) +func (gr *GroupReader) SetIndex(index int) { + gr.mtx.Lock() + defer gr.mtx.Unlock() + gr.openFile(index) } diff --git a/group_test.go b/group_test.go index 8c1b7b6a8..ced88c8bb 100644 --- a/group_test.go +++ b/group_test.go @@ -1,12 +1,25 @@ package autofile import ( + "errors" + "io" + "os" + "strconv" + "strings" "testing" . 
"github.com/tendermint/go-common" ) -func createTestGroup(t *testing.T, headPath string) *Group { +// NOTE: Returned group has ticker stopped +func createTestGroup(t *testing.T, headSizeLimit int64) *Group { + testID := RandStr(12) + testDir := "_test_" + testID + err := EnsureDir(testDir, 0700) + if err != nil { + t.Fatal("Error creating dir", err) + } + headPath := testDir + "/myfile" autofile, err := OpenAutoFile(headPath) if err != nil { t.Fatal("Error opening AutoFile", headPath, err) @@ -15,9 +28,18 @@ func createTestGroup(t *testing.T, headPath string) *Group { if err != nil { t.Fatal("Error opening Group", err) } + g.SetHeadSizeLimit(headSizeLimit) + g.stopTicker() return g } +func destroyTestGroup(t *testing.T, g *Group) { + err := os.RemoveAll(g.Dir) + if err != nil { + t.Fatal("Error removing test Group directory", err) + } +} + func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { if gInfo.MinIndex != minIndex { t.Errorf("GroupInfo MinIndex expected %v, got %v", minIndex, gInfo.MinIndex) @@ -33,23 +55,14 @@ func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, tota } } -func TestCreateGroup(t *testing.T) { - testID := RandStr(12) - testDir := "_test_" + testID - err := EnsureDir(testDir, 0700) - if err != nil { - t.Fatal("Error creating dir", err) - } - - g := createTestGroup(t, testDir+"/myfile") +func TestCheckHeadSizeLimit(t *testing.T) { + g := createTestGroup(t, 1000*1000) if g == nil { t.Error("Failed to create Group") } - g.SetHeadSizeLimit(1000 * 1000) - g.stopTicker() // At first, there are no files. - assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 0, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) // Write 1000 bytes 999 times. 
for i := 0; i < 999; i++ { @@ -58,21 +71,21 @@ func TestCreateGroup(t *testing.T) { t.Fatal("Error appending to head", err) } } - assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 999000, 999000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), -1, -1, 999000, 999000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. - _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + _, err := g.Head.Write([]byte(RandStr(999) + "\n")) if err != nil { t.Fatal("Error appending to head", err) } // Calling checkHeadSizeLimit this time rolls it. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 1000000, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. _, err = g.Head.Write([]byte(RandStr(999) + "\n")) @@ -82,7 +95,7 @@ func TestCreateGroup(t *testing.T) { // Calling checkHeadSizeLimit does nothing. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 1001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { @@ -91,20 +104,146 @@ func TestCreateGroup(t *testing.T) { t.Fatal("Error appending to head", err) } } - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 2000000, 1000000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) // Write 1000 more bytes. _, err = g.Head.Write([]byte(RandStr(999) + "\n")) if err != nil { t.Fatal("Error appending to head", err) } - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. 
g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestSearch(t *testing.T) { + g := createTestGroup(t, 10*1000) + if g == nil { + t.Error("Failed to create Group") + } + + // Create some files in the group that have several INFO lines in them. + // Try to put the INFO lines in various spots. + for i := 0; i < 100; i++ { + // The random junk at the end ensures that this INFO linen + // is equally likely to show up at the end. + _, err := g.Head.Write([]byte(Fmt("INFO %v %v\n", i, RandStr(123)))) + if err != nil { + t.Error("Failed to write to head") + } + g.checkHeadSizeLimit() + for j := 0; j < 10; j++ { + _, err := g.Head.Write([]byte(RandStr(123) + "\n")) + if err != nil { + t.Error("Failed to write to head") + } + g.checkHeadSizeLimit() + } + } + + // Create a search func that searches for line + makeSearchFunc := func(target int) SearchFunc { + return func(line string) (int, error) { + parts := strings.Split(line, " ") + if len(parts) != 3 { + return -1, errors.New("Line did not have 3 parts") + } + i, err := strconv.Atoi(parts[1]) + if err != nil { + return -1, errors.New("Failed to parse INFO: " + err.Error()) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } + } + + // Now search for each number + for i := 0; i < 100; i++ { + t.Log("Testing for i", i) + gr, match, err := g.Search("INFO", makeSearchFunc(i)) + if err != nil { + t.Fatal("Failed to search for line:", err) + } + if !match { + t.Error("Expected Search to return exact match") + } + line, err := gr.ReadLine() + if err != nil { + t.Fatal("Failed to read line after search", err) + } + if !strings.HasPrefix(line, Fmt("INFO %v ", i)) { + t.Fatal("Failed to get correct line") + } + // Make sure we can continue to read from there. 
+ cur := i + 1 + for { + line, err := gr.ReadLine() + if err == io.EOF { + if cur == 99+1 { + // OK! + break + } else { + t.Fatal("Got EOF after the wrong INFO #") + } + } else if err != nil { + t.Fatal("Error reading line", err) + } + if !strings.HasPrefix(line, "INFO ") { + continue + } + if !strings.HasPrefix(line, Fmt("INFO %v ", cur)) { + t.Fatalf("Unexpected INFO #. Expected %v got:\n%v", cur, line) + } + cur += 1 + } + gr.Close() + } + + // Now search for something that is too small. + // We should get the first available line. + { + gr, match, err := g.Search("INFO", makeSearchFunc(-999)) + if err != nil { + t.Fatal("Failed to search for line:", err) + } + if match { + t.Error("Expected Search to not return exact match") + } + line, err := gr.ReadLine() + if err != nil { + t.Fatal("Failed to read line after search", err) + } + if !strings.HasPrefix(line, "INFO 0 ") { + t.Error("Failed to fetch correct line, which is the earliest INFO") + } + } + + // Now search for something that is too large. + // We should get an EOF error. 
+ { + gr, _, err := g.Search("INFO", makeSearchFunc(999)) + if err != io.EOF { + t.Error("Expected to get an EOF error") + } + if gr != nil { + t.Error("Expected to get nil GroupReader") + } + } + + // Cleanup + destroyTestGroup(t, g) } From 44f2818a3d6c0174da7c16c97387cb97fe6c63af Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 21:50:28 -0700 Subject: [PATCH 060/515] Remove AutoFile; Use go-autofile instead --- os.go | 154 ---------------------------------------------------------- 1 file changed, 154 deletions(-) diff --git a/os.go b/os.go index a273bec48..9f38027fa 100644 --- a/os.go +++ b/os.go @@ -7,19 +7,12 @@ import ( "os" "os/signal" "strings" - "sync" - "syscall" - "time" ) var ( GoPath = os.Getenv("GOPATH") ) -func init() { - initAFSIGHUPWatcher() -} - func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) @@ -112,153 +105,6 @@ func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { //-------------------------------------------------------------------------------- -/* AutoFile usage - -// Create/Append to ./autofile_test -af, err := OpenAutoFile("autofile_test") -if err != nil { - panic(err) -} - -// Stream of writes. -// During this time, the file may be moved e.g. by logRotate. -for i := 0; i < 60; i++ { - af.Write([]byte(Fmt("LOOP(%v)", i))) - time.Sleep(time.Second) -} - -// Close the AutoFile -err = af.Close() -if err != nil { - panic(err) -} -*/ - -const autoFileOpenDuration = 1000 * time.Millisecond - -// Automatically closes and re-opens file for writing. -// This is useful for using a log file with the logrotate tool. 
-type AutoFile struct { - ID string - Path string - ticker *time.Ticker - mtx sync.Mutex - file *os.File -} - -func OpenAutoFile(path string) (af *AutoFile, err error) { - af = &AutoFile{ - ID: RandStr(12) + ":" + path, - Path: path, - ticker: time.NewTicker(autoFileOpenDuration), - } - if err = af.openFile(); err != nil { - return - } - go af.processTicks() - autoFileWatchers.addAutoFile(af) - return -} - -func (af *AutoFile) Close() error { - af.ticker.Stop() - err := af.closeFile() - autoFileWatchers.removeAutoFile(af) - return err -} - -func (af *AutoFile) processTicks() { - for { - _, ok := <-af.ticker.C - if !ok { - return // Done. - } - af.closeFile() - } -} - -func (af *AutoFile) closeFile() (err error) { - af.mtx.Lock() - defer af.mtx.Unlock() - - file := af.file - if file == nil { - return nil - } - af.file = nil - return file.Close() -} - -func (af *AutoFile) Write(b []byte) (n int, err error) { - af.mtx.Lock() - defer af.mtx.Unlock() - if af.file == nil { - if err = af.openFile(); err != nil { - return - } - } - return af.file.Write(b) -} - -func (af *AutoFile) openFile() error { - file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - return err - } - af.file = file - return nil -} - -//-------------------------------------------------------------------------------- - -var autoFileWatchers *afSIGHUPWatcher - -func initAFSIGHUPWatcher() { - autoFileWatchers = newAFSIGHUPWatcher() - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP) - - go func() { - for _ = range c { - autoFileWatchers.closeAll() - } - }() -} - -type afSIGHUPWatcher struct { - mtx sync.Mutex - autoFiles map[string]*AutoFile -} - -func newAFSIGHUPWatcher() *afSIGHUPWatcher { - return &afSIGHUPWatcher{ - autoFiles: make(map[string]*AutoFile, 10), - } -} - -func (afw *afSIGHUPWatcher) addAutoFile(af *AutoFile) { - afw.mtx.Lock() - afw.autoFiles[af.ID] = af - afw.mtx.Unlock() -} - -func (afw *afSIGHUPWatcher) removeAutoFile(af 
*AutoFile) { - afw.mtx.Lock() - delete(afw.autoFiles, af.ID) - afw.mtx.Unlock() -} - -func (afw *afSIGHUPWatcher) closeAll() { - afw.mtx.Lock() - for _, af := range afw.autoFiles { - af.closeFile() - } - afw.mtx.Unlock() -} - -//-------------------------------------------------------------------------------- - func Tempfile(prefix string) (*os.File, string) { file, err := ioutil.TempFile("", prefix) if err != nil { From d741b81ab5634483d3abc45615cd10b1befc99bc Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 22:11:43 -0700 Subject: [PATCH 061/515] Add better docs for Group --- group.go | 24 ++++++++++++++++++++++-- group_test.go | 4 ++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/group.go b/group.go index 84aa8a228..04403bb74 100644 --- a/group.go +++ b/group.go @@ -19,8 +19,28 @@ You can open a Group to keep restrictions on an AutoFile, like the maximum size of each chunk, and/or the total amount of bytes stored in the group. -The Group can also be used to binary-search, and to read atomically -with respect to the Group's Head (the AutoFile being appended to) +The first file to be written in the Group.Dir is the head file. + + Dir/ + - + +Once the Head file reaches the size limit, it will be rotated. + + Dir/ + - .000 // First rolled file + - // New head path, starts empty. + // The implicit index is 001. + +As more files are written, the index numbers grow... + + Dir/ + - .000 // First rolled file + - .001 // Second rolled file + - ... + - // New head path + +The Group can also be used to binary-search for some line, +assuming that marker lines are written occasionally. 
*/ const groupCheckDuration = 1000 * time.Millisecond diff --git a/group_test.go b/group_test.go index ced88c8bb..f7c70b709 100644 --- a/group_test.go +++ b/group_test.go @@ -230,6 +230,10 @@ func TestSearch(t *testing.T) { if !strings.HasPrefix(line, "INFO 0 ") { t.Error("Failed to fetch correct line, which is the earliest INFO") } + err = gr.Close() + if err != nil { + t.Error("Failed to close GroupReader", err) + } } // Now search for something that is too large. From 03110423360a1ad2440f510102577d3500404006 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 09:10:33 -0700 Subject: [PATCH 062/515] Add CHALLENGE --- group.go | 1 + 1 file changed, 1 insertion(+) diff --git a/group.go b/group.go index 04403bb74..f382ea0b5 100644 --- a/group.go +++ b/group.go @@ -141,6 +141,7 @@ func (g *Group) checkHeadSizeLimit() { func (g *Group) checkTotalSizeLimit() { // TODO enforce total size limit + // CHALLENGE } func (g *Group) RotateFile() { From 2781df39e576c4258b7d10d1404eaf3c029d92f8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 12:09:22 -0700 Subject: [PATCH 063/515] QuitService->BaseService --- service.go | 29 ++++++++++++++--------------- service_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 15 deletions(-) create mode 100644 service_test.go diff --git a/service.go b/service.go index 86ef20ead..7336446ff 100644 --- a/service.go +++ b/service.go @@ -65,6 +65,7 @@ type BaseService struct { name string started uint32 // atomic stopped uint32 // atomic + Quit chan struct{} // The "subclass" of BaseService impl Service @@ -74,6 +75,7 @@ func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { return &BaseService{ log: log, name: name, + Quit: make(chan struct{}), impl: impl, } } @@ -102,6 +104,8 @@ func (bs *BaseService) Start() (bool, error) { } // Implements Service +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStart() func (bs *BaseService) 
OnStart() error { return nil } // Implements Service @@ -111,6 +115,7 @@ func (bs *BaseService) Stop() bool { bs.log.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) } bs.impl.OnStop() + close(bs.Quit) return true } else { if bs.log != nil { @@ -121,6 +126,8 @@ func (bs *BaseService) Stop() bool { } // Implements Service +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} // Implements Service @@ -151,6 +158,10 @@ func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } +func (bs *QuitService) Wait() { + <-bs.Quit +} + // Implements Servce func (bs *BaseService) String() string { return bs.name @@ -160,25 +171,13 @@ func (bs *BaseService) String() string { type QuitService struct { BaseService - Quit chan struct{} } func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { + if log != nil { + log.Warn("QuitService is deprecated, use BaseService instead") + } return &QuitService{ BaseService: *NewBaseService(log, name, impl), - Quit: nil, - } -} - -// NOTE: when overriding OnStart, must call .QuitService.OnStart(). -func (qs *QuitService) OnStart() error { - qs.Quit = make(chan struct{}) - return nil -} - -// NOTE: when overriding OnStop, must call .QuitService.OnStop(). 
-func (qs *QuitService) OnStop() { - if qs.Quit != nil { - close(qs.Quit) } } diff --git a/service_test.go b/service_test.go new file mode 100644 index 000000000..6e24dad6a --- /dev/null +++ b/service_test.go @@ -0,0 +1,24 @@ +package common + +import ( + "testing" +) + +func TestBaseServiceWait(t *testing.T) { + + type TestService struct { + BaseService + } + ts := &TestService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + go func() { + ts.Stop() + }() + + for i := 0; i < 10; i++ { + ts.Wait() + } + +} From fa3daa7abc253264c916c12fecce3effa01a1287 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 12:09:34 -0700 Subject: [PATCH 064/515] Remove AutoFile tests --- os_test.go | 64 ------------------------------------------------------ service.go | 2 +- 2 files changed, 1 insertion(+), 65 deletions(-) delete mode 100644 os_test.go diff --git a/os_test.go b/os_test.go deleted file mode 100644 index c0effdc2b..000000000 --- a/os_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package common - -import ( - "os" - "syscall" - "testing" -) - -func TestSIGHUP(t *testing.T) { - - // First, create an AutoFile writing to a tempfile dir - file, name := Tempfile("sighup_test") - err := file.Close() - if err != nil { - t.Fatalf("Error creating tempfile: %v", err) - } - // Here is the actual AutoFile - af, err := OpenAutoFile(name) - if err != nil { - t.Fatalf("Error creating autofile: %v", err) - } - - // Write to the file. - _, err = af.Write([]byte("Line 1\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - _, err = af.Write([]byte("Line 2\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - - // Send SIGHUP to self. - syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - - // Move the file over - err = os.Rename(name, name+"_old") - if err != nil { - t.Fatalf("Error moving autofile: %v", err) - } - - // Write more to the file. 
- _, err = af.Write([]byte("Line 3\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - _, err = af.Write([]byte("Line 4\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - err = af.Close() - if err != nil { - t.Fatalf("Error closing autofile") - } - - // Both files should exist - if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { - t.Errorf("Unexpected body %s", body) - } - if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { - t.Errorf("Unexpected body %s", body) - } - -} diff --git a/service.go b/service.go index 7336446ff..e2d31925b 100644 --- a/service.go +++ b/service.go @@ -158,7 +158,7 @@ func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } -func (bs *QuitService) Wait() { +func (bs *BaseService) Wait() { <-bs.Quit } From 5e9c5dc413eb3d4455567e1c84c5324510fd9c6e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 13:56:31 -0700 Subject: [PATCH 065/515] Add Group.WriteLine --- group.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/group.go b/group.go index f382ea0b5..48c770ca6 100644 --- a/group.go +++ b/group.go @@ -107,6 +107,14 @@ func (g *Group) MaxIndex() int { return g.maxIndex } +// Auto appends "\n" +// TODO: Make it halt if space is unavailable +func (g *Group) WriteLine(line string) error { + _, err := g.Head.Write([]byte(line + "\n")) + return err +} + +// NOTE: g.Head must be closed separately func (g *Group) Close() error { g.ticker.Stop() return nil From 916f3d789b6afaf7bfe161aeec391c8a35e354a8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 14:50:46 -0700 Subject: [PATCH 066/515] Size() returns 0 if file doesn't exist --- autofile.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/autofile.go b/autofile.go index ed9d549bf..36de9984a 100644 --- a/autofile.go +++ b/autofile.go @@ -107,6 +107,16 @@ func (af *AutoFile) openFile() 
error { func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.file == nil { + err := af.openFile() + if err != nil { + if err == os.ErrNotExist { + return 0, nil + } else { + return -1, err + } + } + } stat, err := af.file.Stat() if err != nil { return -1, err From 1261fca1608264cd14635585b6948ab359c88e37 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 30 Oct 2016 02:40:39 -0700 Subject: [PATCH 067/515] FindLast --- group.go | 96 ++++++++++++++++++++++++++---- group_test.go | 161 ++++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 235 insertions(+), 22 deletions(-) diff --git a/group.go b/group.go index 48c770ca6..331b7e9e0 100644 --- a/group.go +++ b/group.go @@ -168,10 +168,16 @@ func (g *Group) RotateFile() { g.maxIndex += 1 } -func (g *Group) NewReader(index int) *GroupReader { +// NOTE: if error, returns no GroupReader. +// CONTRACT: Caller must close the returned GroupReader +func (g *Group) NewReader(index int) (*GroupReader, error) { r := newGroupReader(g) - r.SetIndex(index) - return r + err := r.SetIndex(index) + if err != nil { + return nil, err + } else { + return r, nil + } } // Returns -1 if line comes after, 0 if found, 1 if line comes before. @@ -181,7 +187,7 @@ type SearchFunc func(line string) (int, error) // then returns a GroupReader to start streaming lines // Returns true if an exact match was found, otherwise returns // the next greater line that starts with prefix. -// CONTRACT: caller is responsible for closing GroupReader. +// CONTRACT: Caller must close the returned GroupReader func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { g.mtx.Lock() minIndex, maxIndex := g.minIndex, g.maxIndex @@ -195,7 +201,10 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error // Base case, when there's only 1 choice left. 
if minIndex == maxIndex { - r := g.NewReader(maxIndex) + r, err := g.NewReader(maxIndex) + if err != nil { + return nil, false, err + } match, err := scanUntil(r, prefix, cmp) if err != nil { r.Close() @@ -207,8 +216,11 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error // Read starting roughly at the middle file, // until we find line that has prefix. - r := g.NewReader(curIndex) - foundIndex, line, err := scanFirst(r, prefix) + r, err := g.NewReader(curIndex) + if err != nil { + return nil, false, err + } + foundIndex, line, err := scanNext(r, prefix) r.Close() if err != nil { return nil, false, err @@ -224,7 +236,10 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error minIndex = foundIndex } else if val == 0 { // Stroke of luck, found the line - r := g.NewReader(foundIndex) + r, err := g.NewReader(foundIndex) + if err != nil { + return nil, false, err + } match, err := scanUntil(r, prefix, cmp) if !match { panic("Expected match to be true") @@ -244,7 +259,8 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error } // Scans and returns the first line that starts with 'prefix' -func scanFirst(r *GroupReader, prefix string) (int, string, error) { +// Consumes line and returns it. +func scanNext(r *GroupReader, prefix string) (int, string, error) { for { line, err := r.ReadLine() if err != nil { @@ -259,6 +275,7 @@ func scanFirst(r *GroupReader, prefix string) (int, string, error) { } // Returns true iff an exact match was found. +// Pushes line, does not consume it. func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { for { line, err := r.ReadLine() @@ -284,6 +301,47 @@ func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { } } +// Searches for the last line in Group with prefix. 
+func (g *Group) FindLast(prefix string) (match string, found bool, err error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + + r, err := g.NewReader(maxIndex) + if err != nil { + return "", false, err + } + defer r.Close() + + // Open files from the back and read +GROUP_LOOP: + for i := maxIndex; i >= minIndex; i-- { + err := r.SetIndex(i) + if err != nil { + return "", false, err + } + // Scan each line and test whether line matches + for { + line, err := r.ReadLineInCurrent() + if err == io.EOF { + if found { + return match, found, nil + } else { + continue GROUP_LOOP + } + } else if err != nil { + return "", false, err + } + if strings.HasPrefix(line, prefix) { + match = line + found = true + } + } + } + + return +} + type GroupInfo struct { MinIndex int MaxIndex int @@ -399,6 +457,18 @@ func (gr *GroupReader) Close() error { func (gr *GroupReader) ReadLine() (string, error) { gr.mtx.Lock() defer gr.mtx.Unlock() + return gr.readLineWithOptions(false) +} + +func (gr *GroupReader) ReadLineInCurrent() (string, error) { + gr.mtx.Lock() + defer gr.mtx.Unlock() + return gr.readLineWithOptions(true) +} + +// curFileOnly: if True, do not open new files, +// just return io.EOF if no new lines found. 
+func (gr *GroupReader) readLineWithOptions(curFileOnly bool) (string, error) { // From PushLine if gr.curLine != nil { @@ -420,7 +490,9 @@ func (gr *GroupReader) ReadLine() (string, error) { bytes, err := gr.curReader.ReadBytes('\n') if err != nil { if err != io.EOF { - return string(bytes), err + return "", err + } else if curFileOnly { + return "", err } else { // Open the next file err := gr.openFile(gr.curIndex + 1) @@ -483,8 +555,8 @@ func (gr *GroupReader) CurIndex() int { return gr.curIndex } -func (gr *GroupReader) SetIndex(index int) { +func (gr *GroupReader) SetIndex(index int) error { gr.mtx.Lock() defer gr.mtx.Unlock() - gr.openFile(index) + return gr.openFile(index) } diff --git a/group_test.go b/group_test.go index f7c70b709..672bd4d90 100644 --- a/group_test.go +++ b/group_test.go @@ -3,6 +3,7 @@ package autofile import ( "errors" "io" + "io/ioutil" "os" "strconv" "strings" @@ -30,6 +31,10 @@ func createTestGroup(t *testing.T, headSizeLimit int64) *Group { } g.SetHeadSizeLimit(headSizeLimit) g.stopTicker() + + if g == nil { + t.Fatal("Failed to create Group") + } return g } @@ -57,16 +62,13 @@ func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, tota func TestCheckHeadSizeLimit(t *testing.T) { g := createTestGroup(t, 1000*1000) - if g == nil { - t.Error("Failed to create Group") - } // At first, there are no files. assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { - _, err := g.Head.Write([]byte(RandStr(999) + "\n")) + err := g.WriteLine(RandStr(999)) if err != nil { t.Fatal("Error appending to head", err) } @@ -78,7 +80,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. 
- _, err := g.Head.Write([]byte(RandStr(999) + "\n")) + err := g.WriteLine(RandStr(999)) if err != nil { t.Fatal("Error appending to head", err) } @@ -88,7 +90,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. - _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + err = g.WriteLine(RandStr(999)) if err != nil { t.Fatal("Error appending to head", err) } @@ -99,7 +101,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { - _, err := g.Head.Write([]byte(RandStr(999) + "\n")) + err := g.WriteLine(RandStr(999)) if err != nil { t.Fatal("Error appending to head", err) } @@ -127,9 +129,6 @@ func TestCheckHeadSizeLimit(t *testing.T) { func TestSearch(t *testing.T) { g := createTestGroup(t, 10*1000) - if g == nil { - t.Error("Failed to create Group") - } // Create some files in the group that have several INFO lines in them. // Try to put the INFO lines in various spots. 
@@ -251,3 +250,145 @@ func TestSearch(t *testing.T) { // Cleanup destroyTestGroup(t, g) } + +func TestRotateFile(t *testing.T) { + g := createTestGroup(t, 0) + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + + // Read g.Head.Path+"000" + body1, err := ioutil.ReadFile(g.Head.Path + ".000") + if err != nil { + t.Error("Failed to read first rolled file") + } + if string(body1) != "Line 1\nLine 2\nLine 3\n" { + t.Errorf("Got unexpected contents: [%v]", string(body1)) + } + + // Read g.Head.Path + body2, err := ioutil.ReadFile(g.Head.Path) + if err != nil { + t.Error("Failed to read first rolled file") + } + if string(body2) != "Line 4\nLine 5\nLine 6\n" { + t.Errorf("Got unexpected contents: [%v]", string(body2)) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast1(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("# a") + g.WriteLine("Line 3") + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.WriteLine("# b") + + match, found, err := g.FindLast("#") + if err != nil { + t.Error("Unexpected error", err) + } + if !found { + t.Error("Expected found=True") + } + if match != "# b\n" { + t.Errorf("Unexpected match: [%v]", match) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast2(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.RotateFile() + g.WriteLine("# a") + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("# b") + g.WriteLine("Line 6") + + match, found, err := g.FindLast("#") + if err != nil { + t.Error("Unexpected error", err) + } + if !found { + t.Error("Expected found=True") + } + if match != "# b\n" { + t.Errorf("Unexpected match: [%v]", match) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast3(t *testing.T) { + g := 
createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("# a") + g.WriteLine("Line 2") + g.WriteLine("# b") + g.WriteLine("Line 3") + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + + match, found, err := g.FindLast("#") + if err != nil { + t.Error("Unexpected error", err) + } + if !found { + t.Error("Expected found=True") + } + if match != "# b\n" { + t.Errorf("Unexpected match: [%v]", match) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast4(t *testing.T) { + g := createTestGroup(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + + match, found, err := g.FindLast("#") + if err != nil { + t.Error("Unexpected error", err) + } + if found { + t.Error("Expected found=False") + } + if match != "" { + t.Errorf("Unexpected match: [%v]", match) + } + + // Cleanup + destroyTestGroup(t, g) +} From a20c98e61957faa93b4014fbd902f20ab9317a6a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 4 Nov 2016 06:14:49 -0700 Subject: [PATCH 068/515] Add Monitor.SetREMA() --- flowrate/flowrate.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flowrate/flowrate.go b/flowrate/flowrate.go index 1b727721e..e233eae0f 100644 --- a/flowrate/flowrate.go +++ b/flowrate/flowrate.go @@ -72,6 +72,14 @@ func (m *Monitor) Update(n int) int { return n } +// Hack to set the current rEMA. +func (m *Monitor) SetREMA(rEMA float64) { + m.mu.Lock() + m.rEMA = rEMA + m.samples++ + m.mu.Unlock() +} + // IO is a convenience method intended to wrap io.Reader and io.Writer method // execution. It calls m.Update(n) and then returns (n, err) unmodified. 
func (m *Monitor) IO(n int, err error) (int, error) { From dc8fa06e642c53339987acfd90154c81c1ab4c6d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 5 Nov 2016 17:58:50 -0700 Subject: [PATCH 069/515] Add MakeSimpleSearchFunc --- autofile.go | 4 ++++ group.go | 46 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 5 deletions(-) diff --git a/autofile.go b/autofile.go index 36de9984a..93ae8ec86 100644 --- a/autofile.go +++ b/autofile.go @@ -95,6 +95,10 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { return af.file.Write(b) } +func (af *AutoFile) Sync() error { + return af.file.Sync() +} + func (af *AutoFile) openFile() error { file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { diff --git a/group.go b/group.go index 331b7e9e0..0788b1635 100644 --- a/group.go +++ b/group.go @@ -2,6 +2,7 @@ package autofile import ( "bufio" + "errors" "fmt" "io" "os" @@ -12,6 +13,8 @@ import ( "strings" "sync" "time" + + . "github.com/tendermint/go-common" ) /* @@ -56,6 +59,9 @@ type Group struct { totalSizeLimit int64 minIndex int // Includes head maxIndex int // Includes head, where Head will move to + + // TODO: When we start deleting files, we need to start tracking GroupReaders + // and their dependencies. } func OpenGroup(head *AutoFile) (g *Group, err error) { @@ -183,10 +189,10 @@ func (g *Group) NewReader(index int) (*GroupReader, error) { // Returns -1 if line comes after, 0 if found, 1 if line comes before. type SearchFunc func(line string) (int, error) -// Searches for the right file in Group, -// then returns a GroupReader to start streaming lines -// Returns true if an exact match was found, otherwise returns -// the next greater line that starts with prefix. +// Searches for the right file in Group, then returns a GroupReader to start +// streaming lines. +// Returns true if an exact match was found, otherwise returns the next greater +// line that starts with prefix. 
// CONTRACT: Caller must close the returned GroupReader func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { g.mtx.Lock() @@ -301,7 +307,8 @@ func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { } } -// Searches for the last line in Group with prefix. +// Searches backwards for the last line in Group with prefix. +// Scans each file forward until the end to find the last match. func (g *Group) FindLast(prefix string) (match string, found bool, err error) { g.mtx.Lock() minIndex, maxIndex := g.minIndex, g.maxIndex @@ -560,3 +567,32 @@ func (gr *GroupReader) SetIndex(index int) error { defer gr.mtx.Unlock() return gr.openFile(index) } + +//-------------------------------------------------------------------------------- + +// A simple SearchFunc that assumes that the marker is of form +// . +// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form: +// +// #HEIGHT:1 +// ... +// #HEIGHT:2 +// ... +func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { + return func(line string) (int, error) { + if !strings.HasPrefix(line, prefix) { + return -1, errors.New(Fmt("Marker line did not have prefix: %v", prefix)) + } + i, err := strconv.Atoi(line[len(prefix):]) + if err != nil { + return -1, errors.New(Fmt("Failed to parse marker line: %v", err.Error())) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } +} From d1848762cf184eb76d50664ba6b568080c067137 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 20 Nov 2016 17:19:15 -0800 Subject: [PATCH 070/515] Fix issue where buffered writes may split a line to two files --- autofile.go | 8 ++++++-- group.go | 55 +++++++++++++++++++++++++++-------------------------- 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/autofile.go b/autofile.go index 93ae8ec86..60a314a42 100644 --- a/autofile.go +++ b/autofile.go @@ -1,10 +1,11 @@ package autofile import ( - . 
"github.com/tendermint/go-common" "os" "sync" "time" + + . "github.com/tendermint/go-common" ) /* AutoFile usage @@ -87,12 +88,15 @@ func (af *AutoFile) closeFile() (err error) { func (af *AutoFile) Write(b []byte) (n int, err error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.file == nil { if err = af.openFile(); err != nil { return } } - return af.file.Write(b) + + n, err = af.file.Write(b) + return } func (af *AutoFile) Sync() error { diff --git a/group.go b/group.go index 0788b1635..1c9842496 100644 --- a/group.go +++ b/group.go @@ -3,7 +3,6 @@ package autofile import ( "bufio" "errors" - "fmt" "io" "os" "path" @@ -52,7 +51,8 @@ const defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB type Group struct { ID string Head *AutoFile // The head AutoFile to write to - Dir string // Directory that contains .Head + headBuf *bufio.Writer + Dir string // Directory that contains .Head ticker *time.Ticker mtx sync.Mutex headSizeLimit int64 @@ -70,6 +70,7 @@ func OpenGroup(head *AutoFile) (g *Group, err error) { g = &Group{ ID: "group:" + head.ID, Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), Dir: dir, ticker: time.NewTicker(groupCheckDuration), headSizeLimit: defaultHeadSizeLimit, @@ -114,9 +115,10 @@ func (g *Group) MaxIndex() int { } // Auto appends "\n" +// NOTE: Writes are buffered so they don't write synchronously // TODO: Make it halt if space is unavailable func (g *Group) WriteLine(line string) error { - _, err := g.Head.Write([]byte(line + "\n")) + _, err := g.headBuf.Write([]byte(line + "\n")) return err } @@ -329,7 +331,7 @@ GROUP_LOOP: } // Scan each line and test whether line matches for { - line, err := r.ReadLineInCurrent() + line, err := r.ReadLine() if err == io.EOF { if found { return match, found, nil @@ -343,6 +345,13 @@ GROUP_LOOP: match = line found = true } + if r.CurIndex() > i { + if found { + return match, found, nil + } else { + continue GROUP_LOOP + } + } } } @@ -461,21 +470,11 @@ func (gr *GroupReader) Close() error { } } +// Reads 
a line (without delimiter) +// just return io.EOF if no new lines found. func (gr *GroupReader) ReadLine() (string, error) { gr.mtx.Lock() defer gr.mtx.Unlock() - return gr.readLineWithOptions(false) -} - -func (gr *GroupReader) ReadLineInCurrent() (string, error) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - return gr.readLineWithOptions(true) -} - -// curFileOnly: if True, do not open new files, -// just return io.EOF if no new lines found. -func (gr *GroupReader) readLineWithOptions(curFileOnly bool) (string, error) { // From PushLine if gr.curLine != nil { @@ -493,23 +492,25 @@ func (gr *GroupReader) readLineWithOptions(curFileOnly bool) (string, error) { } // Iterate over files until line is found + var linePrefix string for { - bytes, err := gr.curReader.ReadBytes('\n') - if err != nil { - if err != io.EOF { - return "", err - } else if curFileOnly { + bytesRead, err := gr.curReader.ReadBytes('\n') + if err == io.EOF { + // Open the next file + err := gr.openFile(gr.curIndex + 1) + if err != nil { return "", err + } + if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { + return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil } else { - // Open the next file - err := gr.openFile(gr.curIndex + 1) - if err != nil { - return "", err - } + linePrefix += string(bytesRead) continue } + } else if err != nil { + return "", err } - return string(bytes), nil + return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil } } From a528af55d3c8354f676b4a5f718ab51d9b9fbb9f Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 19:09:14 -0800 Subject: [PATCH 071/515] Group is a BaseService; TotalSizeLimit enforced; tests fixed --- group.go | 117 ++++++++++++++++++++++++++++++++++++++------------ group_test.go | 27 ++++++++---- 2 files changed, 108 insertions(+), 36 deletions(-) diff --git a/group.go b/group.go index 1c9842496..a2584dfc7 100644 --- a/group.go +++ b/group.go @@ -3,7 +3,9 @@ package autofile import ( "bufio" "errors" + "fmt" "io" 
+ "log" "os" "path" "path/filepath" @@ -45,10 +47,14 @@ The Group can also be used to binary-search for some line, assuming that marker lines are written occasionally. */ -const groupCheckDuration = 1000 * time.Millisecond -const defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB +const groupCheckDuration = 5000 * time.Millisecond +const defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB +const defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB +const maxFilesToRemove = 4 // needs to be greater than 1 type Group struct { + BaseService + ID string Head *AutoFile // The head AutoFile to write to headBuf *bufio.Writer @@ -64,23 +70,43 @@ type Group struct { // and their dependencies. } -func OpenGroup(head *AutoFile) (g *Group, err error) { - dir := path.Dir(head.Path) +func OpenGroup(headPath string) (g *Group, err error) { + + dir := path.Dir(headPath) + head, err := OpenAutoFile(headPath) + if err != nil { + return nil, err + } g = &Group{ - ID: "group:" + head.ID, - Head: head, - headBuf: bufio.NewWriterSize(head, 4096*10), - Dir: dir, - ticker: time.NewTicker(groupCheckDuration), - headSizeLimit: defaultHeadSizeLimit, - minIndex: 0, - maxIndex: 0, + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + ticker: time.NewTicker(groupCheckDuration), + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + minIndex: 0, + maxIndex: 0, } + g.BaseService = *NewBaseService(nil, "Group", g) + gInfo := g.readGroupInfo() g.minIndex = gInfo.MinIndex g.maxIndex = gInfo.MaxIndex + return +} + +func (g *Group) OnStart() error { + g.BaseService.OnStart() go g.processTicks() + return nil +} + +// NOTE: g.Head must be closed separately +func (g *Group) OnStop() { + g.BaseService.OnStop() + g.ticker.Stop() return } @@ -118,14 +144,16 @@ func (g *Group) MaxIndex() int { // NOTE: Writes are buffered so they don't write synchronously // TODO: Make it halt if space is unavailable func (g *Group) WriteLine(line string) 
error { + g.mtx.Lock() + defer g.mtx.Unlock() _, err := g.headBuf.Write([]byte(line + "\n")) return err } -// NOTE: g.Head must be closed separately -func (g *Group) Close() error { - g.ticker.Stop() - return nil +func (g *Group) Flush() error { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headBuf.Flush() } func (g *Group) processTicks() { @@ -146,25 +174,58 @@ func (g *Group) stopTicker() { // NOTE: this function is called manually in tests. func (g *Group) checkHeadSizeLimit() { + limit := g.HeadSizeLimit() + if limit == 0 { + return + } size, err := g.Head.Size() if err != nil { panic(err) } - if size >= g.HeadSizeLimit() { + if size >= limit { g.RotateFile() } } func (g *Group) checkTotalSizeLimit() { - // TODO enforce total size limit - // CHALLENGE + limit := g.TotalSizeLimit() + if limit == 0 { + return + } + + gInfo := g.readGroupInfo() + totalSize := gInfo.TotalSize + for i := 0; i < maxFilesToRemove; i++ { + fmt.Println(">>", gInfo, totalSize, i) + index := gInfo.MinIndex + i + if totalSize < limit { + return + } + if index == gInfo.MaxIndex { + // Special degenerate case, just do nothing. 
+ log.Println("WARNING: Group's head " + g.Head.Path + "may grow without bound") + return + } + pathToRemove := filePathForIndex(g.Head.Path, gInfo.MinIndex, gInfo.MaxIndex) + fileInfo, err := os.Stat(pathToRemove) + if err != nil { + log.Println("WARNING: Failed to fetch info for file @" + pathToRemove) + continue + } + err = os.Remove(pathToRemove) + if err != nil { + log.Println(err) + return + } + totalSize -= fileInfo.Size() + } } func (g *Group) RotateFile() { g.mtx.Lock() defer g.mtx.Unlock() - dstPath := filePathForIndex(g.Head.Path, g.maxIndex) + dstPath := filePathForIndex(g.Head.Path, g.maxIndex, g.maxIndex+1) err := os.Rename(g.Head.Path, dstPath) if err != nil { panic(err) @@ -429,8 +490,12 @@ func (g *Group) readGroupInfo() GroupInfo { return GroupInfo{minIndex, maxIndex, totalSize, headSize} } -func filePathForIndex(headPath string, index int) string { - return fmt.Sprintf("%v.%03d", headPath, index) +func filePathForIndex(headPath string, index int, maxIndex int) string { + if index == maxIndex { + return headPath + } else { + return fmt.Sprintf("%v.%03d", headPath, index) + } } //-------------------------------------------------------------------------------- @@ -522,15 +587,11 @@ func (gr *GroupReader) openFile(index int) error { gr.Group.mtx.Lock() defer gr.Group.mtx.Unlock() - var curFilePath string - if index == gr.Group.maxIndex { - curFilePath = gr.Head.Path - } else if index > gr.Group.maxIndex { + if index > gr.Group.maxIndex { return io.EOF - } else { - curFilePath = filePathForIndex(gr.Head.Path, index) } + curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex) curFile, err := os.Open(curFilePath) if err != nil { return err diff --git a/group_test.go b/group_test.go index 672bd4d90..1c2280e83 100644 --- a/group_test.go +++ b/group_test.go @@ -21,11 +21,7 @@ func createTestGroup(t *testing.T, headSizeLimit int64) *Group { t.Fatal("Error creating dir", err) } headPath := testDir + "/myfile" - autofile, err := 
OpenAutoFile(headPath) - if err != nil { - t.Fatal("Error opening AutoFile", headPath, err) - } - g, err := OpenGroup(autofile) + g, err := OpenGroup(headPath) if err != nil { t.Fatal("Error opening Group", err) } @@ -73,6 +69,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { t.Fatal("Error appending to head", err) } } + g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. @@ -84,6 +81,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { if err != nil { t.Fatal("Error appending to head", err) } + g.Flush() // Calling checkHeadSizeLimit this time rolls it. g.checkHeadSizeLimit() @@ -94,6 +92,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { if err != nil { t.Fatal("Error appending to head", err) } + g.Flush() // Calling checkHeadSizeLimit does nothing. g.checkHeadSizeLimit() @@ -106,6 +105,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { t.Fatal("Error appending to head", err) } } + g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. @@ -117,6 +117,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { if err != nil { t.Fatal("Error appending to head", err) } + g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. 
@@ -256,10 +257,12 @@ func TestRotateFile(t *testing.T) { g.WriteLine("Line 1") g.WriteLine("Line 2") g.WriteLine("Line 3") + g.Flush() g.RotateFile() g.WriteLine("Line 4") g.WriteLine("Line 5") g.WriteLine("Line 6") + g.Flush() // Read g.Head.Path+"000" body1, err := ioutil.ReadFile(g.Head.Path + ".000") @@ -290,11 +293,13 @@ func TestFindLast1(t *testing.T) { g.WriteLine("Line 2") g.WriteLine("# a") g.WriteLine("Line 3") + g.Flush() g.RotateFile() g.WriteLine("Line 4") g.WriteLine("Line 5") g.WriteLine("Line 6") g.WriteLine("# b") + g.Flush() match, found, err := g.FindLast("#") if err != nil { @@ -303,7 +308,7 @@ func TestFindLast1(t *testing.T) { if !found { t.Error("Expected found=True") } - if match != "# b\n" { + if match != "# b" { t.Errorf("Unexpected match: [%v]", match) } @@ -317,12 +322,14 @@ func TestFindLast2(t *testing.T) { g.WriteLine("Line 1") g.WriteLine("Line 2") g.WriteLine("Line 3") + g.Flush() g.RotateFile() g.WriteLine("# a") g.WriteLine("Line 4") g.WriteLine("Line 5") g.WriteLine("# b") g.WriteLine("Line 6") + g.Flush() match, found, err := g.FindLast("#") if err != nil { @@ -331,7 +338,7 @@ func TestFindLast2(t *testing.T) { if !found { t.Error("Expected found=True") } - if match != "# b\n" { + if match != "# b" { t.Errorf("Unexpected match: [%v]", match) } @@ -347,10 +354,12 @@ func TestFindLast3(t *testing.T) { g.WriteLine("Line 2") g.WriteLine("# b") g.WriteLine("Line 3") + g.Flush() g.RotateFile() g.WriteLine("Line 4") g.WriteLine("Line 5") g.WriteLine("Line 6") + g.Flush() match, found, err := g.FindLast("#") if err != nil { @@ -359,7 +368,7 @@ func TestFindLast3(t *testing.T) { if !found { t.Error("Expected found=True") } - if match != "# b\n" { + if match != "# b" { t.Errorf("Unexpected match: [%v]", match) } @@ -373,10 +382,12 @@ func TestFindLast4(t *testing.T) { g.WriteLine("Line 1") g.WriteLine("Line 2") g.WriteLine("Line 3") + g.Flush() g.RotateFile() g.WriteLine("Line 4") g.WriteLine("Line 5") g.WriteLine("Line 6") + g.Flush() 
match, found, err := g.FindLast("#") if err != nil { From dd12bd8f1b59b6ee75ae6ce1c1c70a5c2dc32f11 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 19:57:17 -0800 Subject: [PATCH 072/515] Fix checkTotalSizeLimit bug; remove more than 1 file at a time --- group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/group.go b/group.go index a2584dfc7..d3f724ad1 100644 --- a/group.go +++ b/group.go @@ -206,7 +206,7 @@ func (g *Group) checkTotalSizeLimit() { log.Println("WARNING: Group's head " + g.Head.Path + "may grow without bound") return } - pathToRemove := filePathForIndex(g.Head.Path, gInfo.MinIndex, gInfo.MaxIndex) + pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) fileInfo, err := os.Stat(pathToRemove) if err != nil { log.Println("WARNING: Failed to fetch info for file @" + pathToRemove) From 6b4160f2a57487f277c42bf06fd280195dfdb278 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 20:01:11 -0800 Subject: [PATCH 073/515] Add Tempdir --- os.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/os.go b/os.go index 9f38027fa..e8943c0c5 100644 --- a/os.go +++ b/os.go @@ -113,6 +113,19 @@ func Tempfile(prefix string) (*os.File, string) { return file, file.Name() } +func Tempdir(prefix string) (*os.File, string) { + tempDir := os.TempDir() + "/" + prefix + RandStr(12) + err := EnsureDir(tempDir, 0700) + if err != nil { + panic(Fmt("Error creating temp dir: %v", err)) + } + dir, err := os.Open(tempDir) + if err != nil { + panic(Fmt("Error opening temp dir: %v", err)) + } + return dir, tempDir +} + //-------------------------------------------------------------------------------- func Prompt(prompt string, defaultValue string) (string, error) { From a6a67ea9b28257fb18026c2d8ec5430edb7d2afd Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 26 Oct 2016 21:50:28 -0700 Subject: [PATCH 074/515] Remove AutoFile; Use go-autofile instead --- os.go | 154 
---------------------------------------------------------- 1 file changed, 154 deletions(-) diff --git a/os.go b/os.go index a273bec48..9f38027fa 100644 --- a/os.go +++ b/os.go @@ -7,19 +7,12 @@ import ( "os" "os/signal" "strings" - "sync" - "syscall" - "time" ) var ( GoPath = os.Getenv("GOPATH") ) -func init() { - initAFSIGHUPWatcher() -} - func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) @@ -112,153 +105,6 @@ func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { //-------------------------------------------------------------------------------- -/* AutoFile usage - -// Create/Append to ./autofile_test -af, err := OpenAutoFile("autofile_test") -if err != nil { - panic(err) -} - -// Stream of writes. -// During this time, the file may be moved e.g. by logRotate. -for i := 0; i < 60; i++ { - af.Write([]byte(Fmt("LOOP(%v)", i))) - time.Sleep(time.Second) -} - -// Close the AutoFile -err = af.Close() -if err != nil { - panic(err) -} -*/ - -const autoFileOpenDuration = 1000 * time.Millisecond - -// Automatically closes and re-opens file for writing. -// This is useful for using a log file with the logrotate tool. -type AutoFile struct { - ID string - Path string - ticker *time.Ticker - mtx sync.Mutex - file *os.File -} - -func OpenAutoFile(path string) (af *AutoFile, err error) { - af = &AutoFile{ - ID: RandStr(12) + ":" + path, - Path: path, - ticker: time.NewTicker(autoFileOpenDuration), - } - if err = af.openFile(); err != nil { - return - } - go af.processTicks() - autoFileWatchers.addAutoFile(af) - return -} - -func (af *AutoFile) Close() error { - af.ticker.Stop() - err := af.closeFile() - autoFileWatchers.removeAutoFile(af) - return err -} - -func (af *AutoFile) processTicks() { - for { - _, ok := <-af.ticker.C - if !ok { - return // Done. 
- } - af.closeFile() - } -} - -func (af *AutoFile) closeFile() (err error) { - af.mtx.Lock() - defer af.mtx.Unlock() - - file := af.file - if file == nil { - return nil - } - af.file = nil - return file.Close() -} - -func (af *AutoFile) Write(b []byte) (n int, err error) { - af.mtx.Lock() - defer af.mtx.Unlock() - if af.file == nil { - if err = af.openFile(); err != nil { - return - } - } - return af.file.Write(b) -} - -func (af *AutoFile) openFile() error { - file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - return err - } - af.file = file - return nil -} - -//-------------------------------------------------------------------------------- - -var autoFileWatchers *afSIGHUPWatcher - -func initAFSIGHUPWatcher() { - autoFileWatchers = newAFSIGHUPWatcher() - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP) - - go func() { - for _ = range c { - autoFileWatchers.closeAll() - } - }() -} - -type afSIGHUPWatcher struct { - mtx sync.Mutex - autoFiles map[string]*AutoFile -} - -func newAFSIGHUPWatcher() *afSIGHUPWatcher { - return &afSIGHUPWatcher{ - autoFiles: make(map[string]*AutoFile, 10), - } -} - -func (afw *afSIGHUPWatcher) addAutoFile(af *AutoFile) { - afw.mtx.Lock() - afw.autoFiles[af.ID] = af - afw.mtx.Unlock() -} - -func (afw *afSIGHUPWatcher) removeAutoFile(af *AutoFile) { - afw.mtx.Lock() - delete(afw.autoFiles, af.ID) - afw.mtx.Unlock() -} - -func (afw *afSIGHUPWatcher) closeAll() { - afw.mtx.Lock() - for _, af := range afw.autoFiles { - af.closeFile() - } - afw.mtx.Unlock() -} - -//-------------------------------------------------------------------------------- - func Tempfile(prefix string) (*os.File, string) { file, err := ioutil.TempFile("", prefix) if err != nil { From 25dc9ae3451db5b03bcf1c02bd3559094101e402 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 12:09:22 -0700 Subject: [PATCH 075/515] QuitService->BaseService --- service.go | 29 ++++++++++++++--------------- 
service_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 15 deletions(-) create mode 100644 service_test.go diff --git a/service.go b/service.go index 86ef20ead..7336446ff 100644 --- a/service.go +++ b/service.go @@ -65,6 +65,7 @@ type BaseService struct { name string started uint32 // atomic stopped uint32 // atomic + Quit chan struct{} // The "subclass" of BaseService impl Service @@ -74,6 +75,7 @@ func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { return &BaseService{ log: log, name: name, + Quit: make(chan struct{}), impl: impl, } } @@ -102,6 +104,8 @@ func (bs *BaseService) Start() (bool, error) { } // Implements Service +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStart() func (bs *BaseService) OnStart() error { return nil } // Implements Service @@ -111,6 +115,7 @@ func (bs *BaseService) Stop() bool { bs.log.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) } bs.impl.OnStop() + close(bs.Quit) return true } else { if bs.log != nil { @@ -121,6 +126,8 @@ func (bs *BaseService) Stop() bool { } // Implements Service +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} // Implements Service @@ -151,6 +158,10 @@ func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } +func (bs *QuitService) Wait() { + <-bs.Quit +} + // Implements Servce func (bs *BaseService) String() string { return bs.name @@ -160,25 +171,13 @@ func (bs *BaseService) String() string { type QuitService struct { BaseService - Quit chan struct{} } func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { + if log != nil { + log.Warn("QuitService is deprecated, use BaseService instead") + } return &QuitService{ BaseService: *NewBaseService(log, name, impl), - Quit: nil, - } -} - -// NOTE: when overriding OnStart, must call 
.QuitService.OnStart(). -func (qs *QuitService) OnStart() error { - qs.Quit = make(chan struct{}) - return nil -} - -// NOTE: when overriding OnStop, must call .QuitService.OnStop(). -func (qs *QuitService) OnStop() { - if qs.Quit != nil { - close(qs.Quit) } } diff --git a/service_test.go b/service_test.go new file mode 100644 index 000000000..6e24dad6a --- /dev/null +++ b/service_test.go @@ -0,0 +1,24 @@ +package common + +import ( + "testing" +) + +func TestBaseServiceWait(t *testing.T) { + + type TestService struct { + BaseService + } + ts := &TestService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + go func() { + ts.Stop() + }() + + for i := 0; i < 10; i++ { + ts.Wait() + } + +} From 890e24073036b6dffb7c56d8980545d7d660026b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 28 Oct 2016 12:09:34 -0700 Subject: [PATCH 076/515] Remove AutoFile tests --- os_test.go | 64 ------------------------------------------------------ service.go | 2 +- 2 files changed, 1 insertion(+), 65 deletions(-) delete mode 100644 os_test.go diff --git a/os_test.go b/os_test.go deleted file mode 100644 index c0effdc2b..000000000 --- a/os_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package common - -import ( - "os" - "syscall" - "testing" -) - -func TestSIGHUP(t *testing.T) { - - // First, create an AutoFile writing to a tempfile dir - file, name := Tempfile("sighup_test") - err := file.Close() - if err != nil { - t.Fatalf("Error creating tempfile: %v", err) - } - // Here is the actual AutoFile - af, err := OpenAutoFile(name) - if err != nil { - t.Fatalf("Error creating autofile: %v", err) - } - - // Write to the file. - _, err = af.Write([]byte("Line 1\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - _, err = af.Write([]byte("Line 2\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - - // Send SIGHUP to self. 
- syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - - // Move the file over - err = os.Rename(name, name+"_old") - if err != nil { - t.Fatalf("Error moving autofile: %v", err) - } - - // Write more to the file. - _, err = af.Write([]byte("Line 3\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - _, err = af.Write([]byte("Line 4\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - err = af.Close() - if err != nil { - t.Fatalf("Error closing autofile") - } - - // Both files should exist - if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { - t.Errorf("Unexpected body %s", body) - } - if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { - t.Errorf("Unexpected body %s", body) - } - -} diff --git a/service.go b/service.go index 7336446ff..e2d31925b 100644 --- a/service.go +++ b/service.go @@ -158,7 +158,7 @@ func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } -func (bs *QuitService) Wait() { +func (bs *BaseService) Wait() { <-bs.Quit } From f40b1b65f81b695b44bae7ffe8f98c485b11846e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 20:01:11 -0800 Subject: [PATCH 077/515] Add Tempdir --- os.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/os.go b/os.go index 9f38027fa..e8943c0c5 100644 --- a/os.go +++ b/os.go @@ -113,6 +113,19 @@ func Tempfile(prefix string) (*os.File, string) { return file, file.Name() } +func Tempdir(prefix string) (*os.File, string) { + tempDir := os.TempDir() + "/" + prefix + RandStr(12) + err := EnsureDir(tempDir, 0700) + if err != nil { + panic(Fmt("Error creating temp dir: %v", err)) + } + dir, err := os.Open(tempDir) + if err != nil { + panic(Fmt("Error opening temp dir: %v", err)) + } + return dir, tempDir +} + //-------------------------------------------------------------------------------- func Prompt(prompt string, defaultValue string) (string, 
error) { From 2a306419c88d10fab038f19dcbe4535e740b0aa0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 20:19:01 -0800 Subject: [PATCH 078/515] Remove spurious fmt --- group.go | 1 - 1 file changed, 1 deletion(-) diff --git a/group.go b/group.go index d3f724ad1..4de0d0346 100644 --- a/group.go +++ b/group.go @@ -196,7 +196,6 @@ func (g *Group) checkTotalSizeLimit() { gInfo := g.readGroupInfo() totalSize := gInfo.TotalSize for i := 0; i < maxFilesToRemove; i++ { - fmt.Println(">>", gInfo, totalSize, i) index := gInfo.MinIndex + i if totalSize < limit { return From 63186e34b33d78ae47fb0d25e5717b307fdf3603 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 21 Nov 2016 20:26:47 -0800 Subject: [PATCH 079/515] Fix race condition --- autofile.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autofile.go b/autofile.go index 60a314a42..e61bbb832 100644 --- a/autofile.go +++ b/autofile.go @@ -100,6 +100,9 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { } func (af *AutoFile) Sync() error { + af.mtx.Lock() + defer af.mtx.Unlock() + return af.file.Sync() } @@ -115,6 +118,7 @@ func (af *AutoFile) openFile() error { func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.file == nil { err := af.openFile() if err != nil { From 9f81134388b0ddceeb0384f57c4f210018f27297 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 29 Nov 2016 16:06:36 -0800 Subject: [PATCH 080/515] Add support for levigo bindings --- db.go | 14 ++++++ level_db.go | 27 ++++++++++ level_db2.go | 124 ++++++++++++++++++++++++++++++++++++++++++++++ level_db2_test.go | 84 +++++++++++++++++++++++++++++++ mem_db.go | 64 +++++++++++++++++++++++- 5 files changed, 312 insertions(+), 1 deletion(-) create mode 100644 level_db2.go create mode 100644 level_db2_test.go diff --git a/db.go b/db.go index 6bb1efae7..ab38501e2 100644 --- a/db.go +++ b/db.go @@ -13,16 +13,24 @@ type DB interface { Delete([]byte) DeleteSync([]byte) Close() + NewBatch() Batch // For 
debugging Print() } +type Batch interface { + Set(key, value []byte) + Delete(key []byte) + Write() +} + //----------------------------------------------------------------------------- // Database types const DBBackendMemDB = "memdb" const DBBackendLevelDB = "leveldb" +const DBBackendLevelDB2 = "leveldb2" func NewDB(name string, backend string, dir string) DB { switch backend { @@ -35,6 +43,12 @@ func NewDB(name string, backend string, dir string) DB { PanicCrisis(err) } return db + case DBBackendLevelDB2: + db, err := NewLevelDB2(path.Join(dir, name+".db")) + if err != nil { + PanicCrisis(err) + } + return db default: PanicSanity(Fmt("Unknown DB backend: %v", backend)) } diff --git a/level_db.go b/level_db.go index dee57a321..360362285 100644 --- a/level_db.go +++ b/level_db.go @@ -81,3 +81,30 @@ func (db *LevelDB) Print() { fmt.Printf("[%X]:\t[%X]\n", key, value) } } + +func (db *LevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &levelDBBatch{db, batch} +} + +//-------------------------------------------------------------------------------- + +type levelDBBatch struct { + db *LevelDB + batch *leveldb.Batch +} + +func (mBatch *levelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +func (mBatch *levelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +func (mBatch *levelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.batch, nil) + if err != nil { + PanicCrisis(err) + } +} diff --git a/level_db2.go b/level_db2.go new file mode 100644 index 000000000..e3049aae3 --- /dev/null +++ b/level_db2.go @@ -0,0 +1,124 @@ +package db + +import ( + "fmt" + "path" + + "github.com/jmhodges/levigo" + + . 
"github.com/tendermint/go-common" +) + +type LevelDB2 struct { + db *levigo.DB + ro *levigo.ReadOptions + wo *levigo.WriteOptions + woSync *levigo.WriteOptions +} + +func NewLevelDB2(name string) (*LevelDB2, error) { + dbPath := path.Join(name) + + opts := levigo.NewOptions() + opts.SetCache(levigo.NewLRUCache(1 << 30)) + opts.SetCreateIfMissing(true) + db, err := levigo.Open(dbPath, opts) + if err != nil { + return nil, err + } + ro := levigo.NewReadOptions() + wo := levigo.NewWriteOptions() + woSync := levigo.NewWriteOptions() + woSync.SetSync(true) + database := &LevelDB2{ + db: db, + ro: ro, + wo: wo, + woSync: woSync, + } + return database, nil +} + +func (db *LevelDB2) Get(key []byte) []byte { + res, err := db.db.Get(db.ro, key) + if err != nil { + PanicCrisis(err) + } + return res +} + +func (db *LevelDB2) Set(key []byte, value []byte) { + err := db.db.Put(db.wo, key, value) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB2) SetSync(key []byte, value []byte) { + err := db.db.Put(db.woSync, key, value) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB2) Delete(key []byte) { + err := db.db.Delete(db.wo, key) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB2) DeleteSync(key []byte) { + err := db.db.Delete(db.woSync, key) + if err != nil { + PanicCrisis(err) + } +} + +func (db *LevelDB2) DB() *levigo.DB { + return db.db +} + +func (db *LevelDB2) Close() { + db.db.Close() + db.ro.Close() + db.wo.Close() + db.woSync.Close() +} + +func (db *LevelDB2) Print() { + iter := db.db.NewIterator(db.ro) + defer iter.Close() + for iter.Seek(nil); iter.Valid(); iter.Next() { + key := iter.Key() + value := iter.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +func (db *LevelDB2) NewBatch() Batch { + batch := levigo.NewWriteBatch() + return &levelDB2Batch{db, batch} +} + +//-------------------------------------------------------------------------------- + +type levelDB2Batch struct { + db *LevelDB2 + batch 
*levigo.WriteBatch +} + +func (mBatch *levelDB2Batch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +func (mBatch *levelDB2Batch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +func (mBatch *levelDB2Batch) Write() { + err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) + if err != nil { + PanicCrisis(err) + } +} diff --git a/level_db2_test.go b/level_db2_test.go new file mode 100644 index 000000000..27a558407 --- /dev/null +++ b/level_db2_test.go @@ -0,0 +1,84 @@ +package db + +import ( + "bytes" + "fmt" + "testing" + + . "github.com/tendermint/go-common" +) + +func BenchmarkRandomReadsWrites2(b *testing.B) { + b.StopTimer() + + numItems := int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + db, err := NewLevelDB2(Fmt("test_%x", RandStr(12))) + if err != nil { + b.Fatal(err.Error()) + return + } + + fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := (int64(RandInt()) % numItems) + internal[idx] += 1 + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set( + idxBytes, + valBytes, + ) + } + // Read something + { + idx := (int64(RandInt()) % numItems) + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := db.Get(idxBytes) + //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) + if val == 0 { + if !bytes.Equal(valBytes, nil) { + b.Errorf("Expected %X for %v, got %X", + nil, idx, valBytes) + break + } + } else { + if len(valBytes) != 8 { + b.Errorf("Expected length 8 for %v, got %X", + idx, valBytes) + break + } + valGot := bytes2Int64(valBytes) + if val != valGot { + b.Errorf("Expected %v for %v, got %v", + val, idx, valGot) + break + } + } + } + } + + db.Close() +} + +/* +func int642Bytes(i int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(i)) + return buf +} + 
+func bytes2Int64(buf []byte) int64 { + return int64(binary.BigEndian.Uint64(buf)) +} +*/ diff --git a/mem_db.go b/mem_db.go index b7d8918d4..d27159dab 100644 --- a/mem_db.go +++ b/mem_db.go @@ -2,10 +2,12 @@ package db import ( "fmt" + "sync" ) type MemDB struct { - db map[string][]byte + mtx sync.Mutex + db map[string][]byte } func NewMemDB() *MemDB { @@ -14,31 +16,91 @@ func NewMemDB() *MemDB { } func (db *MemDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() return db.db[string(key)] } func (db *MemDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() db.db[string(key)] = value } func (db *MemDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() delete(db.db, string(key)) } func (db *MemDB) Close() { + db.mtx.Lock() + defer db.mtx.Unlock() db = nil } func (db *MemDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() for key, value := range db.db { fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) } } + +func (db *MemDB) NewBatch() Batch { + return &memDBBatch{db, nil} +} + +//-------------------------------------------------------------------------------- + +type memDBBatch struct { + db *MemDB + ops []operation +} + +type opType int + +const ( + opTypeSet = 1 + opTypeDelete = 2 +) + +type operation struct { + opType + key []byte + value []byte +} + +func (mBatch *memDBBatch) Set(key, value []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +} + +func (mBatch *memDBBatch) Delete(key []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +} + +func (mBatch *memDBBatch) Write() { + mBatch.db.mtx.Lock() + defer mBatch.db.mtx.Unlock() + + for _, op := range mBatch.ops { + if op.opType == opTypeSet { + mBatch.db.db[string(op.key)] = 
op.value + } else if op.opType == opTypeDelete { + delete(mBatch.db.db, string(op.key)) + } + } + +} From 226eb6554f293ef042298a51d9598ee76891796e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 29 Nov 2016 16:55:32 -0800 Subject: [PATCH 081/515] Support for Run() convenience function --- util.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 util.go diff --git a/util.go b/util.go new file mode 100644 index 000000000..4763ff715 --- /dev/null +++ b/util.go @@ -0,0 +1,22 @@ +package process + +import ( + . "github.com/tendermint/go-common" +) + +// Runs a command and gets the result. +func Run(command string, args []string) (string, bool, error) { + outFile := NewBufferCloser(nil) + proc, err := StartProcess("", command, args, nil, outFile) + if err != nil { + return "", false, err + } + + <-proc.WaitCh + + if proc.ExitState.Success() { + return string(outFile.Bytes()), true, nil + } else { + return string(outFile.Bytes()), false, nil + } +} From 5ab5538d6c4559991b9d468aa4c1824a2ab14763 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 30 Nov 2016 20:15:29 -0800 Subject: [PATCH 082/515] Move cleveldb to own file w/ build tag condition 'gcc' --- level_db2.go => c_level_db.go | 46 +++++++++++++---------- level_db2_test.go => c_level_db_test.go | 4 +- db.go | 50 +++++++++++-------------- level_db.go => go_level_db.go | 44 ++++++++++++---------- level_db_test.go => go_level_db_test.go | 2 +- mem_db.go | 6 +++ 6 files changed, 84 insertions(+), 68 deletions(-) rename level_db2.go => c_level_db.go (63%) rename level_db2_test.go => c_level_db_test.go (95%) rename level_db.go => go_level_db.go (58%) rename level_db_test.go => go_level_db_test.go (96%) diff --git a/level_db2.go b/c_level_db.go similarity index 63% rename from level_db2.go rename to c_level_db.go index e3049aae3..638be41b1 100644 --- a/level_db2.go +++ b/c_level_db.go @@ -1,3 +1,5 @@ +// +build gcc + package db import ( @@ -9,15 +11,21 @@ import ( . 
"github.com/tendermint/go-common" ) -type LevelDB2 struct { +func init() { + registerDBCreator(CLevelDBBackendStr, func(name string, dir string) (DB, error) { + return NewCLevelDB(name, dir) + }, false) +} + +type CLevelDB struct { db *levigo.DB ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions } -func NewLevelDB2(name string) (*LevelDB2, error) { - dbPath := path.Join(name) +func NewCLevelDB(name string, dir string) (*CLevelDB, error) { + dbPath := path.Join(dir, name+".db") opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 30)) @@ -30,7 +38,7 @@ func NewLevelDB2(name string) (*LevelDB2, error) { wo := levigo.NewWriteOptions() woSync := levigo.NewWriteOptions() woSync.SetSync(true) - database := &LevelDB2{ + database := &CLevelDB{ db: db, ro: ro, wo: wo, @@ -39,7 +47,7 @@ func NewLevelDB2(name string) (*LevelDB2, error) { return database, nil } -func (db *LevelDB2) Get(key []byte) []byte { +func (db *CLevelDB) Get(key []byte) []byte { res, err := db.db.Get(db.ro, key) if err != nil { PanicCrisis(err) @@ -47,46 +55,46 @@ func (db *LevelDB2) Get(key []byte) []byte { return res } -func (db *LevelDB2) Set(key []byte, value []byte) { +func (db *CLevelDB) Set(key []byte, value []byte) { err := db.db.Put(db.wo, key, value) if err != nil { PanicCrisis(err) } } -func (db *LevelDB2) SetSync(key []byte, value []byte) { +func (db *CLevelDB) SetSync(key []byte, value []byte) { err := db.db.Put(db.woSync, key, value) if err != nil { PanicCrisis(err) } } -func (db *LevelDB2) Delete(key []byte) { +func (db *CLevelDB) Delete(key []byte) { err := db.db.Delete(db.wo, key) if err != nil { PanicCrisis(err) } } -func (db *LevelDB2) DeleteSync(key []byte) { +func (db *CLevelDB) DeleteSync(key []byte) { err := db.db.Delete(db.woSync, key) if err != nil { PanicCrisis(err) } } -func (db *LevelDB2) DB() *levigo.DB { +func (db *CLevelDB) DB() *levigo.DB { return db.db } -func (db *LevelDB2) Close() { +func (db *CLevelDB) Close() { db.db.Close() 
db.ro.Close() db.wo.Close() db.woSync.Close() } -func (db *LevelDB2) Print() { +func (db *CLevelDB) Print() { iter := db.db.NewIterator(db.ro) defer iter.Close() for iter.Seek(nil); iter.Valid(); iter.Next() { @@ -96,27 +104,27 @@ func (db *LevelDB2) Print() { } } -func (db *LevelDB2) NewBatch() Batch { +func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() - return &levelDB2Batch{db, batch} + return &cLevelDBBatch{db, batch} } //-------------------------------------------------------------------------------- -type levelDB2Batch struct { - db *LevelDB2 +type cLevelDBBatch struct { + db *CLevelDB batch *levigo.WriteBatch } -func (mBatch *levelDB2Batch) Set(key, value []byte) { +func (mBatch *cLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } -func (mBatch *levelDB2Batch) Delete(key []byte) { +func (mBatch *cLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } -func (mBatch *levelDB2Batch) Write() { +func (mBatch *cLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) if err != nil { PanicCrisis(err) diff --git a/level_db2_test.go b/c_level_db_test.go similarity index 95% rename from level_db2_test.go rename to c_level_db_test.go index 27a558407..dbebcd902 100644 --- a/level_db2_test.go +++ b/c_level_db_test.go @@ -1,3 +1,5 @@ +// +build gcc + package db import ( @@ -16,7 +18,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewLevelDB2(Fmt("test_%x", RandStr(12))) + db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12))) if err != nil { b.Fatal(err.Error()) return diff --git a/db.go b/db.go index ab38501e2..dac6df0cc 100644 --- a/db.go +++ b/db.go @@ -1,10 +1,6 @@ package db -import ( - "path" - - . "github.com/tendermint/go-common" -) +import . 
"github.com/tendermint/go-common" type DB interface { Get([]byte) []byte @@ -27,30 +23,28 @@ type Batch interface { //----------------------------------------------------------------------------- -// Database types -const DBBackendMemDB = "memdb" -const DBBackendLevelDB = "leveldb" -const DBBackendLevelDB2 = "leveldb2" +const ( + CLevelDBBackendStr = "goleveldb" + GoLevelDBBackendStr = "cleveldb" + MemDBBackendStr = "memdb" +) + +type dbCreator func(name string, dir string) (DB, error) + +var backends map[string]dbCreator + +func registerDBCreator(backend string, creator dbCreator, force bool) { + _, ok := backends[backend] + if !force && ok { + return + } + backends[backend] = creator +} func NewDB(name string, backend string, dir string) DB { - switch backend { - case DBBackendMemDB: - db := NewMemDB() - return db - case DBBackendLevelDB: - db, err := NewLevelDB(path.Join(dir, name+".db")) - if err != nil { - PanicCrisis(err) - } - return db - case DBBackendLevelDB2: - db, err := NewLevelDB2(path.Join(dir, name+".db")) - if err != nil { - PanicCrisis(err) - } - return db - default: - PanicSanity(Fmt("Unknown DB backend: %v", backend)) + db, err := backends[backend](name, dir) + if err != nil { + PanicSanity(Fmt("Error initializing DB: %v", err)) } - return nil + return db } diff --git a/level_db.go b/go_level_db.go similarity index 58% rename from level_db.go rename to go_level_db.go index 360362285..73c307b24 100644 --- a/level_db.go +++ b/go_level_db.go @@ -11,21 +11,27 @@ import ( . 
"github.com/tendermint/go-common" ) -type LevelDB struct { +func init() { + registerDBCreator(GoLevelDBBackendStr, func(name string, dir string) (DB, error) { + return NewGoLevelDB(name, dir) + }, false) +} + +type GoLevelDB struct { db *leveldb.DB } -func NewLevelDB(name string) (*LevelDB, error) { - dbPath := path.Join(name) +func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { + dbPath := path.Join(dir, name+".db") db, err := leveldb.OpenFile(dbPath, nil) if err != nil { return nil, err } - database := &LevelDB{db: db} + database := &GoLevelDB{db: db} return database, nil } -func (db *LevelDB) Get(key []byte) []byte { +func (db *GoLevelDB) Get(key []byte) []byte { res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -37,43 +43,43 @@ func (db *LevelDB) Get(key []byte) []byte { return res } -func (db *LevelDB) Set(key []byte, value []byte) { +func (db *GoLevelDB) Set(key []byte, value []byte) { err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) } } -func (db *LevelDB) SetSync(key []byte, value []byte) { +func (db *GoLevelDB) SetSync(key []byte, value []byte) { err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) } } -func (db *LevelDB) Delete(key []byte) { +func (db *GoLevelDB) Delete(key []byte) { err := db.db.Delete(key, nil) if err != nil { PanicCrisis(err) } } -func (db *LevelDB) DeleteSync(key []byte) { +func (db *GoLevelDB) DeleteSync(key []byte) { err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) } } -func (db *LevelDB) DB() *leveldb.DB { +func (db *GoLevelDB) DB() *leveldb.DB { return db.db } -func (db *LevelDB) Close() { +func (db *GoLevelDB) Close() { db.db.Close() } -func (db *LevelDB) Print() { +func (db *GoLevelDB) Print() { iter := db.db.NewIterator(nil, nil) for iter.Next() { key := iter.Key() @@ -82,27 +88,27 @@ func (db *LevelDB) Print() { } } -func (db *LevelDB) NewBatch() Batch { +func (db *GoLevelDB) NewBatch() 
Batch { batch := new(leveldb.Batch) - return &levelDBBatch{db, batch} + return &goLevelDBBatch{db, batch} } //-------------------------------------------------------------------------------- -type levelDBBatch struct { - db *LevelDB +type goLevelDBBatch struct { + db *GoLevelDB batch *leveldb.Batch } -func (mBatch *levelDBBatch) Set(key, value []byte) { +func (mBatch *goLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } -func (mBatch *levelDBBatch) Delete(key []byte) { +func (mBatch *goLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } -func (mBatch *levelDBBatch) Write() { +func (mBatch *goLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.batch, nil) if err != nil { PanicCrisis(err) diff --git a/level_db_test.go b/go_level_db_test.go similarity index 96% rename from level_db_test.go rename to go_level_db_test.go index 2a8e9ac1e..b7fe07bd7 100644 --- a/level_db_test.go +++ b/go_level_db_test.go @@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewLevelDB(Fmt("test_%x", RandStr(12))) + db, err := NewCLevelDB(Fmt("test_%x", RandStr(12))) if err != nil { b.Fatal(err.Error()) return diff --git a/mem_db.go b/mem_db.go index d27159dab..eb1e54b56 100644 --- a/mem_db.go +++ b/mem_db.go @@ -5,6 +5,12 @@ import ( "sync" ) +func init() { + registerDBCreator(MemDBBackendStr, func(name string, dir string) (DB, error) { + return NewMemDB(), nil + }, false) +} + type MemDB struct { mtx sync.Mutex db map[string][]byte From 5e2a1d3e300743380a329499804dde6bfb0af7d5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 30 Nov 2016 20:22:35 -0800 Subject: [PATCH 083/515] Fix tests; Support 'leveldb' for legacy --- c_level_db.go | 6 ++++-- c_level_db_test.go | 2 +- db.go | 3 ++- go_level_db.go | 6 ++++-- go_level_db_test.go | 2 +- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/c_level_db.go b/c_level_db.go index 638be41b1..6c87c2949 100644 
--- a/c_level_db.go +++ b/c_level_db.go @@ -12,9 +12,11 @@ import ( ) func init() { - registerDBCreator(CLevelDBBackendStr, func(name string, dir string) (DB, error) { + dbCreator := func(name string, dir string) (DB, error) { return NewCLevelDB(name, dir) - }, false) + } + registerDBCreator(LevelDBBackendStr, dbCreator, true) + registerDBCreator(CLevelDBBackendStr, dbCreator, false) } type CLevelDB struct { diff --git a/c_level_db_test.go b/c_level_db_test.go index dbebcd902..3baa8ba4c 100644 --- a/c_level_db_test.go +++ b/c_level_db_test.go @@ -18,7 +18,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12))) + db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return diff --git a/db.go b/db.go index dac6df0cc..0649c2fdd 100644 --- a/db.go +++ b/db.go @@ -24,6 +24,7 @@ type Batch interface { //----------------------------------------------------------------------------- const ( + LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. 
CLevelDBBackendStr = "goleveldb" GoLevelDBBackendStr = "cleveldb" MemDBBackendStr = "memdb" @@ -31,7 +32,7 @@ const ( type dbCreator func(name string, dir string) (DB, error) -var backends map[string]dbCreator +var backends = map[string]dbCreator{} func registerDBCreator(backend string, creator dbCreator, force bool) { _, ok := backends[backend] diff --git a/go_level_db.go b/go_level_db.go index 73c307b24..a16c5d9e0 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -12,9 +12,11 @@ import ( ) func init() { - registerDBCreator(GoLevelDBBackendStr, func(name string, dir string) (DB, error) { + dbCreator := func(name string, dir string) (DB, error) { return NewGoLevelDB(name, dir) - }, false) + } + registerDBCreator(LevelDBBackendStr, dbCreator, false) + registerDBCreator(GoLevelDBBackendStr, dbCreator, false) } type GoLevelDB struct { diff --git a/go_level_db_test.go b/go_level_db_test.go index b7fe07bd7..b5dad1163 100644 --- a/go_level_db_test.go +++ b/go_level_db_test.go @@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(Fmt("test_%x", RandStr(12))) + db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return From 70e694ee76f09058ea38c9ba81b4aa621bd54df1 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 6 Dec 2016 01:39:04 -0800 Subject: [PATCH 084/515] Fix ThrottleTimer null-pointer error / race condition --- throttle_timer.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/throttle_timer.go b/throttle_timer.go index 0966e913c..38ef4e9a3 100644 --- a/throttle_timer.go +++ b/throttle_timer.go @@ -26,7 +26,9 @@ func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { var ch = make(chan struct{}) var quit = make(chan struct{}) var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + t.mtx.Lock() t.timer = time.AfterFunc(dur, t.fireRoutine) + t.mtx.Unlock() t.timer.Stop() return t } From 
0416e0aa9c68205aa44844096f9f151ada9d0405 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 6 Dec 2016 01:46:23 -0800 Subject: [PATCH 085/515] Close opened files --- group.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/group.go b/group.go index 4de0d0346..ee1a94158 100644 --- a/group.go +++ b/group.go @@ -444,6 +444,7 @@ func (g *Group) readGroupInfo() GroupInfo { if err != nil { panic(err) } + defer dir.Close() fiz, err := dir.Readdir(0) if err != nil { panic(err) @@ -598,6 +599,9 @@ func (gr *GroupReader) openFile(index int) error { curReader := bufio.NewReader(curFile) // Update gr.cur* + if gr.curFile != nil { + gr.curFile.Close() // TODO return error? + } gr.curIndex = index gr.curFile = curFile gr.curReader = curReader From 7f507d69fa4c13b34e7a17ff5c87d1eaaa759145 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 6 Dec 2016 02:08:05 -0800 Subject: [PATCH 086/515] Include pwd dir to StartProcess --- process.go | 3 ++- util.go | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/process.go b/process.go index cf2aa0289..7d2ae9140 100644 --- a/process.go +++ b/process.go @@ -24,8 +24,9 @@ type Process struct { // execPath: command name // args: args to command. (should not include name) -func StartProcess(label string, execPath string, args []string, inFile io.Reader, outFile io.WriteCloser) (*Process, error) { +func StartProcess(label string, dir string, execPath string, args []string, inFile io.Reader, outFile io.WriteCloser) (*Process, error) { cmd := exec.Command(execPath, args...) + cmd.Dir = dir cmd.Stdout = outFile cmd.Stderr = outFile cmd.Stdin = inFile diff --git a/util.go b/util.go index 4763ff715..4976e46e5 100644 --- a/util.go +++ b/util.go @@ -5,9 +5,9 @@ import ( ) // Runs a command and gets the result. 
-func Run(command string, args []string) (string, bool, error) { +func Run(dir string, command string, args []string) (string, bool, error) { outFile := NewBufferCloser(nil) - proc, err := StartProcess("", command, args, nil, outFile) + proc, err := StartProcess("", dir, command, args, nil, outFile) if err != nil { return "", false, err } From a552e49b501bd438ed0055e180371b452aa2cfed Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 12 Dec 2016 23:08:02 -0500 Subject: [PATCH 087/515] Reverts commit f40b1b to a6a67e --- os.go | 165 ++++++++++++++++++++++++++++++++++++++++++++---- os_test.go | 64 +++++++++++++++++++ service.go | 29 +++++---- service_test.go | 24 ------- 4 files changed, 232 insertions(+), 50 deletions(-) create mode 100644 os_test.go delete mode 100644 service_test.go diff --git a/os.go b/os.go index e8943c0c5..a273bec48 100644 --- a/os.go +++ b/os.go @@ -7,12 +7,19 @@ import ( "os" "os/signal" "strings" + "sync" + "syscall" + "time" ) var ( GoPath = os.Getenv("GOPATH") ) +func init() { + initAFSIGHUPWatcher() +} + func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) @@ -105,25 +112,159 @@ func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { //-------------------------------------------------------------------------------- -func Tempfile(prefix string) (*os.File, string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - PanicCrisis(err) +/* AutoFile usage + +// Create/Append to ./autofile_test +af, err := OpenAutoFile("autofile_test") +if err != nil { + panic(err) +} + +// Stream of writes. +// During this time, the file may be moved e.g. by logRotate. +for i := 0; i < 60; i++ { + af.Write([]byte(Fmt("LOOP(%v)", i))) + time.Sleep(time.Second) +} + +// Close the AutoFile +err = af.Close() +if err != nil { + panic(err) +} +*/ + +const autoFileOpenDuration = 1000 * time.Millisecond + +// Automatically closes and re-opens file for writing. 
+// This is useful for using a log file with the logrotate tool. +type AutoFile struct { + ID string + Path string + ticker *time.Ticker + mtx sync.Mutex + file *os.File +} + +func OpenAutoFile(path string) (af *AutoFile, err error) { + af = &AutoFile{ + ID: RandStr(12) + ":" + path, + Path: path, + ticker: time.NewTicker(autoFileOpenDuration), } - return file, file.Name() + if err = af.openFile(); err != nil { + return + } + go af.processTicks() + autoFileWatchers.addAutoFile(af) + return +} + +func (af *AutoFile) Close() error { + af.ticker.Stop() + err := af.closeFile() + autoFileWatchers.removeAutoFile(af) + return err +} + +func (af *AutoFile) processTicks() { + for { + _, ok := <-af.ticker.C + if !ok { + return // Done. + } + af.closeFile() + } +} + +func (af *AutoFile) closeFile() (err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + file := af.file + if file == nil { + return nil + } + af.file = nil + return file.Close() +} + +func (af *AutoFile) Write(b []byte) (n int, err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + if af.file == nil { + if err = af.openFile(); err != nil { + return + } + } + return af.file.Write(b) } -func Tempdir(prefix string) (*os.File, string) { - tempDir := os.TempDir() + "/" + prefix + RandStr(12) - err := EnsureDir(tempDir, 0700) +func (af *AutoFile) openFile() error { + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { - panic(Fmt("Error creating temp dir: %v", err)) + return err } - dir, err := os.Open(tempDir) + af.file = file + return nil +} + +//-------------------------------------------------------------------------------- + +var autoFileWatchers *afSIGHUPWatcher + +func initAFSIGHUPWatcher() { + autoFileWatchers = newAFSIGHUPWatcher() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + go func() { + for _ = range c { + autoFileWatchers.closeAll() + } + }() +} + +type afSIGHUPWatcher struct { + mtx sync.Mutex + autoFiles map[string]*AutoFile +} + +func 
newAFSIGHUPWatcher() *afSIGHUPWatcher { + return &afSIGHUPWatcher{ + autoFiles: make(map[string]*AutoFile, 10), + } +} + +func (afw *afSIGHUPWatcher) addAutoFile(af *AutoFile) { + afw.mtx.Lock() + afw.autoFiles[af.ID] = af + afw.mtx.Unlock() +} + +func (afw *afSIGHUPWatcher) removeAutoFile(af *AutoFile) { + afw.mtx.Lock() + delete(afw.autoFiles, af.ID) + afw.mtx.Unlock() +} + +func (afw *afSIGHUPWatcher) closeAll() { + afw.mtx.Lock() + for _, af := range afw.autoFiles { + af.closeFile() + } + afw.mtx.Unlock() +} + +//-------------------------------------------------------------------------------- + +func Tempfile(prefix string) (*os.File, string) { + file, err := ioutil.TempFile("", prefix) if err != nil { - panic(Fmt("Error opening temp dir: %v", err)) + PanicCrisis(err) } - return dir, tempDir + return file, file.Name() } //-------------------------------------------------------------------------------- diff --git a/os_test.go b/os_test.go new file mode 100644 index 000000000..c0effdc2b --- /dev/null +++ b/os_test.go @@ -0,0 +1,64 @@ +package common + +import ( + "os" + "syscall" + "testing" +) + +func TestSIGHUP(t *testing.T) { + + // First, create an AutoFile writing to a tempfile dir + file, name := Tempfile("sighup_test") + err := file.Close() + if err != nil { + t.Fatalf("Error creating tempfile: %v", err) + } + // Here is the actual AutoFile + af, err := OpenAutoFile(name) + if err != nil { + t.Fatalf("Error creating autofile: %v", err) + } + + // Write to the file. + _, err = af.Write([]byte("Line 1\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 2\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + + // Send SIGHUP to self. + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + + // Move the file over + err = os.Rename(name, name+"_old") + if err != nil { + t.Fatalf("Error moving autofile: %v", err) + } + + // Write more to the file. 
+ _, err = af.Write([]byte("Line 3\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + _, err = af.Write([]byte("Line 4\n")) + if err != nil { + t.Fatalf("Error writing to autofile: %v", err) + } + err = af.Close() + if err != nil { + t.Fatalf("Error closing autofile") + } + + // Both files should exist + if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + t.Errorf("Unexpected body %s", body) + } + if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + t.Errorf("Unexpected body %s", body) + } + +} diff --git a/service.go b/service.go index e2d31925b..86ef20ead 100644 --- a/service.go +++ b/service.go @@ -65,7 +65,6 @@ type BaseService struct { name string started uint32 // atomic stopped uint32 // atomic - Quit chan struct{} // The "subclass" of BaseService impl Service @@ -75,7 +74,6 @@ func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { return &BaseService{ log: log, name: name, - Quit: make(chan struct{}), impl: impl, } } @@ -104,8 +102,6 @@ func (bs *BaseService) Start() (bool, error) { } // Implements Service -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStart() func (bs *BaseService) OnStart() error { return nil } // Implements Service @@ -115,7 +111,6 @@ func (bs *BaseService) Stop() bool { bs.log.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) } bs.impl.OnStop() - close(bs.Quit) return true } else { if bs.log != nil { @@ -126,8 +121,6 @@ func (bs *BaseService) Stop() bool { } // Implements Service -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} // Implements Service @@ -158,10 +151,6 @@ func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } -func (bs *BaseService) Wait() { - <-bs.Quit -} - // Implements Servce func (bs *BaseService) String() string { return 
bs.name @@ -171,13 +160,25 @@ func (bs *BaseService) String() string { type QuitService struct { BaseService + Quit chan struct{} } func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { - if log != nil { - log.Warn("QuitService is deprecated, use BaseService instead") - } return &QuitService{ BaseService: *NewBaseService(log, name, impl), + Quit: nil, + } +} + +// NOTE: when overriding OnStart, must call .QuitService.OnStart(). +func (qs *QuitService) OnStart() error { + qs.Quit = make(chan struct{}) + return nil +} + +// NOTE: when overriding OnStop, must call .QuitService.OnStop(). +func (qs *QuitService) OnStop() { + if qs.Quit != nil { + close(qs.Quit) } } diff --git a/service_test.go b/service_test.go deleted file mode 100644 index 6e24dad6a..000000000 --- a/service_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package common - -import ( - "testing" -) - -func TestBaseServiceWait(t *testing.T) { - - type TestService struct { - BaseService - } - ts := &TestService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - ts.Start() - - go func() { - ts.Stop() - }() - - for i := 0; i < 10; i++ { - ts.Wait() - } - -} From 33e35c47326ffe1b2245b39c04125dba01e94d35 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 22 Dec 2016 15:44:51 -0500 Subject: [PATCH 088/515] go and c strings were swapped --- db.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/db.go b/db.go index 0649c2fdd..8ab1c43be 100644 --- a/db.go +++ b/db.go @@ -25,8 +25,8 @@ type Batch interface { const ( LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. 
- CLevelDBBackendStr = "goleveldb" - GoLevelDBBackendStr = "cleveldb" + CLevelDBBackendStr = "cleveldb" + GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" ) From 2337086736a6adeb2de6f66739b66ecd77535997 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 12 Jan 2017 14:32:48 -0500 Subject: [PATCH 089/515] use mtx in OnStop --- events.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/events.go b/events.go index 9d4f2fa2e..15cbfd5b4 100644 --- a/events.go +++ b/events.go @@ -54,6 +54,8 @@ func (evsw *eventSwitch) OnStart() error { } func (evsw *eventSwitch) OnStop() { + evsw.mtx.Lock() + defer evsw.mtx.Unlock() evsw.BaseService.OnStop() evsw.eventCells = nil evsw.listeners = nil From 72f6dacd22a686cdf7fcd60286503e3aceda77ba Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 12 Jan 2017 22:32:20 -0500 Subject: [PATCH 090/515] fix tests --- c_level_db_test.go | 2 +- go_level_db_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/c_level_db_test.go b/c_level_db_test.go index 3baa8ba4c..b90161498 100644 --- a/c_level_db_test.go +++ b/c_level_db_test.go @@ -18,7 +18,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return diff --git a/go_level_db_test.go b/go_level_db_test.go index b5dad1163..24b64734d 100644 --- a/go_level_db_test.go +++ b/go_level_db_test.go @@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return From 339e135776142939d82bc8e699db0bf391fd938d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Fri, 27 Jan 2017 20:37:04 -0800 Subject: [PATCH 091/515] Add 
IsDirEmpty --- os.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/os.go b/os.go index e8943c0c5..9dc81c579 100644 --- a/os.go +++ b/os.go @@ -3,6 +3,7 @@ package common import ( "bufio" "fmt" + "io" "io/ioutil" "os" "os/signal" @@ -44,6 +45,20 @@ func EnsureDir(dir string, mode os.FileMode) error { return nil } +func IsDirEmpty(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + return true, err //folder is non-existent + } + defer f.Close() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} + func FileExists(filePath string) bool { _, err := os.Stat(filePath) return !os.IsNotExist(err) From c3b80061662fe0d3108fc35e283b5a79444d9fa7 Mon Sep 17 00:00:00 2001 From: Alessio Treglia Date: Sat, 28 Jan 2017 12:42:01 +0000 Subject: [PATCH 092/515] Add LICENSE file --- LICENSE | 203 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 203 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..3a48142a3 --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ +All files are Copyright (C) 2017 Tendermint + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 1c977f78fd17d2522123908016706cfcc27801e3 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 28 Jan 2017 08:01:29 -0800 Subject: [PATCH 093/515] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 3a48142a3..9527e2688 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -All files are Copyright (C) 2017 Tendermint +All files are Copyright (C) 2017 All in Bits, Inc Apache License Version 2.0, January 2004 From a816ff0babff42d13a50ab09679cd34cf871d5ae Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Mon, 27 Feb 2017 14:52:48 +0100 Subject: [PATCH 094/515] Add test --- Makefile | 9 +++ README.md | 177 ++++++++++++++++++++++++++++++++++++++++++++++++- events.go | 3 + events_test.go | 46 +++++++++++++ 4 files changed, 233 insertions(+), 2 deletions(-) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..295001f8d --- /dev/null +++ b/Makefile @@ -0,0 +1,9 @@ +.PHONY: docs +REPO:=github.com/tendermint/go-events + +docs: + @go get github.com/davecheney/godoc2md + godoc2md $(REPO) > README.md + +test: + go test -v ./... diff --git a/README.md b/README.md index 737cbaaec..c435e7283 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,175 @@ -# go-events -PubSub in Go with event caching. 
+ + +# events +`import "github.com/tendermint/go-events"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) + +## Overview +Pub-Sub in go with event caching + + + + +## Index +* [type EventCache](#EventCache) + * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) + * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) + * [func (evc *EventCache) Flush()](#EventCache.Flush) +* [type EventCallback](#EventCallback) +* [type EventData](#EventData) +* [type EventSwitch](#EventSwitch) + * [func NewEventSwitch() EventSwitch](#NewEventSwitch) +* [type Eventable](#Eventable) +* [type Fireable](#Fireable) + + +#### Package files +[event_cache.go](/src/github.com/tendermint/go-events/event_cache.go) [events.go](/src/github.com/tendermint/go-events/events.go) [log.go](/src/github.com/tendermint/go-events/log.go) + + + + + + +## type [EventCache](/src/target/event_cache.go?s=152:215#L1) +``` go +type EventCache struct { + // contains filtered or unexported fields +} +``` +An EventCache buffers events for a Fireable +All events are cached. Filtering happens on Flush + + + + + + + +### func [NewEventCache](/src/target/event_cache.go?s=275:320#L5) +``` go +func NewEventCache(evsw Fireable) *EventCache +``` +Create a new EventCache with an EventSwitch as backend + + + + + +### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=534:596#L19) +``` go +func (evc *EventCache) FireEvent(event string, data EventData) +``` +Cache an event to be fired upon finality. + + + + +### func (\*EventCache) [Flush](/src/target/event_cache.go?s=773:803#L26) +``` go +func (evc *EventCache) Flush() +``` +Fire events by running evsw.FireEvent on all cached events. Blocks. 
+Clears cached events + + + + +## type [EventCallback](/src/target/events.go?s=4182:4221#L175) +``` go +type EventCallback func(data EventData) +``` + + + + + + + + + +## type [EventData](/src/target/events.go?s=236:287#L4) +``` go +type EventData interface { +} +``` +Generic event data can be typed and registered with tendermint/go-wire +via concrete implementation of this interface + + + + + + + + + + +## type [EventSwitch](/src/target/events.go?s=553:760#L19) +``` go +type EventSwitch interface { + Service + Fireable + + AddListenerForEvent(listenerID, event string, cb EventCallback) + RemoveListenerForEvent(event string, listenerID string) + RemoveListener(listenerID string) +} +``` + + + + + + +### func [NewEventSwitch](/src/target/events.go?s=902:935#L36) +``` go +func NewEventSwitch() EventSwitch +``` + + + + +## type [Eventable](/src/target/events.go?s=371:433#L10) +``` go +type Eventable interface { + SetEventSwitch(evsw EventSwitch) +} +``` +reactors and other modules should export +this interface to become eventable + + + + + + + + + + +## type [Fireable](/src/target/events.go?s=483:551#L15) +``` go +type Fireable interface { + FireEvent(event string, data EventData) +} +``` +an event switch or cache implements fireable + + + + + + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/events.go b/events.go index 15cbfd5b4..e613a8e04 100644 --- a/events.go +++ b/events.go @@ -1,3 +1,6 @@ +/* +Pub-Sub in go with event caching +*/ package events import ( diff --git a/events_test.go b/events_test.go index 8766a57d2..c1b48b16f 100644 --- a/events_test.go +++ b/events_test.go @@ -5,6 +5,8 @@ import ( "math/rand" "testing" "time" + + "github.com/stretchr/testify/assert" ) // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single @@ -207,6 +209,50 @@ func TestAddAndRemoveListener(t *testing.T) { } } +// TestRemoveListener does basic tests on adding and removing +func 
TestRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + started, err := evsw.Start() + if started == false || err != nil { + t.Errorf("Failed to start EventSwitch, error: %v", err) + } + count := 10 + sum1, sum2 := 0, 0 + // add some listeners and make sure they work + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + sum1 += 1 + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + sum2 += 1 + }) + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count, sum1) + assert.Equal(t, count, sum2) + + // remove one by event and make sure it is gone + evsw.RemoveListenerForEvent("event2", "listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) + + // remove the listener entirely and make sure both gone + evsw.RemoveListener("listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) +} + // TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two // listeners to three events, and fires a thousand integers for each event. 
// These two listeners serve as the baseline validation while other listeners From ab0cca734ad3bf74f3d8f8ec2d5ee55889b59b36 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 4 Mar 2017 17:11:59 -0800 Subject: [PATCH 095/515] Update LICENSE.md --- LICENSE.md | 206 +---------------------------------------------------- 1 file changed, 2 insertions(+), 204 deletions(-) diff --git a/LICENSE.md b/LICENSE.md index 25c3191e9..aa3671357 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,206 +1,4 @@ Tendermint Go-DB -Copyright (C) 2015 Tendermint +Copyright (C) 2015 All in Bits, Inc -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. 
We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. 
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 
- -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 
- -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS +Released under the Apache 2.0 license From 286cbbd99d9ff76681b7c18cc506d476ccf68725 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 4 Mar 2017 17:12:18 -0800 Subject: [PATCH 096/515] Update LICENSE.md --- LICENSE.md | 207 +---------------------------------------------------- 1 file changed, 2 insertions(+), 205 deletions(-) diff --git a/LICENSE.md b/LICENSE.md index 25c3191e9..ab8da59d8 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,206 +1,3 @@ -Tendermint Go-DB -Copyright (C) 2015 Tendermint +Tendermint Go-DB Copyright (C) 2015 All in Bits, Inc -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>. - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/> - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. 
- -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. 
- -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. 
-The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. 
- -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
- -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
-b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
-A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
- -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
- -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. 
-If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
- -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. 
-If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS +Released under the Apache2.0 license From dcb015dff6c7af21e65c8e2f3b450df19d38c777 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 5 Mar 2017 03:24:25 -0500 Subject: [PATCH 097/515] repeat_timer: drain channel in Stop; done -> wg --- repeat_timer.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/repeat_timer.go b/repeat_timer.go index f027af3f3..d7d9154d4 100644 --- a/repeat_timer.go +++ b/repeat_timer.go @@ -1,7 +1,9 @@ package common -import "time" -import "sync" +import ( + "sync" + "time" +) /* RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. 
@@ -15,7 +17,7 @@ type RepeatTimer struct { name string ticker *time.Ticker quit chan struct{} - done chan struct{} + wg *sync.WaitGroup dur time.Duration } @@ -24,10 +26,11 @@ func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { Ch: make(chan time.Time), ticker: time.NewTicker(dur), quit: make(chan struct{}), - done: make(chan struct{}), + wg: new(sync.WaitGroup), name: name, dur: dur, } + t.wg.Add(1) go t.fireRoutine(t.ticker) return t } @@ -39,7 +42,7 @@ func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { t.Ch <- t_ case <-t.quit: // needed so we know when we can reset t.quit - t.done <- struct{}{} + t.wg.Done() return } } @@ -54,6 +57,7 @@ func (t *RepeatTimer) Reset() { t.ticker = time.NewTicker(t.dur) t.quit = make(chan struct{}) + t.wg.Add(1) go t.fireRoutine(t.ticker) } @@ -69,8 +73,13 @@ func (t *RepeatTimer) Stop() bool { exists := t.ticker != nil if exists { t.ticker.Stop() // does not close the channel + select { + case <-t.Ch: + // read off channel if there's anything there + default: + } close(t.quit) - <-t.done + t.wg.Wait() // must wait for quit to close else we race Reset t.ticker = nil } return exists From bb8104b6249b4429a0bcde533a2db6e17ca550f0 Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Fri, 17 Mar 2017 13:27:20 -0400 Subject: [PATCH 098/515] Exposed the LevelDB iterator in the DB struct. 
--- db.go | 8 ++++++++ go_level_db.go | 9 +++++++++ mem_db.go | 10 ++++++++++ 3 files changed, 27 insertions(+) diff --git a/db.go b/db.go index 8ab1c43be..9cd4e1c21 100644 --- a/db.go +++ b/db.go @@ -12,6 +12,8 @@ type DB interface { NewBatch() Batch // For debugging + Iterator() Iterator + Next(Iterator) (key []byte, value []byte) Print() } @@ -21,6 +23,12 @@ type Batch interface { Write() } +type Iterator interface { + Next() bool + Key() []byte + Value() []byte +} + //----------------------------------------------------------------------------- const ( diff --git a/go_level_db.go b/go_level_db.go index a16c5d9e0..35c380ad8 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -90,6 +90,15 @@ func (db *GoLevelDB) Print() { } } +func (db *GoLevelDB) Iterator() Iterator { + return db.db.NewIterator(nil, nil) +} + +func (db *GoLevelDB) Next(iter Iterator) ([]byte, []byte) { + iter.Next() + return iter.Key(), iter.Value() +} + func (db *GoLevelDB) NewBatch() Batch { batch := new(leveldb.Batch) return &goLevelDBBatch{db, batch} diff --git a/mem_db.go b/mem_db.go index eb1e54b56..dcd86e6d5 100644 --- a/mem_db.go +++ b/mem_db.go @@ -65,6 +65,16 @@ func (db *MemDB) Print() { } } +// TODO: needs to be wired to range db.db +func (db *MemDB) Iterator() Iterator { + return nil +} + +// TODO: needs to be wired to range db.db +func (db *MemDB) Next(iter Iterator) (key []byte, value []byte) { + return nil, nil +} + func (db *MemDB) NewBatch() Batch { return &memDBBatch{db, nil} } From d6205eb4ca60631dd4abee93cef47d837be66af4 Mon Sep 17 00:00:00 2001 From: "Paul W. 
Homer" Date: Fri, 17 Mar 2017 14:34:11 -0400 Subject: [PATCH 099/515] Changed the iterations --- go_level_db.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/go_level_db.go b/go_level_db.go index 35c380ad8..3020d4a03 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -95,8 +95,10 @@ func (db *GoLevelDB) Iterator() Iterator { } func (db *GoLevelDB) Next(iter Iterator) ([]byte, []byte) { - iter.Next() - return iter.Key(), iter.Value() + if iter.Next() { + return iter.Key(), iter.Value() + } + return nil, nil } func (db *GoLevelDB) NewBatch() Batch { From 6064c80250f32dfc55a7bfd6113f4ed87c3fcd97 Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Thu, 23 Mar 2017 11:37:46 -0400 Subject: [PATCH 100/515] Modified all db types to conform to the interface changes. --- c_level_db.go | 17 +++++++++++++++++ db.go | 5 +++-- go_level_db.go | 32 +++++++++++++++++++++++++------- mem_db.go | 49 +++++++++++++++++++++++++++++++++++++++++++------ 4 files changed, 88 insertions(+), 15 deletions(-) diff --git a/c_level_db.go b/c_level_db.go index 6c87c2949..41ed4dc16 100644 --- a/c_level_db.go +++ b/c_level_db.go @@ -106,6 +106,23 @@ func (db *CLevelDB) Print() { } } +func (db *CLevelDB) Stats() map[string]string { + keys := []string{} + + stats := make(map[string]string) + for _, key := range keys { + str, err := db.db.GetProperty(key) + if err == nil { + stats[key] = str + } + } + return stats +} + +func (db *CLevelDB) Iterator() Iterator { + return db.db.NewIterator(nil, nil) +} + func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() return &cLevelDBBatch{db, batch} diff --git a/db.go b/db.go index 9cd4e1c21..b88499092 100644 --- a/db.go +++ b/db.go @@ -12,9 +12,9 @@ type DB interface { NewBatch() Batch // For debugging - Iterator() Iterator - Next(Iterator) (key []byte, value []byte) Print() + Iterator() Iterator + Stats() map[string]string } type Batch interface { @@ -25,6 +25,7 @@ type Batch interface { type Iterator interface { 
Next() bool + Key() []byte Value() []byte } diff --git a/go_level_db.go b/go_level_db.go index 3020d4a03..ac936d01b 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -82,6 +82,9 @@ func (db *GoLevelDB) Close() { } func (db *GoLevelDB) Print() { + str, _ := db.db.GetProperty("leveldb.stats") + fmt.Printf("%v\n", str) + iter := db.db.NewIterator(nil, nil) for iter.Next() { key := iter.Key() @@ -90,15 +93,30 @@ func (db *GoLevelDB) Print() { } } -func (db *GoLevelDB) Iterator() Iterator { - return db.db.NewIterator(nil, nil) -} +func (db *GoLevelDB) Stats() map[string]string { + keys := []string{ + "leveldb.num-files-at-level{n}", + "leveldb.stats", + "leveldb.sstables", + "leveldb.blockpool", + "leveldb.cachedblock", + "leveldb.openedtables", + "leveldb.alivesnaps", + "leveldb.alibeiters", + } -func (db *GoLevelDB) Next(iter Iterator) ([]byte, []byte) { - if iter.Next() { - return iter.Key(), iter.Value() + stats := make(map[string]string) + for _, key := range keys { + str, err := db.db.GetProperty(key) + if err == nil { + stats[key] = str + } } - return nil, nil + return stats +} + +func (db *GoLevelDB) Iterator() Iterator { + return db.db.NewIterator(nil, nil) } func (db *GoLevelDB) NewBatch() Batch { diff --git a/mem_db.go b/mem_db.go index dcd86e6d5..ef410e88c 100644 --- a/mem_db.go +++ b/mem_db.go @@ -65,14 +65,51 @@ func (db *MemDB) Print() { } } -// TODO: needs to be wired to range db.db -func (db *MemDB) Iterator() Iterator { - return nil +func (db *MemDB) Stats() map[string]string { + stats := make(map[string]string) + stats["database.type"] = "memDB" + return stats +} + +type memDBIterator struct { + last int + keys []string + db *MemDB +} + +func (it *memDBIterator) Create(db *MemDB) *memDBIterator { + if it == nil { + it = &memDBIterator{} + } + it.db = db + it.last = -1 + + // unfortunately we need a copy of all of the keys + for key, _ := range db.db { + it.keys = append(it.keys, key) + } + return it } -// TODO: needs to be wired to range db.db -func 
(db *MemDB) Next(iter Iterator) (key []byte, value []byte) { - return nil, nil +func (it *memDBIterator) Next() bool { + if it.last >= len(it.keys) { + return false + } + it.last++ + return true +} + +func (it *memDBIterator) Key() []byte { + return []byte(it.keys[it.last]) +} + +func (it *memDBIterator) Value() []byte { + return it.db.db[it.keys[it.last]] +} + +func (db *MemDB) Iterator() Iterator { + var it *memDBIterator + return it.Create(db) } func (db *MemDB) NewBatch() Batch { From 097e0abca9e34e00477047acb29c861f140f141e Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Thu, 23 Mar 2017 14:46:40 -0400 Subject: [PATCH 101/515] Added in locking --- mem_db.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mem_db.go b/mem_db.go index ef410e88c..5307d1a7a 100644 --- a/mem_db.go +++ b/mem_db.go @@ -78,6 +78,9 @@ type memDBIterator struct { } func (it *memDBIterator) Create(db *MemDB) *memDBIterator { + db.mtx.Lock() + defer db.mtx.Unlock() + if it == nil { it = &memDBIterator{} } @@ -104,6 +107,9 @@ func (it *memDBIterator) Key() []byte { } func (it *memDBIterator) Value() []byte { + it.db.mtx.Lock() + defer it.db.mtx.Unlock() + return it.db.db[it.keys[it.last]] } From c46ffe39a894fb0d7ba0ddc5caf2af5b7e779da5 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Mar 2017 20:46:46 +0400 Subject: [PATCH 102/515] [service] check for error returned by impl otherwise, we mark it as started when it is not in fact --- service.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/service.go b/service.go index e2d31925b..8cd064721 100644 --- a/service.go +++ b/service.go @@ -94,6 +94,11 @@ func (bs *BaseService) Start() (bool, error) { } } err := bs.impl.OnStart() + if err != nil { + // revert flag + atomic.StoreUint32(&bs.started, 0) + return false, err + } return true, err } else { if bs.log != nil { From 7a12594edb113e07b864ff4fb5255e56e3cb70c1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 28 Mar 2017 13:56:48 +0400 Subject: [PATCH 
103/515] [service] recreate Quit channel on reset don't think that user should do this thing him/herself --- service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/service.go b/service.go index e2d31925b..f17371336 100644 --- a/service.go +++ b/service.go @@ -136,6 +136,7 @@ func (bs *BaseService) Reset() (bool, error) { // whether or not we've started, we can reset atomic.CompareAndSwapUint32(&bs.started, 1, 0) + bs.Quit = make(chan struct{}) return true, bs.impl.OnReset() } else { if bs.log != nil { From dec518eb06c0823bd6462a64decf59663402332c Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 29 Mar 2017 16:03:05 +0400 Subject: [PATCH 104/515] update comment [ci skip] [circleci skip] --- service.go | 81 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/service.go b/service.go index 8cd064721..c263bfa81 100644 --- a/service.go +++ b/service.go @@ -1,42 +1,3 @@ -/* - -Classical-inheritance-style service declarations. -Services can be started, then stopped, then optionally restarted. -Users can override the OnStart/OnStop methods. -By default, these methods are guaranteed to be called at most once. -A call to Reset will panic, unless OnReset is overwritten, allowing OnStart/OnStop to be called again. -Caller must ensure that Start() and Stop() are not called concurrently. -It is ok to call Stop() without calling Start() first. -Services cannot be re-started unless OnReset is overwritten to allow it. - -Typical usage: - -type FooService struct { - BaseService - // private fields -} - -func NewFooService() *FooService { - fs := &FooService{ - // init - } - fs.BaseService = *NewBaseService(log, "FooService", fs) - return fs -} - -func (fs *FooService) OnStart() error { - fs.BaseService.OnStart() // Always call the overridden method. - // initialize private fields - // start subroutines, etc. 
-} - -func (fs *FooService) OnStop() error { - fs.BaseService.OnStop() // Always call the overridden method. - // close/destroy private fields - // stop subroutines, etc. -} - -*/ package common import ( @@ -60,6 +21,48 @@ type Service interface { String() string } +/* +Classical-inheritance-style service declarations. Services can be started, then +stopped, then optionally restarted. + +Users can override the OnStart/OnStop methods. In the absence of errors, these +methods are guaranteed to be called at most once. If OnStart returns an error, +service won't be marked as started, so the user can call Start again. + +A call to Reset will panic, unless OnReset is overwritten, allowing +OnStart/OnStop to be called again. + +The caller must ensure that Start and Stop are not called concurrently. + +It is ok to call Stop without calling Start first. + +Typical usage: + + type FooService struct { + BaseService + // private fields + } + + func NewFooService() *FooService { + fs := &FooService{ + // init + } + fs.BaseService = *NewBaseService(log, "FooService", fs) + return fs + } + + func (fs *FooService) OnStart() error { + fs.BaseService.OnStart() // Always call the overridden method. + // initialize private fields + // start subroutines, etc. + } + + func (fs *FooService) OnStop() error { + fs.BaseService.OnStop() // Always call the overridden method. + // close/destroy private fields + // stop subroutines, etc. + } +*/ type BaseService struct { log log15.Logger name string From 34e2d6638dc452325c833cc141d23a986682dab7 Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Wed, 29 Mar 2017 09:09:01 -0400 Subject: [PATCH 105/515] Fixed a typo in LevelDB property names. 
--- go_level_db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go_level_db.go b/go_level_db.go index ac936d01b..1b4a937cf 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -102,7 +102,7 @@ func (db *GoLevelDB) Stats() map[string]string { "leveldb.cachedblock", "leveldb.openedtables", "leveldb.alivesnaps", - "leveldb.alibeiters", + "leveldb.aliveiters", } stats := make(map[string]string) From 4fdcf51467a1af895483b4d29dde54432c286c39 Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Thu, 30 Mar 2017 11:51:11 -0400 Subject: [PATCH 106/515] Refactored the iterator to follow Go constructor conventions. --- mem_db.go | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/mem_db.go b/mem_db.go index 5307d1a7a..c80060526 100644 --- a/mem_db.go +++ b/mem_db.go @@ -77,21 +77,8 @@ type memDBIterator struct { db *MemDB } -func (it *memDBIterator) Create(db *MemDB) *memDBIterator { - db.mtx.Lock() - defer db.mtx.Unlock() - - if it == nil { - it = &memDBIterator{} - } - it.db = db - it.last = -1 - - // unfortunately we need a copy of all of the keys - for key, _ := range db.db { - it.keys = append(it.keys, key) - } - return it +func newMemDBIterator() *memDBIterator { + return &memDBIterator{} } func (it *memDBIterator) Next() bool { @@ -114,8 +101,18 @@ func (it *memDBIterator) Value() []byte { } func (db *MemDB) Iterator() Iterator { - var it *memDBIterator - return it.Create(db) + it := newMemDBIterator() + it.db = db + it.last = -1 + + db.mtx.Lock() + defer db.mtx.Unlock() + + // unfortunately we need a copy of all of the keys + for key, _ := range db.db { + it.keys = append(it.keys, key) + } + return it } func (db *MemDB) NewBatch() Batch { From 2feff1ea168b481189cdc91a6a63ab9196416c8e Mon Sep 17 00:00:00 2001 From: "Paul W. Homer" Date: Sat, 1 Apr 2017 15:44:41 -0400 Subject: [PATCH 107/515] Commented the empty table in c_level_db, and cleaned up the mem_db Value call. 
--- c_level_db.go | 1 + mem_db.go | 5 +---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/c_level_db.go b/c_level_db.go index 41ed4dc16..33a780094 100644 --- a/c_level_db.go +++ b/c_level_db.go @@ -107,6 +107,7 @@ func (db *CLevelDB) Print() { } func (db *CLevelDB) Stats() map[string]string { + // TODO: Find the available properties for the C LevelDB implementation keys := []string{} stats := make(map[string]string) diff --git a/mem_db.go b/mem_db.go index c80060526..286624294 100644 --- a/mem_db.go +++ b/mem_db.go @@ -94,10 +94,7 @@ func (it *memDBIterator) Key() []byte { } func (it *memDBIterator) Value() []byte { - it.db.mtx.Lock() - defer it.db.mtx.Unlock() - - return it.db.db[it.keys[it.last]] + return it.db.Get(it.Key()) } func (db *MemDB) Iterator() Iterator { From 5aecd325549fe2acf31df0e1591f2796aad80dfd Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:07 -0400 Subject: [PATCH 108/515] merge.sh file for the repo merge --- merge.sh | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 merge.sh diff --git a/merge.sh b/merge.sh new file mode 100644 index 000000000..9d2629bfb --- /dev/null +++ b/merge.sh @@ -0,0 +1,53 @@ +#! 
/bin/bash +set -e + +# NOTE: go-alert depends on go-common + +REPOS=("autofile" "clist" "db" "events" "flowrate" "logger" "process") + +mkdir common +git mv *.go common +git mv LICENSE common + +git commit -m "move all files to common/ to begin repo merge" + +for repo in "${REPOS[@]}"; do + # add and fetch the repo + git remote add -f "$repo" "https://github.com/tendermint/go-${repo}" + + # merge master and move into subdir + git merge "$repo/master" --no-edit + + if [[ "$repo" != "flowrate" ]]; then + mkdir "$repo" + git mv *.go "$repo/" + fi + + set +e # these might not exist + git mv *.md "$repo/" + git mv README "$repo/README.md" + git mv Makefile "$repo/Makefile" + git rm LICENSE + set -e + + # commit + git commit -m "merge go-${repo}" + + git remote rm "$repo" +done + +go get github.com/ebuchman/got +got replace "tendermint/go-common" "tendermint/go-common/common" +for repo in "${REPOS[@]}"; do + + if [[ "$repo" != "flowrate" ]]; then + got replace "tendermint/go-${repo}" "tendermint/go-common/${repo}" + else + got replace "tendermint/go-${repo}/flowrate" "tendermint/go-common/flowrate" + fi +done + +git add -u +git commit -m "update import paths" + +# TODO: change any paths in non-Go files From 356657a37bb8a7103eca078d322e6768a56bc1c2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:22 -0400 Subject: [PATCH 109/515] move all files to common/ to begin repo merge --- LICENSE => common/LICENSE | 0 array.go => common/array.go | 0 async.go => common/async.go | 0 bit_array.go => common/bit_array.go | 0 bit_array_test.go => common/bit_array_test.go | 0 byteslice.go => common/byteslice.go | 0 cmap.go => common/cmap.go | 0 colors.go => common/colors.go | 0 errors.go => common/errors.go | 0 heap.go => common/heap.go | 0 int.go => common/int.go | 0 io.go => common/io.go | 0 math.go => common/math.go | 0 net.go => common/net.go | 0 os.go => common/os.go | 0 random.go => common/random.go | 0 repeat_timer.go => common/repeat_timer.go | 0 
service.go => common/service.go | 0 service_test.go => common/service_test.go | 0 string.go => common/string.go | 0 throttle_timer.go => common/throttle_timer.go | 0 word.go => common/word.go | 0 22 files changed, 0 insertions(+), 0 deletions(-) rename LICENSE => common/LICENSE (100%) rename array.go => common/array.go (100%) rename async.go => common/async.go (100%) rename bit_array.go => common/bit_array.go (100%) rename bit_array_test.go => common/bit_array_test.go (100%) rename byteslice.go => common/byteslice.go (100%) rename cmap.go => common/cmap.go (100%) rename colors.go => common/colors.go (100%) rename errors.go => common/errors.go (100%) rename heap.go => common/heap.go (100%) rename int.go => common/int.go (100%) rename io.go => common/io.go (100%) rename math.go => common/math.go (100%) rename net.go => common/net.go (100%) rename os.go => common/os.go (100%) rename random.go => common/random.go (100%) rename repeat_timer.go => common/repeat_timer.go (100%) rename service.go => common/service.go (100%) rename service_test.go => common/service_test.go (100%) rename string.go => common/string.go (100%) rename throttle_timer.go => common/throttle_timer.go (100%) rename word.go => common/word.go (100%) diff --git a/LICENSE b/common/LICENSE similarity index 100% rename from LICENSE rename to common/LICENSE diff --git a/array.go b/common/array.go similarity index 100% rename from array.go rename to common/array.go diff --git a/async.go b/common/async.go similarity index 100% rename from async.go rename to common/async.go diff --git a/bit_array.go b/common/bit_array.go similarity index 100% rename from bit_array.go rename to common/bit_array.go diff --git a/bit_array_test.go b/common/bit_array_test.go similarity index 100% rename from bit_array_test.go rename to common/bit_array_test.go diff --git a/byteslice.go b/common/byteslice.go similarity index 100% rename from byteslice.go rename to common/byteslice.go diff --git a/cmap.go b/common/cmap.go similarity 
index 100% rename from cmap.go rename to common/cmap.go diff --git a/colors.go b/common/colors.go similarity index 100% rename from colors.go rename to common/colors.go diff --git a/errors.go b/common/errors.go similarity index 100% rename from errors.go rename to common/errors.go diff --git a/heap.go b/common/heap.go similarity index 100% rename from heap.go rename to common/heap.go diff --git a/int.go b/common/int.go similarity index 100% rename from int.go rename to common/int.go diff --git a/io.go b/common/io.go similarity index 100% rename from io.go rename to common/io.go diff --git a/math.go b/common/math.go similarity index 100% rename from math.go rename to common/math.go diff --git a/net.go b/common/net.go similarity index 100% rename from net.go rename to common/net.go diff --git a/os.go b/common/os.go similarity index 100% rename from os.go rename to common/os.go diff --git a/random.go b/common/random.go similarity index 100% rename from random.go rename to common/random.go diff --git a/repeat_timer.go b/common/repeat_timer.go similarity index 100% rename from repeat_timer.go rename to common/repeat_timer.go diff --git a/service.go b/common/service.go similarity index 100% rename from service.go rename to common/service.go diff --git a/service_test.go b/common/service_test.go similarity index 100% rename from service_test.go rename to common/service_test.go diff --git a/string.go b/common/string.go similarity index 100% rename from string.go rename to common/string.go diff --git a/throttle_timer.go b/common/throttle_timer.go similarity index 100% rename from throttle_timer.go rename to common/throttle_timer.go diff --git a/word.go b/common/word.go similarity index 100% rename from word.go rename to common/word.go From a893bb119bc19418ef05cd8721a672c92ae4daf0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:26 -0400 Subject: [PATCH 110/515] merge go-autofile --- LICENSE | 203 ------------------ README.md => autofile/README.md | 0 
autofile.go => autofile/autofile.go | 0 autofile_test.go => autofile/autofile_test.go | 0 group.go => autofile/group.go | 0 group_test.go => autofile/group_test.go | 0 .../sighup_watcher.go | 0 7 files changed, 203 deletions(-) delete mode 100644 LICENSE rename README.md => autofile/README.md (100%) rename autofile.go => autofile/autofile.go (100%) rename autofile_test.go => autofile/autofile_test.go (100%) rename group.go => autofile/group.go (100%) rename group_test.go => autofile/group_test.go (100%) rename sighup_watcher.go => autofile/sighup_watcher.go (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 9527e2688..000000000 --- a/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -All files are Copyright (C) 2017 All in Bits, Inc - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README.md b/autofile/README.md similarity index 100% rename from README.md rename to autofile/README.md diff --git a/autofile.go b/autofile/autofile.go similarity index 100% rename from autofile.go rename to autofile/autofile.go diff --git a/autofile_test.go b/autofile/autofile_test.go similarity index 100% rename from autofile_test.go rename to autofile/autofile_test.go diff --git a/group.go b/autofile/group.go similarity index 100% rename from group.go rename to autofile/group.go diff --git a/group_test.go b/autofile/group_test.go similarity index 100% rename from group_test.go rename to autofile/group_test.go diff --git a/sighup_watcher.go b/autofile/sighup_watcher.go similarity index 100% rename from sighup_watcher.go rename to autofile/sighup_watcher.go From acbd7caaf8fef7184acf83c90c8828c8ae80ea72 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:27 -0400 Subject: [PATCH 111/515] merge go-clist --- LICENSE | 193 --------------------------- clist.go => clist/clist.go | 0 clist_test.go => clist/clist_test.go | 0 3 files changed, 193 deletions(-) delete mode 100644 LICENSE rename clist.go => clist/clist.go (100%) rename clist_test.go => clist/clist_test.go (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 1ec9bd42c..000000000 --- a/LICENSE +++ /dev/null @@ -1,193 +0,0 @@ -Tendermint Go-CList -Copyright (C) 2015 Tendermint - - - - 
Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/clist.go b/clist/clist.go similarity index 100% rename from clist.go rename to clist/clist.go diff --git a/clist_test.go b/clist/clist_test.go similarity index 100% rename from clist_test.go rename to clist/clist_test.go From fdbb10827dbdca18abd55dd01284bb29eb0e215d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:35 -0400 Subject: [PATCH 112/515] merge go-db --- LICENSE.md => db/LICENSE.md | 0 README.md => db/README.md | 0 c_level_db.go => db/c_level_db.go | 0 c_level_db_test.go => db/c_level_db_test.go | 0 db.go => db/db.go | 0 go_level_db.go => db/go_level_db.go | 0 go_level_db_test.go => db/go_level_db_test.go | 0 mem_db.go => db/mem_db.go | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename LICENSE.md => db/LICENSE.md (100%) rename README.md => db/README.md (100%) rename c_level_db.go => db/c_level_db.go (100%) rename c_level_db_test.go => db/c_level_db_test.go (100%) rename db.go => db/db.go (100%) rename go_level_db.go => db/go_level_db.go (100%) rename go_level_db_test.go => db/go_level_db_test.go (100%) rename mem_db.go => db/mem_db.go (100%) diff --git a/LICENSE.md b/db/LICENSE.md similarity index 100% rename from LICENSE.md rename to db/LICENSE.md diff --git a/README.md b/db/README.md similarity index 100% rename from README.md rename to db/README.md diff --git a/c_level_db.go b/db/c_level_db.go similarity index 100% rename from c_level_db.go 
rename to db/c_level_db.go diff --git a/c_level_db_test.go b/db/c_level_db_test.go similarity index 100% rename from c_level_db_test.go rename to db/c_level_db_test.go diff --git a/db.go b/db/db.go similarity index 100% rename from db.go rename to db/db.go diff --git a/go_level_db.go b/db/go_level_db.go similarity index 100% rename from go_level_db.go rename to db/go_level_db.go diff --git a/go_level_db_test.go b/db/go_level_db_test.go similarity index 100% rename from go_level_db_test.go rename to db/go_level_db_test.go diff --git a/mem_db.go b/db/mem_db.go similarity index 100% rename from mem_db.go rename to db/mem_db.go From fe92e62a1921188998ad0b1b44665b80a876371b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:40 -0400 Subject: [PATCH 113/515] merge go-events --- LICENSE | 201 ------------------------ Makefile => events/Makefile | 0 README.md => events/README.md | 0 event_cache.go => events/event_cache.go | 0 events.go => events/events.go | 0 events_test.go => events/events_test.go | 0 log.go => events/log.go | 0 7 files changed, 201 deletions(-) delete mode 100644 LICENSE rename Makefile => events/Makefile (100%) rename README.md => events/README.md (100%) rename event_cache.go => events/event_cache.go (100%) rename events.go => events/events.go (100%) rename events_test.go => events/events_test.go (100%) rename log.go => events/log.go (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/Makefile b/events/Makefile similarity index 100% rename from Makefile rename to events/Makefile diff --git a/README.md b/events/README.md similarity index 100% rename from README.md rename to events/README.md diff --git a/event_cache.go b/events/event_cache.go similarity index 100% rename from event_cache.go rename to events/event_cache.go diff --git a/events.go b/events/events.go similarity index 100% rename from events.go rename to events/events.go diff --git a/events_test.go b/events/events_test.go similarity index 100% rename from events_test.go rename to events/events_test.go diff --git a/log.go b/events/log.go similarity index 100% rename from log.go rename to events/log.go From 44274eeb581a9df071d425dbf68d798d970a1056 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:42 -0400 Subject: [PATCH 114/515] merge go-flowrate --- LICENSE | 29 ----------------------------- README => flowrate/README.md | 0 2 files changed, 29 deletions(-) delete mode 100644 LICENSE rename README => flowrate/README.md (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index e9f9f628b..000000000 --- a/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - - * Neither the name of the go-flowrate project nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README b/flowrate/README.md similarity index 100% rename from README rename to flowrate/README.md From 024fcb4fdba3378156e02bf60728dd277e4b2e72 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:43 -0400 Subject: [PATCH 115/515] merge go-logger --- LICENSE | 193 ---------------------------------------- log.go => logger/log.go | 0 2 files changed, 193 deletions(-) delete mode 100644 LICENSE rename log.go => logger/log.go (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 17ce702bf..000000000 --- a/LICENSE +++ /dev/null @@ -1,193 +0,0 @@ -Tendermint Go-Logger -Copyright (C) 2015 Tendermint - - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/log.go b/logger/log.go similarity index 100% rename from log.go rename to logger/log.go From 6f49ba4c3e544549fbed4ce4896e530fc67b5032 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:44 -0400 Subject: [PATCH 116/515] merge go-process --- LICENSE | 193 ------------------------------- process.go => process/process.go | 0 util.go => process/util.go | 0 3 files changed, 193 deletions(-) delete mode 100644 LICENSE rename process.go => process/process.go (100%) rename util.go => process/util.go (100%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index b0c35f3b8..000000000 --- a/LICENSE +++ /dev/null @@ -1,193 +0,0 @@ -Tendermint Go-Process -Copyright (C) 2015 Tendermint - - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/process.go b/process/process.go similarity index 100% rename from process.go rename to process/process.go diff --git a/util.go b/process/util.go similarity index 100% rename from util.go rename to process/util.go From 900be74e8fc9df8800401235301a538c074c43aa Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 16:33:51 -0400 Subject: [PATCH 117/515] update import paths --- autofile/autofile.go | 2 +- autofile/autofile_test.go | 2 +- autofile/group.go | 2 +- autofile/group_test.go | 2 +- db/c_level_db.go | 2 +- db/c_level_db_test.go | 2 +- db/db.go | 2 +- db/go_level_db.go | 2 +- db/go_level_db_test.go | 2 +- events/events.go | 2 +- events/log.go | 2 +- logger/log.go | 2 +- process/util.go | 2 +- test/mutate.go | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/autofile/autofile.go b/autofile/autofile.go index e61bbb832..83395a536 100644 --- a/autofile/autofile.go +++ b/autofile/autofile.go @@ -5,7 +5,7 @@ import ( "sync" "time" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) /* AutoFile usage diff --git a/autofile/autofile_test.go b/autofile/autofile_test.go index 243125ca6..c8b81ed39 100644 --- a/autofile/autofile_test.go +++ b/autofile/autofile_test.go @@ -1,7 +1,7 @@ package autofile import ( - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" "os" "sync/atomic" "syscall" diff --git a/autofile/group.go b/autofile/group.go index ee1a94158..faeba6e74 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -15,7 +15,7 @@ import ( "sync" "time" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) /* diff --git a/autofile/group_test.go b/autofile/group_test.go index 1c2280e83..aa4c794fa 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - . "github.com/tendermint/go-common" + . 
"github.com/tendermint/go-common/common" ) // NOTE: Returned group has ticker stopped diff --git a/db/c_level_db.go b/db/c_level_db.go index 33a780094..8de2732ea 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -8,7 +8,7 @@ import ( "github.com/jmhodges/levigo" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) func init() { diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index b90161498..1ce2202c8 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) func BenchmarkRandomReadsWrites2(b *testing.B) { diff --git a/db/db.go b/db/db.go index b88499092..682ecfc4c 100644 --- a/db/db.go +++ b/db/db.go @@ -1,6 +1,6 @@ package db -import . "github.com/tendermint/go-common" +import . "github.com/tendermint/go-common/common" type DB interface { Get([]byte) []byte diff --git a/db/go_level_db.go b/db/go_level_db.go index 1b4a937cf..9b6e25a1e 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,7 +8,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) func init() { diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index 24b64734d..eb0631665 100644 --- a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) func BenchmarkRandomReadsWrites(b *testing.B) { diff --git a/events/events.go b/events/events.go index e613a8e04..208cea26e 100644 --- a/events/events.go +++ b/events/events.go @@ -6,7 +6,7 @@ package events import ( "sync" - . "github.com/tendermint/go-common" + . 
"github.com/tendermint/go-common/common" ) // Generic event data can be typed and registered with tendermint/go-wire diff --git a/events/log.go b/events/log.go index 525462294..f79b657bd 100644 --- a/events/log.go +++ b/events/log.go @@ -1,7 +1,7 @@ package events import ( - "github.com/tendermint/go-logger" + "github.com/tendermint/go-common/logger" ) var log = logger.New("module", "events") diff --git a/logger/log.go b/logger/log.go index 07c1e6a49..edc2ddcf5 100644 --- a/logger/log.go +++ b/logger/log.go @@ -3,7 +3,7 @@ package logger import ( "os" - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" "github.com/tendermint/log15" ) diff --git a/process/util.go b/process/util.go index 4976e46e5..5e954473a 100644 --- a/process/util.go +++ b/process/util.go @@ -1,7 +1,7 @@ package process import ( - . "github.com/tendermint/go-common" + . "github.com/tendermint/go-common/common" ) // Runs a command and gets the result. diff --git a/test/mutate.go b/test/mutate.go index 629e9f865..3b552ff3c 100644 --- a/test/mutate.go +++ b/test/mutate.go @@ -1,7 +1,7 @@ package test import ( - . "github.com/tendermint/go-common" + . 
"github.com/tendermint/go-common/common" ) // Contract: !bytes.Equal(input, output) && len(input) >= len(output) From 2f8551d3b614dd0c0c6c114c42ab25901cc41a52 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 18 Apr 2017 17:56:05 -0400 Subject: [PATCH 118/515] go-common -> tmlibs --- autofile/autofile.go | 2 +- autofile/autofile_test.go | 2 +- autofile/group.go | 2 +- autofile/group_test.go | 2 +- db/c_level_db.go | 2 +- db/c_level_db_test.go | 2 +- db/db.go | 2 +- db/go_level_db.go | 2 +- db/go_level_db_test.go | 2 +- events/events.go | 2 +- events/log.go | 2 +- logger/log.go | 2 +- merge.sh | 1 + process/util.go | 2 +- test/mutate.go | 2 +- 15 files changed, 15 insertions(+), 14 deletions(-) diff --git a/autofile/autofile.go b/autofile/autofile.go index 83395a536..5d6bc7261 100644 --- a/autofile/autofile.go +++ b/autofile/autofile.go @@ -5,7 +5,7 @@ import ( "sync" "time" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) /* AutoFile usage diff --git a/autofile/autofile_test.go b/autofile/autofile_test.go index c8b81ed39..8f8017e1b 100644 --- a/autofile/autofile_test.go +++ b/autofile/autofile_test.go @@ -1,7 +1,7 @@ package autofile import ( - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" "os" "sync/atomic" "syscall" diff --git a/autofile/group.go b/autofile/group.go index faeba6e74..0f829309a 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -15,7 +15,7 @@ import ( "sync" "time" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) /* diff --git a/autofile/group_test.go b/autofile/group_test.go index aa4c794fa..92e259701 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - . "github.com/tendermint/go-common/common" + . 
"github.com/tendermint/tmlibs/common" ) // NOTE: Returned group has ticker stopped diff --git a/db/c_level_db.go b/db/c_level_db.go index 8de2732ea..b1ae49a12 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -8,7 +8,7 @@ import ( "github.com/jmhodges/levigo" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) func init() { diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 1ce2202c8..0ee6d6414 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites2(b *testing.B) { diff --git a/db/db.go b/db/db.go index 682ecfc4c..aa8ff48a8 100644 --- a/db/db.go +++ b/db/db.go @@ -1,6 +1,6 @@ package db -import . "github.com/tendermint/go-common/common" +import . "github.com/tendermint/tmlibs/common" type DB interface { Get([]byte) []byte diff --git a/db/go_level_db.go b/db/go_level_db.go index 9b6e25a1e..54ae1149f 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,7 +8,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) func init() { diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index eb0631665..0603b2d4f 100644 --- a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites(b *testing.B) { diff --git a/events/events.go b/events/events.go index 208cea26e..487d120b3 100644 --- a/events/events.go +++ b/events/events.go @@ -6,7 +6,7 @@ package events import ( "sync" - . "github.com/tendermint/go-common/common" + . 
"github.com/tendermint/tmlibs/common" ) // Generic event data can be typed and registered with tendermint/go-wire diff --git a/events/log.go b/events/log.go index f79b657bd..adb6bab09 100644 --- a/events/log.go +++ b/events/log.go @@ -1,7 +1,7 @@ package events import ( - "github.com/tendermint/go-common/logger" + "github.com/tendermint/tmlibs/logger" ) var log = logger.New("module", "events") diff --git a/logger/log.go b/logger/log.go index edc2ddcf5..84f71c34f 100644 --- a/logger/log.go +++ b/logger/log.go @@ -3,7 +3,7 @@ package logger import ( "os" - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" "github.com/tendermint/log15" ) diff --git a/merge.sh b/merge.sh index 9d2629bfb..7ff0ed94e 100644 --- a/merge.sh +++ b/merge.sh @@ -51,3 +51,4 @@ git add -u git commit -m "update import paths" # TODO: change any paths in non-Go files +# TODO: add license diff --git a/process/util.go b/process/util.go index 5e954473a..b3e0aef11 100644 --- a/process/util.go +++ b/process/util.go @@ -1,7 +1,7 @@ package process import ( - . "github.com/tendermint/go-common/common" + . "github.com/tendermint/tmlibs/common" ) // Runs a command and gets the result. diff --git a/test/mutate.go b/test/mutate.go index 3b552ff3c..1dbe7a6bf 100644 --- a/test/mutate.go +++ b/test/mutate.go @@ -1,7 +1,7 @@ package test import ( - . "github.com/tendermint/go-common/common" + . 
"github.com/tendermint/tmlibs/common" ) // Contract: !bytes.Equal(input, output) && len(input) >= len(output) From fd296811df7cf036c6c43665e92896df5db7c573 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 21 Apr 2017 15:33:25 -0400 Subject: [PATCH 119/515] simple merkle tree --- merkle/README.md | 4 + merkle/simple_tree.go | 277 +++++++++++++++++++++++++++++++++++++ merkle/simple_tree_test.go | 87 ++++++++++++ merkle/types.go | 23 +++ 4 files changed, 391 insertions(+) create mode 100644 merkle/README.md create mode 100644 merkle/simple_tree.go create mode 100644 merkle/simple_tree_test.go create mode 100644 merkle/types.go diff --git a/merkle/README.md b/merkle/README.md new file mode 100644 index 000000000..c44978368 --- /dev/null +++ b/merkle/README.md @@ -0,0 +1,4 @@ +## Simple Merkle Tree + +For smaller static data structures that don't require immutable snapshots or mutability; +for instance the transactions and validation signatures of a block can be hashed using this simple merkle tree logic. diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go new file mode 100644 index 000000000..517331449 --- /dev/null +++ b/merkle/simple_tree.go @@ -0,0 +1,277 @@ +/* +Computes a deterministic minimal height merkle tree hash. +If the number of items is not a power of two, some leaves +will be at different levels. Tries to keep both sides of +the tree the same size, but the left may be one greater. + +Use this for short deterministic trees, such as the validator list. +For larger datasets, use IAVLTree. + + * + / \ + / \ + / \ + / \ + * * + / \ / \ + / \ / \ + / \ / \ + * * * h6 + / \ / \ / \ + h0 h1 h2 h3 h4 h5 + +*/ + +package merkle + +import ( + "bytes" + "fmt" + "sort" + + "golang.org/x/crypto/ripemd160" + + . 
"github.com/tendermint/go-common" + "github.com/tendermint/go-wire" +) + +func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { + var n int + var err error + var hasher = ripemd160.New() + wire.WriteByteSlice(left, hasher, &n, &err) + wire.WriteByteSlice(right, hasher, &n, &err) + if err != nil { + PanicCrisis(err) + } + return hasher.Sum(nil) +} + +func SimpleHashFromHashes(hashes [][]byte) []byte { + // Recursive impl. + switch len(hashes) { + case 0: + return nil + case 1: + return hashes[0] + default: + left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2]) + right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:]) + return SimpleHashFromTwoHashes(left, right) + } +} + +// Convenience for SimpleHashFromHashes. +func SimpleHashFromBinaries(items []interface{}) []byte { + hashes := make([][]byte, len(items)) + for i, item := range items { + hashes[i] = SimpleHashFromBinary(item) + } + return SimpleHashFromHashes(hashes) +} + +// General Convenience +func SimpleHashFromBinary(item interface{}) []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteBinary(item, hasher, n, err) + if *err != nil { + PanicCrisis(err) + } + return hasher.Sum(nil) +} + +// Convenience for SimpleHashFromHashes. +func SimpleHashFromHashables(items []Hashable) []byte { + hashes := make([][]byte, len(items)) + for i, item := range items { + hash := item.Hash() + hashes[i] = hash + } + return SimpleHashFromHashes(hashes) +} + +// Convenience for SimpleHashFromHashes. +func SimpleHashFromMap(m map[string]interface{}) []byte { + kpPairsH := MakeSortedKVPairs(m) + return SimpleHashFromHashables(kpPairsH) +} + +//-------------------------------------------------------------------------------- + +/* Convenience struct for key-value pairs. +A list of KVPairs is hashed via `SimpleHashFromHashables`. +NOTE: Each `Value` is encoded for hashing without extra type information, +so the user is presumed to be aware of the Value types. 
+*/ +type KVPair struct { + Key string + Value interface{} +} + +func (kv KVPair) Hash() []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteString(kv.Key, hasher, n, err) + if kvH, ok := kv.Value.(Hashable); ok { + wire.WriteByteSlice(kvH.Hash(), hasher, n, err) + } else { + wire.WriteBinary(kv.Value, hasher, n, err) + } + if *err != nil { + PanicSanity(*err) + } + return hasher.Sum(nil) +} + +type KVPairs []KVPair + +func (kvps KVPairs) Len() int { return len(kvps) } +func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } +func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } +func (kvps KVPairs) Sort() { sort.Sort(kvps) } + +func MakeSortedKVPairs(m map[string]interface{}) []Hashable { + kvPairs := []KVPair{} + for k, v := range m { + kvPairs = append(kvPairs, KVPair{k, v}) + } + KVPairs(kvPairs).Sort() + kvPairsH := []Hashable{} + for _, kvp := range kvPairs { + kvPairsH = append(kvPairsH, kvp) + } + return kvPairsH +} + +//-------------------------------------------------------------------------------- + +type SimpleProof struct { + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. +} + +// proofs[0] is the proof for items[0]. +func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashables(items) + rootHash = rootSPN.Hash + proofs = make([]*SimpleProof, len(items)) + for i, trail := range trails { + proofs[i] = &SimpleProof{ + Aunts: trail.FlattenAunts(), + } + } + return +} + +// Verify that leafHash is a leaf hash of the simple-merkle-tree +// which hashes to rootHash. 
+func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { + computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) + if computedHash == nil { + return false + } + if !bytes.Equal(computedHash, rootHash) { + return false + } + return true +} + +func (sp *SimpleProof) String() string { + return sp.StringIndented("") +} + +func (sp *SimpleProof) StringIndented(indent string) string { + return fmt.Sprintf(`SimpleProof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) +} + +// Use the leafHash and innerHashes to get the root merkle hash. +// If the length of the innerHashes slice isn't exactly correct, the result is nil. +func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { + // Recursive impl. + if index >= total { + return nil + } + switch total { + case 0: + PanicSanity("Cannot call computeHashFromAunts() with 0 total") + return nil + case 1: + if len(innerHashes) != 0 { + return nil + } + return leafHash + default: + if len(innerHashes) == 0 { + return nil + } + numLeft := (total + 1) / 2 + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if leftHash == nil { + return nil + } + return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + } else { + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + } + } +} + +// Helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. +// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. 
+type SimpleProofNode struct { + Hash []byte + Parent *SimpleProofNode + Left *SimpleProofNode // Left sibling (only one of Left,Right is set) + Right *SimpleProofNode // Right sibling (only one of Left,Right is set) +} + +// Starting from a leaf SimpleProofNode, FlattenAunts() will return +// the inner hashes for the item corresponding to the leaf. +func (spn *SimpleProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + if spn.Left != nil { + innerHashes = append(innerHashes, spn.Left.Hash) + } else if spn.Right != nil { + innerHashes = append(innerHashes, spn.Right.Hash) + } else { + break + } + spn = spn.Parent + } + return innerHashes +} + +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. +func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { + // Recursive impl. + switch len(items) { + case 0: + return nil, nil + case 1: + trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + return []*SimpleProofNode{trail}, trail + default: + lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) + rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + root := &SimpleProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } +} diff --git a/merkle/simple_tree_test.go b/merkle/simple_tree_test.go new file mode 100644 index 000000000..424b498f0 --- /dev/null +++ b/merkle/simple_tree_test.go @@ -0,0 +1,87 @@ +package merkle + +import ( + "bytes" + + . "github.com/tendermint/go-common" + . 
"github.com/tendermint/go-common/test" + + "testing" +) + +type testItem []byte + +func (tI testItem) Hash() []byte { + return []byte(tI) +} + +func TestSimpleProof(t *testing.T) { + + total := 100 + + items := make([]Hashable, total) + for i := 0; i < total; i++ { + items[i] = testItem(RandBytes(32)) + } + + rootHash := SimpleHashFromHashables(items) + + rootHash2, proofs := SimpleProofsFromHashables(items) + + if !bytes.Equal(rootHash, rootHash2) { + t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) + } + + // For each item, check the trail. + for i, item := range items { + itemHash := item.Hash() + proof := proofs[i] + + // Verify success + ok := proof.Verify(i, total, itemHash, rootHash) + if !ok { + t.Errorf("Verification failed for index %v.", i) + } + + // Wrong item index should make it fail + { + ok = proof.Verify((i+1)%total, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong index %v.", i) + } + } + + // Trail too long should make it fail + origAunts := proof.Aunts + proof.Aunts = append(proof.Aunts, RandBytes(32)) + { + ok = proof.Verify(i, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong trail length.") + } + } + proof.Aunts = origAunts + + // Trail too short should make it fail + proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] + { + ok = proof.Verify(i, total, itemHash, rootHash) + if ok { + t.Errorf("Expected verification to fail for wrong trail length.") + } + } + proof.Aunts = origAunts + + // Mutating the itemHash should make it fail. + ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) + if ok { + t.Errorf("Expected verification to fail for mutated leaf hash") + } + + // Mutating the rootHash should make it fail. 
+ ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) + if ok { + t.Errorf("Expected verification to fail for mutated root hash") + } + } +} diff --git a/merkle/types.go b/merkle/types.go new file mode 100644 index 000000000..93541eda5 --- /dev/null +++ b/merkle/types.go @@ -0,0 +1,23 @@ +package merkle + +type Tree interface { + Size() (size int) + Height() (height int8) + Has(key []byte) (has bool) + Proof(key []byte) (value []byte, proof []byte, exists bool) // TODO make it return an index + Get(key []byte) (index int, value []byte, exists bool) + GetByIndex(index int) (key []byte, value []byte) + Set(key []byte, value []byte) (updated bool) + Remove(key []byte) (value []byte, removed bool) + HashWithCount() (hash []byte, count int) + Hash() (hash []byte) + Save() (hash []byte) + Load(hash []byte) + Copy() Tree + Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) + IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) +} + +type Hashable interface { + Hash() []byte +} From 56d36c8f25629e7c707439dffc7bc7b10a5de031 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 21 Apr 2017 16:04:58 -0400 Subject: [PATCH 120/515] merkle: go-common -> tmlibs --- merkle/simple_tree.go | 2 +- merkle/simple_tree_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 517331449..b5520f723 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -31,7 +31,7 @@ import ( "golang.org/x/crypto/ripemd160" - . "github.com/tendermint/go-common" + . "github.com/tendermint/tmlibs/common" "github.com/tendermint/go-wire" ) diff --git a/merkle/simple_tree_test.go b/merkle/simple_tree_test.go index 424b498f0..6299fa33b 100644 --- a/merkle/simple_tree_test.go +++ b/merkle/simple_tree_test.go @@ -3,8 +3,8 @@ package merkle import ( "bytes" - . "github.com/tendermint/go-common" - . 
"github.com/tendermint/go-common/test" + . "github.com/tendermint/tmlibs/common" + . "github.com/tendermint/tmlibs/test" "testing" ) From 1ea866fd691ded9c1de16408934927d133550efc Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 21 Apr 2017 16:05:03 -0400 Subject: [PATCH 121/515] glide --- glide.lock | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ glide.yaml | 17 +++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 glide.lock create mode 100644 glide.yaml diff --git a/glide.lock b/glide.lock new file mode 100644 index 000000000..003e3d618 --- /dev/null +++ b/glide.lock @@ -0,0 +1,55 @@ +hash: 47e715510d6b57cff8dc4750b6b9d89a41469a8330a7a8bea1c044b2ac61e581 +updated: 2017-04-21T16:04:25.798163098-04:00 +imports: +- name: github.com/go-stack/stack + version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 +- name: github.com/golang/snappy + version: d9eb7a3d35ec988b8585d4a0068e462c27d28380 +- name: github.com/jmhodges/levigo + version: c42d9e0ca023e2198120196f842701bb4c55d7b9 +- name: github.com/mattn/go-colorable + version: d228849504861217f796da67fae4f6e347643f15 +- name: github.com/mattn/go-isatty + version: 30a891c33c7cde7b02a981314b4228ec99380cca +- name: github.com/syndtr/goleveldb + version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65 + subpackages: + - leveldb + - leveldb/cache + - leveldb/comparer + - leveldb/errors + - leveldb/filter + - leveldb/iterator + - leveldb/journal + - leveldb/memdb + - leveldb/opt + - leveldb/storage + - leveldb/table + - leveldb/util +- name: github.com/tendermint/go-wire + version: 4325edc613ad1e9286c8bb770ed40ad3fe647e6c +- name: github.com/tendermint/log15 + version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6 + subpackages: + - term +- name: golang.org/x/crypto + version: 7c6cc321c680f03b9ef0764448e780704f486b51 + subpackages: + - ripemd160 +- name: golang.org/x/sys + version: d75a52659825e75fff6158388dddc6a5b04f9ba5 + subpackages: + - unix +testImports: +- name: github.com/davecgh/go-spew + version: 
6d212800a42e8ab5c146b8ace3490ee17e5225f9 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + subpackages: + - assert diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 000000000..a4c5dd2b6 --- /dev/null +++ b/glide.yaml @@ -0,0 +1,17 @@ +package: github.com/tendermint/tmlibs +import: +- package: github.com/jmhodges/levigo +- package: github.com/syndtr/goleveldb + subpackages: + - leveldb + - leveldb/errors + - leveldb/opt +- package: github.com/tendermint/go-wire +- package: github.com/tendermint/log15 +- package: golang.org/x/crypto + subpackages: + - ripemd160 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert From df250b69416a35a943a6e2a92118667e9ef031d4 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 21 Apr 2017 16:25:23 -0400 Subject: [PATCH 122/515] docs: go-events -> tmlibs/events --- events/Makefile | 2 +- events/README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/events/Makefile b/events/Makefile index 295001f8d..c425ee5a6 100644 --- a/events/Makefile +++ b/events/Makefile @@ -1,5 +1,5 @@ .PHONY: docs -REPO:=github.com/tendermint/go-events +REPO:=github.com/tendermint/tmlibs/events docs: @go get github.com/davecheney/godoc2md diff --git a/events/README.md b/events/README.md index c435e7283..7a00d79dc 100644 --- a/events/README.md +++ b/events/README.md @@ -1,7 +1,7 @@ # events -`import "github.com/tendermint/go-events"` +`import "github.com/tendermint/tmlibs/events"` * [Overview](#pkg-overview) * [Index](#pkg-index) @@ -26,7 +26,7 @@ Pub-Sub in go with event caching #### Package files -[event_cache.go](/src/github.com/tendermint/go-events/event_cache.go) [events.go](/src/github.com/tendermint/go-events/events.go) [log.go](/src/github.com/tendermint/go-events/log.go) 
+[event_cache.go](/src/github.com/tendermint/tmlibs/events/event_cache.go) [events.go](/src/github.com/tendermint/tmlibs/events/events.go) [log.go](/src/github.com/tendermint/tmlibs/events/log.go) From af637abf120304bdbf40479fac51bd4788a9a0dd Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 25 Apr 2017 17:48:10 -0400 Subject: [PATCH 123/515] Makefile and circle.yml --- Makefile | 20 ++++++++++++++++++++ circle.yml | 21 +++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 Makefile create mode 100644 circle.yml diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..49acb091a --- /dev/null +++ b/Makefile @@ -0,0 +1,20 @@ +.PHONEY: all test install get_vendor_deps ensure_tools + +GOTOOLS = \ + github.com/Masterminds/glide +REPO:=github.com/tendermint/tmlibs + +all: install test + +test: + go test `glide novendor` + +get_vendor_deps: ensure_tools + @rm -rf vendor/ + @echo "--> Running glide install" + @glide install + +ensure_tools: + go get $(GOTOOLS) + + diff --git a/circle.yml b/circle.yml new file mode 100644 index 000000000..23ac4bd9f --- /dev/null +++ b/circle.yml @@ -0,0 +1,21 @@ +machine: + environment: + GOPATH: /home/ubuntu/.go_workspace + PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" + PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + GO15VENDOREXPERIMENT: 1 + hosts: + circlehost: 127.0.0.1 + localhost: 127.0.0.1 + +dependencies: + override: + - mkdir -p "$PROJECT_PARENT_PATH" + - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" + post: + - go version + +test: + override: + - "go version" + - "cd $PROJECT_PATH && make get_vendor_deps && make test" From b92bd8f6a8d10553d27fdff8cf6d7205c83904f3 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 2 May 2017 17:01:50 +0200 Subject: [PATCH 124/515] Separate out PrepareBaseCmd, try to set env vars --- cli/setup.go | 136 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 
insertions(+) create mode 100644 cli/setup.go diff --git a/cli/setup.go b/cli/setup.go new file mode 100644 index 000000000..be5735d92 --- /dev/null +++ b/cli/setup.go @@ -0,0 +1,136 @@ +package cli + +import ( + "fmt" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + data "github.com/tendermint/go-wire/data" + "github.com/tendermint/go-wire/data/base58" +) + +const ( + RootFlag = "root" + OutputFlag = "output" + EncodingFlag = "encoding" +) + +// PrepareBaseCmd is meant for tendermint and other servers +func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { + cobra.OnInitialize(func() { initEnv(envPrefix) }) + cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "root directory for config and data") + cmd.PersistentPreRunE = multiE(bindFlags, cmd.PersistentPreRunE) + return func() { execute(cmd) } +} + +// PrepareMainCmd is meant for client side libs that want some more flags +func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { + cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") + cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") + cmd.PersistentPreRunE = multiE(setEncoding, validateOutput, cmd.PersistentPreRunE) + return PrepareBaseCmd(cmd, envPrefix, defautRoot) +} + +// initEnv sets to use ENV variables if set. +func initEnv(prefix string) { + copyEnvVars(prefix) + + // env variables with TM prefix (eg. 
TM_ROOT) + viper.SetEnvPrefix(prefix) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.AutomaticEnv() +} + +// This copies all variables like TMROOT to TM_ROOT, +// so we can support both formats for the user +func copyEnvVars(prefix string) { + prefix = strings.ToUpper(prefix) + ps := prefix + "_" + for _, e := range os.Environ() { + kv := strings.SplitN(e, "=", 1) + k, v := kv[0], kv[1] + if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { + k2 := strings.Replace(k, prefix, ps, 1) + os.Setenv(k2, v) + } + } +} + +// execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func execute(cmd *cobra.Command) { + // TODO: this can do something cooler with debug and log-levels + if err := cmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} + +type wrapE func(cmd *cobra.Command, args []string) error + +func multiE(fs ...wrapE) wrapE { + return func(cmd *cobra.Command, args []string) error { + for _, f := range fs { + if f != nil { + if err := f(cmd, args); err != nil { + return err + } + } + } + return nil + } +} + +func bindFlags(cmd *cobra.Command, args []string) error { + // cmd.Flags() includes flags from this command and all persistent flags from the parent + if err := viper.BindPFlags(cmd.Flags()); err != nil { + return err + } + + // rootDir is command line flag, env variable, or default $HOME/.tlc + rootDir := viper.GetString("root") + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(rootDir) // search root directory + + // If a config file is found, read it in. 
+ if err := viper.ReadInConfig(); err == nil { + // stderr, so if we redirect output to json file, this doesn't appear + // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) + } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + // we ignore not found error, only parse error + // stderr, so if we redirect output to json file, this doesn't appear + fmt.Fprintf(os.Stderr, "%#v", err) + } + return nil +} + +// setEncoding reads the encoding flag +func setEncoding(cmd *cobra.Command, args []string) error { + // validate and set encoding + enc := viper.GetString("encoding") + switch enc { + case "hex": + data.Encoder = data.HexEncoder + case "b64": + data.Encoder = data.B64Encoder + case "btc": + data.Encoder = base58.BTCEncoder + default: + return errors.Errorf("Unsupported encoding: %s", enc) + } + return nil +} + +func validateOutput(cmd *cobra.Command, args []string) error { + // validate output format + output := viper.GetString(OutputFlag) + switch output { + case "text", "json": + default: + return errors.Errorf("Unsupported output format: %s", output) + } + return nil +} From d4ab9679d71c8fc174284696d15930cb799fa24f Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 2 May 2017 17:16:22 +0200 Subject: [PATCH 125/515] Fix up error in copyEnv --- cli/setup.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index be5735d92..5a7218a80 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -50,11 +50,13 @@ func copyEnvVars(prefix string) { prefix = strings.ToUpper(prefix) ps := prefix + "_" for _, e := range os.Environ() { - kv := strings.SplitN(e, "=", 1) - k, v := kv[0], kv[1] - if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { - k2 := strings.Replace(k, prefix, ps, 1) - os.Setenv(k2, v) + kv := strings.SplitN(e, "=", 2) + if len(kv) == 2 { + k, v := kv[0], kv[1] + if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { + k2 := strings.Replace(k, prefix, ps, 1) + 
os.Setenv(k2, v) + } } } } From a95a60cb0bece614db9c0d16faade4aaad0dfab5 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 May 2017 14:50:24 -0400 Subject: [PATCH 126/515] cli: support --root and --home --- cli/setup.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 5a7218a80..d7a11e069 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -14,6 +14,7 @@ import ( const ( RootFlag = "root" + HomeFlag = "home" OutputFlag = "output" EncodingFlag = "encoding" ) @@ -21,7 +22,8 @@ const ( // PrepareBaseCmd is meant for tendermint and other servers func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { cobra.OnInitialize(func() { initEnv(envPrefix) }) - cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "root directory for config and data") + cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "DEPRECATED. Use --home") + cmd.PersistentFlags().StringP(HomeFlag, "h", defautRoot, "root directory for config and data") cmd.PersistentPreRunE = multiE(bindFlags, cmd.PersistentPreRunE) return func() { execute(cmd) } } @@ -93,7 +95,11 @@ func bindFlags(cmd *cobra.Command, args []string) error { } // rootDir is command line flag, env variable, or default $HOME/.tlc - rootDir := viper.GetString("root") + // NOTE: we support both --root and --home for now, but eventually only --home + rootDir := viper.GetString(HomeFlag) + if !viper.IsSet(HomeFlag) && viper.IsSet(RootFlag) { + rootDir = viper.GetString(RootFlag) + } viper.SetConfigName("config") // name of config file (without extension) viper.AddConfigPath(rootDir) // search root directory From 7becd35126765a5cad3018c1efc3922cd2f1a0d2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 May 2017 14:57:32 -0400 Subject: [PATCH 127/515] cli: more descriptive naming --- cli/setup.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index d7a11e069..14801ee52 100644 --- 
a/cli/setup.go +++ b/cli/setup.go @@ -8,6 +8,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" + data "github.com/tendermint/go-wire/data" "github.com/tendermint/go-wire/data/base58" ) @@ -24,7 +25,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { cobra.OnInitialize(func() { initEnv(envPrefix) }) cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "DEPRECATED. Use --home") cmd.PersistentFlags().StringP(HomeFlag, "h", defautRoot, "root directory for config and data") - cmd.PersistentPreRunE = multiE(bindFlags, cmd.PersistentPreRunE) + cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) return func() { execute(cmd) } } @@ -32,7 +33,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") - cmd.PersistentPreRunE = multiE(setEncoding, validateOutput, cmd.PersistentPreRunE) + cmd.PersistentPreRunE = concatCobraCmdFuncs(setEncoding, validateOutput, cmd.PersistentPreRunE) return PrepareBaseCmd(cmd, envPrefix, defautRoot) } @@ -73,9 +74,10 @@ func execute(cmd *cobra.Command) { } } -type wrapE func(cmd *cobra.Command, args []string) error +type cobraCmdFunc func(cmd *cobra.Command, args []string) error -func multiE(fs ...wrapE) wrapE { +// Returns a single function that calls each argument function in sequence +func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { return func(cmd *cobra.Command, args []string) error { for _, f := range fs { if f != nil { @@ -88,7 +90,8 @@ func multiE(fs ...wrapE) wrapE { } } -func bindFlags(cmd *cobra.Command, args []string) error { +// Bind all flags and read the config into viper +func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { // 
cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err From 435fd0ece75acd97910f7e617525bc31839730bc Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 09:25:04 +0200 Subject: [PATCH 128/515] Add clarifying comments as requested by Rigel --- cli/setup.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cli/setup.go b/cli/setup.go index 14801ee52..8120449c7 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -30,6 +30,9 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { } // PrepareMainCmd is meant for client side libs that want some more flags +// +// This adds --encoding (hex, btc, base64) and --output (text, json) to +// the command. These only really make sense in interactive commands. func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") @@ -77,6 +80,7 @@ func execute(cmd *cobra.Command) { type cobraCmdFunc func(cmd *cobra.Command, args []string) error // Returns a single function that calls each argument function in sequence +// RunE, PreRunE, PersistentPreRunE, etc. 
all have this same signature func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { return func(cmd *cobra.Command, args []string) error { for _, f := range fs { From 62427adbec0814203bfcc93ee20e40092b5f6f3c Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 10:02:21 +0200 Subject: [PATCH 129/515] First basic test case on setup functionality --- cli/setup_test.go | 122 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 cli/setup_test.go diff --git a/cli/setup_test.go b/cli/setup_test.go new file mode 100644 index 000000000..4e16bdfe2 --- /dev/null +++ b/cli/setup_test.go @@ -0,0 +1,122 @@ +package cli + +import ( + "bytes" + "io" + "os" + "strconv" + "testing" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Executable is the minimal interface to *corba.Command, so we can +// wrap if desired before the test +type Executable interface { + Execute() error +} + +func TestSetupEnv(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cases := []struct { + args []string + env map[string]string + expected string + }{ + {nil, nil, ""}, + {[]string{"--foobar", "bang!"}, nil, "bang!"}, + // make sure reset is good + {nil, nil, ""}, + // test both variants of the prefix + {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, + {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, + // and that cli overrides env... 
+ {[]string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo string + cmd := &cobra.Command{ + Use: "demo", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("foobar") + return nil + }, + } + cmd.Flags().String("foobar", "", "Some test value from config") + PrepareBaseCmd(cmd, "DEMO", "/qwerty/asdfgh") // some missing dir.. + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := runWithArgs(cmd, args, tc.env) + require.Nil(err, i) + assert.Equal(tc.expected, foo, i) + } +} + +// runWithArgs executes the given command with the specified command line args +// and environmental variables set. It returns any error returned from cmd.Execute() +func runWithArgs(cmd Executable, args []string, env map[string]string) error { + oargs := os.Args + oenv := map[string]string{} + // defer returns the environment back to normal + defer func() { + os.Args = oargs + for k, v := range oenv { + os.Setenv(k, v) + } + }() + + // set the args and env how we want them + os.Args = args + for k, v := range env { + // backup old value if there, to restore at end + ov := os.Getenv(k) + if ov != "" { + oenv[k] = ov + } + err := os.Setenv(k, v) + if err != nil { + return err + } + } + + // and finally run the command + return cmd.Execute() +} + +// runCaptureWithArgs executes the given command with the specified command line args +// and environmental variables set. 
It returns whatever was writen to +// stdout along with any error returned from cmd.Execute() +func runCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) { + old := os.Stdout // keep backup of the real stdout + r, w, _ := os.Pipe() + os.Stdout = w + defer func() { + os.Stdout = old // restoring the real stdout + }() + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + // io.Copy will end when we call w.Close() below + io.Copy(&buf, r) + outC <- buf.String() + }() + + // now run the command + err = runWithArgs(cmd, args, env) + + // and grab the stdout to return + w.Close() + output = <-outC + return output, err +} From 5637a7885430b695dc06fb9d9b7f034a2a0361b2 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 10:23:58 +0200 Subject: [PATCH 130/515] Test setting config file as root --- cli/setup_test.go | 71 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/cli/setup_test.go b/cli/setup_test.go index 4e16bdfe2..fb8e5655c 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -2,8 +2,11 @@ package cli import ( "bytes" + "fmt" "io" + "io/ioutil" "os" + "path/filepath" "strconv" "testing" @@ -61,6 +64,69 @@ func TestSetupEnv(t *testing.T) { } } +func writeConfig(vals map[string]string) (string, error) { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + return "", err + } + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(cdir, "config.toml") + err = ioutil.WriteFile(cfile, []byte(data), 0666) + return cdir, err +} + +func TestSetupConfig(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1, cval2 := "fubble", "wubble" + conf1, err := writeConfig(map[string]string{"boo": cval1}) + require.Nil(err) + // even with some ignored fields, should be no problem + conf2, err := writeConfig(map[string]string{"boo": cval2, "foo": "bar"}) + require.Nil(err) + + cases := []struct { + args []string + env map[string]string + expected string + }{ + {nil, nil, ""}, + // setting on the command line + {[]string{"--boo", "haha"}, nil, "haha"}, + {[]string{"--root", conf1}, nil, cval1}, + // test both variants of the prefix + {nil, map[string]string{"RD_BOO": "bang"}, "bang"}, + {nil, map[string]string{"RD_ROOT": conf1}, cval1}, + {nil, map[string]string{"RDROOT": conf2}, cval2}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo string + cmd := &cobra.Command{ + Use: "reader", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("boo") + return nil + }, + } + cmd.Flags().String("boo", "", "Some test value from config") + PrepareBaseCmd(cmd, "RD", "/qwerty/asdfgh") // some missing dir... + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := runWithArgs(cmd, args, tc.env) + require.Nil(err, i) + assert.Equal(tc.expected, foo, i) + } +} + // runWithArgs executes the given command with the specified command line args // and environmental variables set. 
It returns any error returned from cmd.Execute() func runWithArgs(cmd Executable, args []string, env map[string]string) error { @@ -78,10 +144,7 @@ func runWithArgs(cmd Executable, args []string, env map[string]string) error { os.Args = args for k, v := range env { // backup old value if there, to restore at end - ov := os.Getenv(k) - if ov != "" { - oenv[k] = ov - } + oenv[k] = os.Getenv(k) err := os.Setenv(k, v) if err != nil { return err From d05b8131a32c72139bf187834478c09889aa6b30 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 10:30:50 +0200 Subject: [PATCH 131/515] Updated glide with cobra/viper, fixed Makefile typo --- Makefile | 2 +- glide.lock | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++---- glide.yaml | 3 +++ 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 49acb091a..cd1a57346 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONEY: all test install get_vendor_deps ensure_tools +.PHONY: all test install get_vendor_deps ensure_tools GOTOOLS = \ github.com/Masterminds/glide diff --git a/glide.lock b/glide.lock index 003e3d618..b7d4fb8ff 100644 --- a/glide.lock +++ b/glide.lock @@ -1,16 +1,65 @@ -hash: 47e715510d6b57cff8dc4750b6b9d89a41469a8330a7a8bea1c044b2ac61e581 -updated: 2017-04-21T16:04:25.798163098-04:00 +hash: a28817fffc1bfbba980a957b7782a84ea574fb73d5dfb01730f7e304c9dee630 +updated: 2017-05-03T10:27:41.060683376+02:00 imports: +- name: github.com/fsnotify/fsnotify + version: 4da3e2cfbabc9f751898f250b49f2439785783a1 +- name: github.com/go-kit/kit + version: 0873e56b0faeae3a1d661b10d629135508ea5504 + subpackages: + - log + - log/level + - log/term +- name: github.com/go-logfmt/logfmt + version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 - name: github.com/golang/snappy - version: d9eb7a3d35ec988b8585d4a0068e462c27d28380 + version: 553a641470496b2327abcac10b36396bd98e45c9 +- name: 
github.com/hashicorp/hcl + version: 630949a3c5fa3c613328e1b8256052cbc2327c9b + subpackages: + - hcl/ast + - hcl/parser + - hcl/scanner + - hcl/strconv + - hcl/token + - json/parser + - json/scanner + - json/token +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/jmhodges/levigo version: c42d9e0ca023e2198120196f842701bb4c55d7b9 +- name: github.com/kr/logfmt + version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 +- name: github.com/magiconair/properties + version: 51463bfca2576e06c62a8504b5c0f06d61312647 - name: github.com/mattn/go-colorable version: d228849504861217f796da67fae4f6e347643f15 - name: github.com/mattn/go-isatty version: 30a891c33c7cde7b02a981314b4228ec99380cca +- name: github.com/mitchellh/mapstructure + version: 53818660ed4955e899c0bcafa97299a388bd7c8e +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 +- name: github.com/pelletier/go-toml + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a +- name: github.com/pkg/errors + version: bfd5150e4e41705ded2129ec33379de1cb90b513 +- name: github.com/spf13/afero + version: 9be650865eab0c12963d8753212f4f9c66cdcf12 + subpackages: + - mem +- name: github.com/spf13/cast + version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 +- name: github.com/spf13/cobra + version: fcd0c5a1df88f5d6784cb4feead962c3f3d0b66c +- name: github.com/spf13/jwalterweatherman + version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66 +- name: github.com/spf13/pflag + version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7 +- name: github.com/spf13/viper + version: 5d46e70da8c0b6f812e0b170b7a985753b5c63cb - name: github.com/syndtr/goleveldb version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65 subpackages: @@ -27,7 +76,10 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 4325edc613ad1e9286c8bb770ed40ad3fe647e6c + version: 334005c236d19c632fb5f073f9de3b0fab6a522b + subpackages: + - data + - data/base58 - name: 
github.com/tendermint/log15 version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6 subpackages: @@ -40,6 +92,13 @@ imports: version: d75a52659825e75fff6158388dddc6a5b04f9ba5 subpackages: - unix +- name: golang.org/x/text + version: f4b4367115ec2de254587813edaa901bc1c723a8 + subpackages: + - transform + - unicode/norm +- name: gopkg.in/yaml.v2 + version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b testImports: - name: github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 @@ -53,3 +112,4 @@ testImports: version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 subpackages: - assert + - require diff --git a/glide.yaml b/glide.yaml index a4c5dd2b6..b0f22d1a7 100644 --- a/glide.yaml +++ b/glide.yaml @@ -11,6 +11,9 @@ import: - package: golang.org/x/crypto subpackages: - ripemd160 +- package: github.com/go-logfmt/logfmt +- package: github.com/spf13/cobra +- package: github.com/spf13/viper testImport: - package: github.com/stretchr/testify subpackages: From ef3b9610a17f4d87d5cb03bfe9d69f1d416e1ce3 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 10:37:25 +0200 Subject: [PATCH 132/515] Fixed up the --home flag, ebuchman check this out --- cli/setup.go | 8 ++++++-- cli/setup_test.go | 3 +++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 8120449c7..b6f006583 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -24,7 +24,8 @@ const ( func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { cobra.OnInitialize(func() { initEnv(envPrefix) }) cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "DEPRECATED. 
Use --home") - cmd.PersistentFlags().StringP(HomeFlag, "h", defautRoot, "root directory for config and data") + // -h is already reserved for --help as part of the cobra framework + cmd.PersistentFlags().String(HomeFlag, "", "root directory for config and data") cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) return func() { execute(cmd) } } @@ -104,7 +105,10 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { // rootDir is command line flag, env variable, or default $HOME/.tlc // NOTE: we support both --root and --home for now, but eventually only --home rootDir := viper.GetString(HomeFlag) - if !viper.IsSet(HomeFlag) && viper.IsSet(RootFlag) { + // @ebuchman: viper.IsSet doesn't do what you think... + // Even a default of "" on the pflag marks it as set, + // simply by fact of having a pflag. + if rootDir == "" { rootDir = viper.GetString(RootFlag) } viper.SetConfigName("config") // name of config file (without extension) diff --git a/cli/setup_test.go b/cli/setup_test.go index fb8e5655c..47170fe21 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -103,6 +103,9 @@ func TestSetupConfig(t *testing.T) { {nil, map[string]string{"RD_BOO": "bang"}, "bang"}, {nil, map[string]string{"RD_ROOT": conf1}, cval1}, {nil, map[string]string{"RDROOT": conf2}, cval2}, + {nil, map[string]string{"RDHOME": conf1}, cval1}, + // and when both are set??? HOME wins every time! 
+ {[]string{"--root", conf1}, map[string]string{"RDHOME": conf2}, cval2}, } for idx, tc := range cases { From 8efeeb5f38e8647dcb1f162f3f1e58500c02f0ed Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 3 May 2017 11:25:07 +0200 Subject: [PATCH 133/515] Add --debug flag to return full stack trace on error --- cli/setup.go | 39 +++++++++++++++++++++++------- cli/setup_test.go | 61 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 80 insertions(+), 20 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index b6f006583..e55baf902 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -16,25 +16,36 @@ import ( const ( RootFlag = "root" HomeFlag = "home" + DebugFlag = "debug" OutputFlag = "output" EncodingFlag = "encoding" ) +// Executable is the minimal interface to *corba.Command, so we can +// wrap if desired before the test +type Executable interface { + Execute() error +} + // PrepareBaseCmd is meant for tendermint and other servers -func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { +func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { cobra.OnInitialize(func() { initEnv(envPrefix) }) cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "DEPRECATED. Use --home") // -h is already reserved for --help as part of the cobra framework + // do you want to try something else?? + // also, default must be empty, so we can detect this unset and fall back + // to --root / TM_ROOT / TMROOT cmd.PersistentFlags().String(HomeFlag, "", "root directory for config and data") + cmd.PersistentFlags().Bool(DebugFlag, false, "print out full stack trace on errors") cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) - return func() { execute(cmd) } + return Executor{cmd} } // PrepareMainCmd is meant for client side libs that want some more flags // // This adds --encoding (hex, btc, base64) and --output (text, json) to // the command. 
These only really make sense in interactive commands. -func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) func() { +func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") cmd.PersistentPreRunE = concatCobraCmdFuncs(setEncoding, validateOutput, cmd.PersistentPreRunE) @@ -68,14 +79,26 @@ func copyEnvVars(prefix string) { } } +// Executor wraps the cobra Command with a nicer Execute method +type Executor struct { + *cobra.Command +} + // execute adds all child commands to the root command sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. -func execute(cmd *cobra.Command) { - // TODO: this can do something cooler with debug and log-levels - if err := cmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) +func (e Executor) Execute() error { + e.SilenceUsage = true + e.SilenceErrors = true + err := e.Command.Execute() + if err != nil { + // TODO: something cooler with log-levels + if viper.GetBool(DebugFlag) { + fmt.Printf("ERROR: %+v\n", err) + } else { + fmt.Println("ERROR:", err.Error()) + } } + return err } type cobraCmdFunc func(cmd *cobra.Command, args []string) error diff --git a/cli/setup_test.go b/cli/setup_test.go index 47170fe21..d8e37d3a4 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -8,20 +8,16 @@ import ( "os" "path/filepath" "strconv" + "strings" "testing" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// Executable is the minimal interface to *corba.Command, so we can -// wrap if desired before the test -type Executable interface { - Execute() error -} - func TestSetupEnv(t *testing.T) { assert, require := assert.New(t), require.New(t) @@ -46,15 +42,15 @@ func 
TestSetupEnv(t *testing.T) { i := strconv.Itoa(idx) // test command that store value of foobar in local variable var foo string - cmd := &cobra.Command{ + demo := &cobra.Command{ Use: "demo", RunE: func(cmd *cobra.Command, args []string) error { foo = viper.GetString("foobar") return nil }, } - cmd.Flags().String("foobar", "", "Some test value from config") - PrepareBaseCmd(cmd, "DEMO", "/qwerty/asdfgh") // some missing dir.. + demo.Flags().String("foobar", "", "Some test value from config") + cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. viper.Reset() args := append([]string{cmd.Use}, tc.args...) @@ -112,15 +108,15 @@ func TestSetupConfig(t *testing.T) { i := strconv.Itoa(idx) // test command that store value of foobar in local variable var foo string - cmd := &cobra.Command{ + boo := &cobra.Command{ Use: "reader", RunE: func(cmd *cobra.Command, args []string) error { foo = viper.GetString("boo") return nil }, } - cmd.Flags().String("boo", "", "Some test value from config") - PrepareBaseCmd(cmd, "RD", "/qwerty/asdfgh") // some missing dir... + boo.Flags().String("boo", "", "Some test value from config") + cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
@@ -130,6 +126,47 @@ func TestSetupConfig(t *testing.T) { } } +func TestSetupDebug(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cases := []struct { + args []string + env map[string]string + long bool + expected string + }{ + {nil, nil, false, "Debug flag = false"}, + {[]string{"--debug"}, nil, true, "Debug flag = true"}, + {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, + {nil, map[string]string{"DBG_DEBUG": "true"}, true, "Debug flag = true"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + debug := &cobra.Command{ + Use: "debug", + RunE: func(cmd *cobra.Command, args []string) error { + return errors.Errorf("Debug flag = %t", viper.GetBool(DebugFlag)) + }, + } + cmd := PrepareBaseCmd(debug, "DBG", "/qwerty/asdfgh") // some missing dir.. + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + out, err := runCaptureWithArgs(cmd, args, tc.env) + require.NotNil(err, i) + msg := strings.Split(out, "\n") + desired := fmt.Sprintf("ERROR: %s", tc.expected) + assert.Equal(desired, msg[0], i) + if tc.long && assert.True(len(msg) > 2, i) { + // the next line starts the stack trace... + assert.Contains(msg[1], "TestSetupDebug", i) + assert.Contains(msg[2], "setup_test.go", i) + } + } +} + // runWithArgs executes the given command with the specified command line args // and environmental variables set. 
It returns any error returned from cmd.Execute() func runWithArgs(cmd Executable, args []string, env map[string]string) error { From ee45dbdc8b7947e95f403b736711ff8000f2927d Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Thu, 4 May 2017 19:16:58 +0200 Subject: [PATCH 134/515] Test how unmarshall plays with flags/env/config/default struct --- cli/setup_test.go | 77 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/cli/setup_test.go b/cli/setup_test.go index d8e37d3a4..169b14004 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -126,6 +126,83 @@ func TestSetupConfig(t *testing.T) { } } +type DemoConfig struct { + Name string `mapstructure:"name"` + Age int `mapstructure:"age"` + Unused int `mapstructure:"unused"` +} + +func TestSetupUnmarshal(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // we pre-create two config files we can refer to in the rest of + // the test cases. + cval1, cval2 := "someone", "else" + conf1, err := writeConfig(map[string]string{"name": cval1}) + require.Nil(err) + // even with some ignored fields, should be no problem + conf2, err := writeConfig(map[string]string{"name": cval2, "foo": "bar"}) + require.Nil(err) + + // unused is not declared on a flag and remains from base + base := DemoConfig{ + Name: "default", + Age: 42, + Unused: -7, + } + c := func(name string, age int) DemoConfig { + r := base + // anything set on the flags as a default is used over + // the default config object + r.Name = "from-flag" + if name != "" { + r.Name = name + } + if age != 0 { + r.Age = age + } + return r + } + + cases := []struct { + args []string + env map[string]string + expected DemoConfig + }{ + {nil, nil, c("", 0)}, + // setting on the command line + {[]string{"--name", "haha"}, nil, c("haha", 0)}, + {[]string{"--root", conf1}, nil, c(cval1, 0)}, + // test both variants of the prefix + {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, + {nil, map[string]string{"MR_ROOT": 
conf1}, c(cval1, 0)}, + {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + cfg := base + marsh := &cobra.Command{ + Use: "marsh", + RunE: func(cmd *cobra.Command, args []string) error { + return viper.Unmarshal(&cfg) + }, + } + marsh.Flags().String("name", "from-flag", "Some test value from config") + // if we want a flag to use the proper default, then copy it + // from the default config here + marsh.Flags().Int("age", base.Age, "Some test value from config") + cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := runWithArgs(cmd, args, tc.env) + require.Nil(err, i) + assert.Equal(tc.expected, cfg, i) + } +} + func TestSetupDebug(t *testing.T) { assert, require := assert.New(t), require.New(t) From 3585a542a0e07d0e9d396b2f809c2826c8536437 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 5 May 2017 00:48:23 -0400 Subject: [PATCH 135/515] cli: viper.Set(HomeFlag, rootDir) --- cli/setup.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index e55baf902..7a4a2098e 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -127,12 +127,12 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { // rootDir is command line flag, env variable, or default $HOME/.tlc // NOTE: we support both --root and --home for now, but eventually only --home + // Also ensure we set the correct rootDir under HomeFlag so we dont need to + // repeat this logic elsewhere. rootDir := viper.GetString(HomeFlag) - // @ebuchman: viper.IsSet doesn't do what you think... - // Even a default of "" on the pflag marks it as set, - // simply by fact of having a pflag. 
if rootDir == "" { rootDir = viper.GetString(RootFlag) + viper.Set(HomeFlag, rootDir) } viper.SetConfigName("config") // name of config file (without extension) viper.AddConfigPath(rootDir) // search root directory From d0132b0fffc2eb79c7a129a2faeae08cc0e5fcad Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Fri, 5 May 2017 14:46:04 +0200 Subject: [PATCH 136/515] Moved helper functions into non-test code for reuse elsewhere --- .gitignore | 1 + cli/helper.go | 64 ++++++++++++++++++++++++++++++++++++++++++++ cli/setup_test.go | 68 +++-------------------------------------------- 3 files changed, 69 insertions(+), 64 deletions(-) create mode 100644 cli/helper.go diff --git a/.gitignore b/.gitignore index 381931381..f37225baa 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *.swp *.swo +vendor diff --git a/cli/helper.go b/cli/helper.go new file mode 100644 index 000000000..b0662c78d --- /dev/null +++ b/cli/helper.go @@ -0,0 +1,64 @@ +package cli + +import ( + "bytes" + "io" + "os" +) + +// RunWithArgs executes the given command with the specified command line args +// and environmental variables set. It returns any error returned from cmd.Execute() +func RunWithArgs(cmd Executable, args []string, env map[string]string) error { + oargs := os.Args + oenv := map[string]string{} + // defer returns the environment back to normal + defer func() { + os.Args = oargs + for k, v := range oenv { + os.Setenv(k, v) + } + }() + + // set the args and env how we want them + os.Args = args + for k, v := range env { + // backup old value if there, to restore at end + oenv[k] = os.Getenv(k) + err := os.Setenv(k, v) + if err != nil { + return err + } + } + + // and finally run the command + return cmd.Execute() +} + +// RunCaptureWithArgs executes the given command with the specified command line args +// and environmental variables set. 
It returns whatever was writen to +// stdout along with any error returned from cmd.Execute() +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) { + old := os.Stdout // keep backup of the real stdout + r, w, _ := os.Pipe() + os.Stdout = w + defer func() { + os.Stdout = old // restoring the real stdout + }() + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + // io.Copy will end when we call w.Close() below + io.Copy(&buf, r) + outC <- buf.String() + }() + + // now run the command + err = RunWithArgs(cmd, args, env) + + // and grab the stdout to return + w.Close() + output = <-outC + return output, err +} diff --git a/cli/setup_test.go b/cli/setup_test.go index 169b14004..34877209d 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -1,11 +1,8 @@ package cli import ( - "bytes" "fmt" - "io" "io/ioutil" - "os" "path/filepath" "strconv" "strings" @@ -54,7 +51,7 @@ func TestSetupEnv(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := runWithArgs(cmd, args, tc.env) + err := RunWithArgs(cmd, args, tc.env) require.Nil(err, i) assert.Equal(tc.expected, foo, i) } @@ -120,7 +117,7 @@ func TestSetupConfig(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := runWithArgs(cmd, args, tc.env) + err := RunWithArgs(cmd, args, tc.env) require.Nil(err, i) assert.Equal(tc.expected, foo, i) } @@ -197,7 +194,7 @@ func TestSetupUnmarshal(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := runWithArgs(cmd, args, tc.env) + err := RunWithArgs(cmd, args, tc.env) require.Nil(err, i) assert.Equal(tc.expected, cfg, i) } @@ -231,7 +228,7 @@ func TestSetupDebug(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
- out, err := runCaptureWithArgs(cmd, args, tc.env) + out, err := RunCaptureWithArgs(cmd, args, tc.env) require.NotNil(err, i) msg := strings.Split(out, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) @@ -243,60 +240,3 @@ func TestSetupDebug(t *testing.T) { } } } - -// runWithArgs executes the given command with the specified command line args -// and environmental variables set. It returns any error returned from cmd.Execute() -func runWithArgs(cmd Executable, args []string, env map[string]string) error { - oargs := os.Args - oenv := map[string]string{} - // defer returns the environment back to normal - defer func() { - os.Args = oargs - for k, v := range oenv { - os.Setenv(k, v) - } - }() - - // set the args and env how we want them - os.Args = args - for k, v := range env { - // backup old value if there, to restore at end - oenv[k] = os.Getenv(k) - err := os.Setenv(k, v) - if err != nil { - return err - } - } - - // and finally run the command - return cmd.Execute() -} - -// runCaptureWithArgs executes the given command with the specified command line args -// and environmental variables set. 
It returns whatever was writen to -// stdout along with any error returned from cmd.Execute() -func runCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) { - old := os.Stdout // keep backup of the real stdout - r, w, _ := os.Pipe() - os.Stdout = w - defer func() { - os.Stdout = old // restoring the real stdout - }() - - outC := make(chan string) - // copy the output in a separate goroutine so printing can't block indefinitely - go func() { - var buf bytes.Buffer - // io.Copy will end when we call w.Close() below - io.Copy(&buf, r) - outC <- buf.String() - }() - - // now run the command - err = runWithArgs(cmd, args, env) - - // and grab the stdout to return - w.Close() - output = <-outC - return output, err -} From 2f02ed18e9b706467c9474d024a25a0b7a9c0e97 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Fri, 5 May 2017 14:58:53 +0200 Subject: [PATCH 137/515] One more helper function for cli tests... --- cli/helper.go | 20 ++++++++++++++++++++ cli/setup_test.go | 24 ++++-------------------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/cli/helper.go b/cli/helper.go index b0662c78d..79654bc34 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -2,10 +2,30 @@ package cli import ( "bytes" + "fmt" "io" + "io/ioutil" "os" + "path/filepath" ) +// WriteDemoConfig writes a toml file with the given values. +// It returns the RootDir the config.toml file is stored in, +// or an error if writing was impossible +func WriteDemoConfig(vals map[string]string) (string, error) { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + return "", err + } + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(cdir, "config.toml") + err = ioutil.WriteFile(cfile, []byte(data), 0666) + return cdir, err +} + // RunWithArgs executes the given command with the specified command line args // and environmental variables set. 
It returns any error returned from cmd.Execute() func RunWithArgs(cmd Executable, args []string, env map[string]string) error { diff --git a/cli/setup_test.go b/cli/setup_test.go index 34877209d..6396b769e 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -2,8 +2,6 @@ package cli import ( "fmt" - "io/ioutil" - "path/filepath" "strconv" "strings" "testing" @@ -57,30 +55,16 @@ func TestSetupEnv(t *testing.T) { } } -func writeConfig(vals map[string]string) (string, error) { - cdir, err := ioutil.TempDir("", "test-cli") - if err != nil { - return "", err - } - data := "" - for k, v := range vals { - data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) - } - cfile := filepath.Join(cdir, "config.toml") - err = ioutil.WriteFile(cfile, []byte(data), 0666) - return cdir, err -} - func TestSetupConfig(t *testing.T) { assert, require := assert.New(t), require.New(t) // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "fubble", "wubble" - conf1, err := writeConfig(map[string]string{"boo": cval1}) + conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) require.Nil(err) // even with some ignored fields, should be no problem - conf2, err := writeConfig(map[string]string{"boo": cval2, "foo": "bar"}) + conf2, err := WriteDemoConfig(map[string]string{"boo": cval2, "foo": "bar"}) require.Nil(err) cases := []struct { @@ -135,10 +119,10 @@ func TestSetupUnmarshal(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. 
cval1, cval2 := "someone", "else" - conf1, err := writeConfig(map[string]string{"name": cval1}) + conf1, err := WriteDemoConfig(map[string]string{"name": cval1}) require.Nil(err) // even with some ignored fields, should be no problem - conf2, err := writeConfig(map[string]string{"name": cval2, "foo": "bar"}) + conf2, err := WriteDemoConfig(map[string]string{"name": cval2, "foo": "bar"}) require.Nil(err) // unused is not declared on a flag and remains from base From ed76afd409058b162748bb77232d584ea89fe498 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 27 Apr 2017 13:42:03 +0400 Subject: [PATCH 138/515] Logger interface and tmLogger impl based on go-kit --- .gitignore | 3 +- common/service.go | 56 +++++++++---------- events/events.go | 2 +- events/log.go | 7 --- glide.lock | 40 +++++++------- glide.yaml | 16 +++++- log/logger.go | 27 +++++++++ log/nop_logger.go | 18 ++++++ log/nop_logger_test.go | 18 ++++++ log/tm_logger.go | 74 +++++++++++++++++++++++++ log/tm_logger_test.go | 41 ++++++++++++++ log/tmfmt_logger.go | 116 +++++++++++++++++++++++++++++++++++++++ log/tmfmt_logger_test.go | 110 +++++++++++++++++++++++++++++++++++++ logger/log.go | 11 ++-- 14 files changed, 472 insertions(+), 67 deletions(-) delete mode 100644 events/log.go create mode 100644 log/logger.go create mode 100644 log/nop_logger.go create mode 100644 log/nop_logger_test.go create mode 100644 log/tm_logger.go create mode 100644 log/tm_logger_test.go create mode 100644 log/tmfmt_logger.go create mode 100644 log/tmfmt_logger_test.go diff --git a/.gitignore b/.gitignore index f37225baa..62f28681c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,2 @@ -*.swp -*.swo vendor +.glide diff --git a/common/service.go b/common/service.go index 6a274a798..71fc03cb9 100644 --- a/common/service.go +++ b/common/service.go @@ -3,7 +3,7 @@ package common import ( "sync/atomic" - "github.com/tendermint/log15" + "github.com/tendermint/tmlibs/log" ) type Service interface { @@ -19,6 +19,8 @@ type Service 
interface { IsRunning() bool String() string + + SetLogger(log.Logger) } /* @@ -64,7 +66,7 @@ Typical usage: } */ type BaseService struct { - log log15.Logger + Logger log.Logger name string started uint32 // atomic stopped uint32 // atomic @@ -74,27 +76,31 @@ type BaseService struct { impl Service } -func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { +func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { + if logger == nil { + logger = log.NewNopLogger() + } + return &BaseService{ - log: log, - name: name, - Quit: make(chan struct{}), - impl: impl, + Logger: logger, + name: name, + Quit: make(chan struct{}), + impl: impl, } } +func (bs *BaseService) SetLogger(l log.Logger) { + bs.Logger = l +} + // Implements Servce func (bs *BaseService) Start() (bool, error) { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { - if bs.log != nil { - bs.log.Warn(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) - } + bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) return false, nil } else { - if bs.log != nil { - bs.log.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) - } + bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) } err := bs.impl.OnStart() if err != nil { @@ -104,9 +110,7 @@ func (bs *BaseService) Start() (bool, error) { } return true, err } else { - if bs.log != nil { - bs.log.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) - } + bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) return false, nil } } @@ -119,16 +123,12 @@ func (bs *BaseService) OnStart() error { return nil } // Implements Service func (bs *BaseService) Stop() bool { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - if bs.log != nil { - bs.log.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) - } + bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) 
bs.impl.OnStop() close(bs.Quit) return true } else { - if bs.log != nil { - bs.log.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) - } + bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) return false } } @@ -147,9 +147,7 @@ func (bs *BaseService) Reset() (bool, error) { bs.Quit = make(chan struct{}) return true, bs.impl.OnReset() } else { - if bs.log != nil { - bs.log.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) - } + bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) return false, nil } // never happens @@ -182,11 +180,11 @@ type QuitService struct { BaseService } -func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { - if log != nil { - log.Warn("QuitService is deprecated, use BaseService instead") +func NewQuitService(logger log.Logger, name string, impl Service) *QuitService { + if logger != nil { + logger.Info("QuitService is deprecated, use BaseService instead") } return &QuitService{ - BaseService: *NewBaseService(log, name, impl), + BaseService: *NewBaseService(logger, name, impl), } } diff --git a/events/events.go b/events/events.go index 487d120b3..12aa07813 100644 --- a/events/events.go +++ b/events/events.go @@ -45,7 +45,7 @@ type eventSwitch struct { func NewEventSwitch() EventSwitch { evsw := &eventSwitch{} - evsw.BaseService = *NewBaseService(log, "EventSwitch", evsw) + evsw.BaseService = *NewBaseService(nil, "EventSwitch", evsw) return evsw } diff --git a/events/log.go b/events/log.go deleted file mode 100644 index adb6bab09..000000000 --- a/events/log.go +++ /dev/null @@ -1,7 +0,0 @@ -package events - -import ( - "github.com/tendermint/tmlibs/logger" -) - -var log = logger.New("module", "events") diff --git a/glide.lock b/glide.lock index b7d4fb8ff..37fe6ba4b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: a28817fffc1bfbba980a957b7782a84ea574fb73d5dfb01730f7e304c9dee630 -updated: 
2017-05-03T10:27:41.060683376+02:00 +hash: 69359a39dbb6957c9f09167520317ad72d4bfa75f37a614b347e2510768c8a42 +updated: 2017-05-05T17:40:30.424309209Z imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 @@ -12,11 +12,11 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 + version: 7a2f19628aabfe68f0766b59e74d6315f8347d22 - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl - version: 630949a3c5fa3c613328e1b8256052cbc2327c9b + version: a4b07c25de5ff55ad3b8936cea69a79a3d95a855 subpackages: - hcl/ast - hcl/parser @@ -35,17 +35,17 @@ imports: - name: github.com/magiconair/properties version: 51463bfca2576e06c62a8504b5c0f06d61312647 - name: github.com/mattn/go-colorable - version: d228849504861217f796da67fae4f6e347643f15 + version: ded68f7a9561c023e790de24279db7ebf473ea80 - name: github.com/mattn/go-isatty - version: 30a891c33c7cde7b02a981314b4228ec99380cca + version: fc9e8d8ef48496124e79ae0df75490096eccf6fe - name: github.com/mitchellh/mapstructure - version: 53818660ed4955e899c0bcafa97299a388bd7c8e + version: cc8532a8e9a55ea36402aa21efdf403a60d34096 - name: github.com/pelletier/go-buffruneio version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 97253b98df84f9eef872866d079e74b8265150f1 - name: github.com/pkg/errors - version: bfd5150e4e41705ded2129ec33379de1cb90b513 + version: c605e284fe17294bda444b34710735b29d1a9d90 - name: github.com/spf13/afero version: 9be650865eab0c12963d8753212f4f9c66cdcf12 subpackages: @@ -53,15 +53,15 @@ imports: - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - version: fcd0c5a1df88f5d6784cb4feead962c3f3d0b66c + version: 
db6b9a8b3f3f400c8ecb4a4d7d02245b8facad66 - name: github.com/spf13/jwalterweatherman version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66 - name: github.com/spf13/pflag - version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7 + version: 80fe0fb4eba54167e2ccae1c6c950e72abf61b73 - name: github.com/spf13/viper - version: 5d46e70da8c0b6f812e0b170b7a985753b5c63cb + version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 - name: github.com/syndtr/goleveldb - version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65 + version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 subpackages: - leveldb - leveldb/cache @@ -76,24 +76,24 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 334005c236d19c632fb5f073f9de3b0fab6a522b + version: b53add0b622662731985485f3a19be7f684660b8 subpackages: - data - data/base58 - name: github.com/tendermint/log15 - version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6 + version: f91285dece9f4875421b481da3e613d83d44f29b subpackages: - term - name: golang.org/x/crypto - version: 7c6cc321c680f03b9ef0764448e780704f486b51 + version: 5a033cc77e57eca05bdb50522851d29e03569cbe subpackages: - ripemd160 - name: golang.org/x/sys - version: d75a52659825e75fff6158388dddc6a5b04f9ba5 + version: 9ccfe848b9db8435a24c424abbc07a921adf1df5 subpackages: - unix - name: golang.org/x/text - version: f4b4367115ec2de254587813edaa901bc1c723a8 + version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4 subpackages: - transform - unicode/norm @@ -101,7 +101,7 @@ imports: version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b testImports: - name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 subpackages: - spew - name: github.com/pmezard/go-difflib @@ -109,7 +109,7 @@ testImports: subpackages: - difflib - name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + version: 4d4bfba8f1d1027c4fdbe371823030df51419987 subpackages: - assert - require diff --git a/glide.yaml 
b/glide.yaml index b0f22d1a7..2df880175 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,20 +1,30 @@ package: github.com/tendermint/tmlibs import: +- package: github.com/go-kit/kit + subpackages: + - log + - log/level + - log/term +- package: github.com/go-logfmt/logfmt - package: github.com/jmhodges/levigo +- package: github.com/pkg/errors +- package: github.com/spf13/cobra +- package: github.com/spf13/viper - package: github.com/syndtr/goleveldb subpackages: - leveldb - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire + subpackages: + - data + - data/base58 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: - ripemd160 -- package: github.com/go-logfmt/logfmt -- package: github.com/spf13/cobra -- package: github.com/spf13/viper testImport: - package: github.com/stretchr/testify subpackages: - assert + - require diff --git a/log/logger.go b/log/logger.go new file mode 100644 index 000000000..790bcad74 --- /dev/null +++ b/log/logger.go @@ -0,0 +1,27 @@ +package log + +import ( + "fmt" + + kitlog "github.com/go-kit/kit/log" +) + +// Logger is what any Tendermint library should take. +type Logger interface { + Debug(msg string, keyvals ...interface{}) error + Info(msg string, keyvals ...interface{}) error + Error(msg string, keyvals ...interface{}) error +} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. +func With(logger Logger, keyvals ...interface{}) Logger { + switch logger.(type) { + case *tmLogger: + return &tmLogger{kitlog.With(logger.(*tmLogger).srcLogger, keyvals...)} + case *nopLogger: + return logger + default: + panic(fmt.Sprintf("Unexpected logger of type %T", logger)) + } +} diff --git a/log/nop_logger.go b/log/nop_logger.go new file mode 100644 index 000000000..b6e312631 --- /dev/null +++ b/log/nop_logger.go @@ -0,0 +1,18 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. 
+func NewNopLogger() Logger { return &nopLogger{} } + +func (nopLogger) Info(string, ...interface{}) error { + return nil +} + +func (nopLogger) Debug(string, ...interface{}) error { + return nil +} + +func (nopLogger) Error(string, ...interface{}) error { + return nil +} diff --git a/log/nop_logger_test.go b/log/nop_logger_test.go new file mode 100644 index 000000000..9757d4f19 --- /dev/null +++ b/log/nop_logger_test.go @@ -0,0 +1,18 @@ +package log_test + +import ( + "testing" + + "github.com/tendermint/tmlibs/log" +) + +func TestNopLogger(t *testing.T) { + t.Parallel() + logger := log.NewNopLogger() + if err := logger.Info("Hello", "abc", 123); err != nil { + t.Error(err) + } + if err := log.With(logger, "def", "ghi").Debug(""); err != nil { + t.Error(err) + } +} diff --git a/log/tm_logger.go b/log/tm_logger.go new file mode 100644 index 000000000..0d01a970e --- /dev/null +++ b/log/tm_logger.go @@ -0,0 +1,74 @@ +package log + +import ( + "fmt" + "io" + + kitlog "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/go-kit/kit/log/term" +) + +const ( + msgKey = "_msg" // "_" prefixed to avoid collisions +) + +type tmLogger struct { + srcLogger kitlog.Logger +} + +// NewTmTermLogger returns a logger that encodes msg and keyvals to the Writer +// using go-kit's log as an underlying logger and our custom formatter. Note +// that underlying logger could be swapped with something else. 
+func NewTmLogger(w io.Writer) Logger { + // Color by level value + colorFn := func(keyvals ...interface{}) term.FgBgColor { + if keyvals[0] != level.Key() { + panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) + } + switch keyvals[1].(level.Value).String() { + case "debug": + return term.FgBgColor{Fg: term.DarkGray} + case "error": + return term.FgBgColor{Fg: term.Red} + default: + return term.FgBgColor{} + } + } + + srcLogger := term.NewLogger(w, NewTmfmtLogger, colorFn) + srcLogger = level.NewFilter(srcLogger, level.AllowInfo()) + return &tmLogger{srcLogger} +} + +// WithLevel returns a copy of the logger with a level set to lvl. +func (l *tmLogger) WithLevel(lvl string) Logger { + switch lvl { + case "info": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowInfo())} + case "debug": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowDebug())} + case "error": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowError())} + default: + panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) + } +} + +// Info logs a message at level Info. +func (l *tmLogger) Info(msg string, keyvals ...interface{}) error { + lWithLevel := level.Info(l.srcLogger) + return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) +} + +// Debug logs a message at level Debug. +func (l *tmLogger) Debug(msg string, keyvals ...interface{}) error { + lWithLevel := level.Debug(l.srcLogger) + return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) +} + +// Error logs a message at level Error. +func (l *tmLogger) Error(msg string, keyvals ...interface{}) error { + lWithLevel := level.Error(l.srcLogger) + return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) 
+} diff --git a/log/tm_logger_test.go b/log/tm_logger_test.go new file mode 100644 index 000000000..f301c9fde --- /dev/null +++ b/log/tm_logger_test.go @@ -0,0 +1,41 @@ +package log_test + +import ( + "io/ioutil" + "testing" + + "github.com/tendermint/tmlibs/log" +) + +func TestTmLogger(t *testing.T) { + t.Parallel() + logger := log.NewTmLogger(ioutil.Discard) + if err := logger.Info("Hello", "abc", 123); err != nil { + t.Error(err) + } + if err := log.With(logger, "def", "ghi").Debug(""); err != nil { + t.Error(err) + } +} + +func BenchmarkTmLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewTmLogger(ioutil.Discard), baseInfoMessage) +} + +func BenchmarkTmLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTmLogger(ioutil.Discard), withInfoMessage) +} + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := log.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } + withInfoMessage = func(logger log.Logger) { log.With(logger, "a", "b").Info("c", "d", "f") } +) diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go new file mode 100644 index 000000000..db689c084 --- /dev/null +++ b/log/tmfmt_logger.go @@ -0,0 +1,116 @@ +package log + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + kitlog "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/go-logfmt/logfmt" +) + +type tmfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *tmfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var tmfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc tmfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type tmfmtLogger struct { + w io.Writer +} + +// NewTmFmtLogger returns a logger that encodes keyvals to the Writer in +// Tendermint custom format. 
+// +// Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewTmfmtLogger(w io.Writer) kitlog.Logger { + return &tmfmtLogger{w} +} + +func (l tmfmtLogger) Log(keyvals ...interface{}) error { + enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) + enc.Reset() + defer tmfmtEncoderPool.Put(enc) + + lvl := "none" + msg := "unknown" + lvlIndex := -1 + msgIndex := -1 + + for i := 0; i < len(keyvals)-1; i += 2 { + // Extract level + if keyvals[i] == level.Key() { + lvlIndex = i + switch keyvals[i+1].(type) { + case string: + lvl = keyvals[i+1].(string) + case level.Value: + lvl = keyvals[i+1].(level.Value).String() + default: + panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) + } + continue + } + + // and message + if keyvals[i] == msgKey { + msgIndex = i + msg = keyvals[i+1].(string) + continue + } + + if lvlIndex > 0 && msgIndex > 0 { // found all we're looking for + break + } + } + + // Form a custom Tendermint line + // + // Example: + // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // + // Description: + // D - first character of the level, uppercase (ASCII only) + // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // Stopping ... - message + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + + for i := 0; i < len(keyvals)-1; i += 2 { + if i == lvlIndex || i == msgIndex { + continue + } + if err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]); err != nil { + return err + } + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/log/tmfmt_logger_test.go b/log/tmfmt_logger_test.go new file mode 100644 index 000000000..ca5ee55ea --- /dev/null +++ b/log/tmfmt_logger_test.go @@ -0,0 +1,110 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "regexp" + "testing" + + kitlog "github.com/go-kit/kit/log" + "github.com/stretchr/testify/assert" + "github.com/tendermint/tmlibs/log" +) + +func TestTmfmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewTmfmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) + + buf.Reset() + err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}) + assert.NotNil(t, err) + + buf.Reset() + if err := logger.Log("level", "error"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("_msg", "Hello"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) +} + +func BenchmarkTmfmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTmfmtLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkTmfmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTmfmtLogger(ioutil.Discard), withMessage) +} + +func TestTmfmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewTmfmtLogger(ioutil.Discard), 10000) +} + +func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { + lc := kitlog.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + 
for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } + withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } +) + +// These test are designed to be run with the race detector. + +func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger kitlog.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/logger/log.go b/logger/log.go index 84f71c34f..2f4faef6b 100644 --- a/logger/log.go +++ b/logger/log.go @@ -1,10 +1,11 @@ +// DEPRECATED! Use newer log package. package logger import ( "os" - . "github.com/tendermint/tmlibs/common" "github.com/tendermint/log15" + . "github.com/tendermint/tmlibs/common" ) var mainHandler log15.Handler @@ -40,14 +41,14 @@ func MainHandler() log15.Handler { return mainHandler } -func BypassHandler() log15.Handler { - return bypassHandler -} - func New(ctx ...interface{}) log15.Logger { return NewMain(ctx...) } +func BypassHandler() log15.Handler { + return bypassHandler +} + func NewMain(ctx ...interface{}) log15.Logger { return log15.Root().New(ctx...) 
} From 66c9401c0740ffff0f9f978328330a3f2728133b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 May 2017 14:26:00 -0400 Subject: [PATCH 139/515] log: Tm -> TM --- log/tm_logger.go | 6 +++--- log/tm_logger_test.go | 12 ++++++------ log/tmfmt_logger.go | 4 ++-- log/tmfmt_logger_test.go | 16 ++++++++-------- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/log/tm_logger.go b/log/tm_logger.go index 0d01a970e..f12442db1 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -17,10 +17,10 @@ type tmLogger struct { srcLogger kitlog.Logger } -// NewTmTermLogger returns a logger that encodes msg and keyvals to the Writer +// NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer // using go-kit's log as an underlying logger and our custom formatter. Note // that underlying logger could be swapped with something else. -func NewTmLogger(w io.Writer) Logger { +func NewTMLogger(w io.Writer) Logger { // Color by level value colorFn := func(keyvals ...interface{}) term.FgBgColor { if keyvals[0] != level.Key() { @@ -36,7 +36,7 @@ func NewTmLogger(w io.Writer) Logger { } } - srcLogger := term.NewLogger(w, NewTmfmtLogger, colorFn) + srcLogger := term.NewLogger(w, NewTMFmtLogger, colorFn) srcLogger = level.NewFilter(srcLogger, level.AllowInfo()) return &tmLogger{srcLogger} } diff --git a/log/tm_logger_test.go b/log/tm_logger_test.go index f301c9fde..898316c42 100644 --- a/log/tm_logger_test.go +++ b/log/tm_logger_test.go @@ -7,9 +7,9 @@ import ( "github.com/tendermint/tmlibs/log" ) -func TestTmLogger(t *testing.T) { +func TestTMLogger(t *testing.T) { t.Parallel() - logger := log.NewTmLogger(ioutil.Discard) + logger := log.NewTMLogger(ioutil.Discard) if err := logger.Info("Hello", "abc", 123); err != nil { t.Error(err) } @@ -18,12 +18,12 @@ func TestTmLogger(t *testing.T) { } } -func BenchmarkTmLoggerSimple(b *testing.B) { - benchmarkRunner(b, log.NewTmLogger(ioutil.Discard), baseInfoMessage) +func BenchmarkTMLoggerSimple(b *testing.B) { 
+ benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) } -func BenchmarkTmLoggerContextual(b *testing.B) { - benchmarkRunner(b, log.NewTmLogger(ioutil.Discard), withInfoMessage) +func BenchmarkTMLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) } func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index db689c084..2f299e274 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -34,13 +34,13 @@ type tmfmtLogger struct { w io.Writer } -// NewTmFmtLogger returns a logger that encodes keyvals to the Writer in +// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in // Tendermint custom format. // // Each log event produces no more than one call to w.Write. // The passed Writer must be safe for concurrent use by multiple goroutines if // the returned Logger will be used concurrently. -func NewTmfmtLogger(w io.Writer) kitlog.Logger { +func NewTMFmtLogger(w io.Writer) kitlog.Logger { return &tmfmtLogger{w} } diff --git a/log/tmfmt_logger_test.go b/log/tmfmt_logger_test.go index ca5ee55ea..c5ec41274 100644 --- a/log/tmfmt_logger_test.go +++ b/log/tmfmt_logger_test.go @@ -13,10 +13,10 @@ import ( "github.com/tendermint/tmlibs/log" ) -func TestTmfmtLogger(t *testing.T) { +func TestTMFmtLogger(t *testing.T) { t.Parallel() buf := &bytes.Buffer{} - logger := log.NewTmfmtLogger(buf) + logger := log.NewTMFmtLogger(buf) if err := logger.Log("hello", "world"); err != nil { t.Fatal(err) @@ -46,17 +46,17 @@ func TestTmfmtLogger(t *testing.T) { assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) } -func BenchmarkTmfmtLoggerSimple(b *testing.B) { - benchmarkRunnerKitlog(b, log.NewTmfmtLogger(ioutil.Discard), baseMessage) +func BenchmarkTMFmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) } -func BenchmarkTmfmtLoggerContextual(b 
*testing.B) { - benchmarkRunnerKitlog(b, log.NewTmfmtLogger(ioutil.Discard), withMessage) +func BenchmarkTMFmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) } -func TestTmfmtLoggerConcurrency(t *testing.T) { +func TestTMFmtLoggerConcurrency(t *testing.T) { t.Parallel() - testConcurrency(t, log.NewTmfmtLogger(ioutil.Discard), 10000) + testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) } func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { From 520561e94a11040272fc64e5f338eacdc336ea3d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 May 2017 11:06:30 +0400 Subject: [PATCH 140/515] add testing logger --- log/testing_logger.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 log/testing_logger.go diff --git a/log/testing_logger.go b/log/testing_logger.go new file mode 100644 index 000000000..7ab83e414 --- /dev/null +++ b/log/testing_logger.go @@ -0,0 +1,31 @@ +package log + +import ( + "os" + "testing" +) + +var ( + // reuse the same logger across all tests + _testingLogger Logger +) + +// TestingLogger returns a TMLogger which writes to STDOUT if testing being run +// with the verbose (-v) flag, NopLogger otherwise. +// +// Note that the call to TestingLogger() must be made +// inside a test (not in the init func) because +// verbose flag only set at the time of testing. 
+func TestingLogger() Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLogger(os.Stdout) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} From 3420b389e93af28db6ba30d58db18fc5693fbba3 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Fri, 5 May 2017 19:45:02 +0200 Subject: [PATCH 141/515] Add testify version for consistency --- glide.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/glide.yaml b/glide.yaml index b0f22d1a7..f9e8f29b1 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,5 +16,7 @@ import: - package: github.com/spf13/viper testImport: - package: github.com/stretchr/testify + version: ^1.1.4 subpackages: - assert + - require From 6ff7b4395d7019a31cec66713aec15ed86cdb888 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 May 2017 11:18:50 +0400 Subject: [PATCH 142/515] update glide.yaml and glide.lock files --- Makefile | 2 -- glide.lock | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index cd1a57346..d5e5aa2c1 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,6 @@ GOTOOLS = \ github.com/Masterminds/glide REPO:=github.com/tendermint/tmlibs -all: install test - test: go test `glide novendor` diff --git a/glide.lock b/glide.lock index 37fe6ba4b..b30f538a3 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 69359a39dbb6957c9f09167520317ad72d4bfa75f37a614b347e2510768c8a42 -updated: 2017-05-05T17:40:30.424309209Z +updated: 2017-05-05T17:46:34.975369143Z imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 From b2bd039923c20f59d13c7992a079d95e92f328fe Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 4 May 2017 17:33:32 +0400 Subject: [PATCH 143/515] correct impl of WithLevel --- log/logger.go | 22 ++++++++++++++++++++++ log/tm_logger.go | 14 -------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/log/logger.go b/log/logger.go index 
790bcad74..a895aed6d 100644 --- a/log/logger.go +++ b/log/logger.go @@ -4,6 +4,7 @@ import ( "fmt" kitlog "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" ) // Logger is what any Tendermint library should take. @@ -25,3 +26,24 @@ func With(logger Logger, keyvals ...interface{}) Logger { panic(fmt.Sprintf("Unexpected logger of type %T", logger)) } } + +// WithLevel returns a copy of the logger with a level set to lvl. +func WithLevel(logger Logger, lvl string) Logger { + switch logger.(type) { + case *tmLogger: + switch lvl { + case "info": + return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowInfo())} + case "debug": + return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowDebug())} + case "error": + return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowError())} + default: + panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) + } + case *nopLogger: + return logger + default: + panic(fmt.Sprintf("Unexpected logger of type %T", logger)) + } +} diff --git a/log/tm_logger.go b/log/tm_logger.go index f12442db1..d8550ea67 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -41,20 +41,6 @@ func NewTMLogger(w io.Writer) Logger { return &tmLogger{srcLogger} } -// WithLevel returns a copy of the logger with a level set to lvl. -func (l *tmLogger) WithLevel(lvl string) Logger { - switch lvl { - case "info": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowInfo())} - case "debug": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowDebug())} - case "error": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowError())} - default: - panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) - } -} - // Info logs a message at level Info. 
func (l *tmLogger) Info(msg string, keyvals ...interface{}) error { lWithLevel := level.Info(l.srcLogger) From ea01d003d1fe1a95a118913d3798544cce3869ac Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 4 May 2017 19:39:16 +0400 Subject: [PATCH 144/515] changes per @ethanfrey comments --- log/logger.go | 48 +++++++++++++----------------------------- log/nop_logger.go | 11 ++++++++++ log/nop_logger_test.go | 2 +- log/testing_logger.go | 2 +- log/tm_logger.go | 25 ++++++++++++++++++++++ log/tm_logger_test.go | 6 +++--- 6 files changed, 56 insertions(+), 38 deletions(-) diff --git a/log/logger.go b/log/logger.go index a895aed6d..bf6a3ff65 100644 --- a/log/logger.go +++ b/log/logger.go @@ -1,10 +1,9 @@ package log import ( - "fmt" + "io" kitlog "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" ) // Logger is what any Tendermint library should take. @@ -12,38 +11,21 @@ type Logger interface { Debug(msg string, keyvals ...interface{}) error Info(msg string, keyvals ...interface{}) error Error(msg string, keyvals ...interface{}) error -} -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Info, Debug or Error. -func With(logger Logger, keyvals ...interface{}) Logger { - switch logger.(type) { - case *tmLogger: - return &tmLogger{kitlog.With(logger.(*tmLogger).srcLogger, keyvals...)} - case *nopLogger: - return logger - default: - panic(fmt.Sprintf("Unexpected logger of type %T", logger)) - } + With(keyvals ...interface{}) Logger + WithLevel(lvl string) Logger } -// WithLevel returns a copy of the logger with a level set to lvl. 
-func WithLevel(logger Logger, lvl string) Logger { - switch logger.(type) { - case *tmLogger: - switch lvl { - case "info": - return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowInfo())} - case "debug": - return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowDebug())} - case "error": - return &tmLogger{level.NewFilter(logger.(*tmLogger).srcLogger, level.AllowError())} - default: - panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) - } - case *nopLogger: - return logger - default: - panic(fmt.Sprintf("Unexpected logger of type %T", logger)) - } +// NewSyncWriter returns a new writer that is safe for concurrent use by +// multiple goroutines. Writes to the returned writer are passed on to w. If +// another write is already in progress, the calling goroutine blocks until +// the writer is available. +// +// If w implements the following interface, so does the returned writer. +// +// interface { +// Fd() uintptr +// } +func NewSyncWriter(w io.Writer) io.Writer { + return kitlog.NewSyncWriter(w) } diff --git a/log/nop_logger.go b/log/nop_logger.go index b6e312631..219998175 100644 --- a/log/nop_logger.go +++ b/log/nop_logger.go @@ -2,6 +2,9 @@ package log type nopLogger struct{} +// Interface assertions +var _ Logger = (*nopLogger)(nil) + // NewNopLogger returns a logger that doesn't do anything. 
func NewNopLogger() Logger { return &nopLogger{} } @@ -16,3 +19,11 @@ func (nopLogger) Debug(string, ...interface{}) error { func (nopLogger) Error(string, ...interface{}) error { return nil } + +func (l *nopLogger) With(...interface{}) Logger { + return l +} + +func (l *nopLogger) WithLevel(lvl string) Logger { + return l +} diff --git a/log/nop_logger_test.go b/log/nop_logger_test.go index 9757d4f19..d2009fdf0 100644 --- a/log/nop_logger_test.go +++ b/log/nop_logger_test.go @@ -12,7 +12,7 @@ func TestNopLogger(t *testing.T) { if err := logger.Info("Hello", "abc", 123); err != nil { t.Error(err) } - if err := log.With(logger, "def", "ghi").Debug(""); err != nil { + if err := logger.With("def", "ghi").Debug(""); err != nil { t.Error(err) } } diff --git a/log/testing_logger.go b/log/testing_logger.go index 7ab83e414..319136337 100644 --- a/log/testing_logger.go +++ b/log/testing_logger.go @@ -22,7 +22,7 @@ func TestingLogger() Logger { } if testing.Verbose() { - _testingLogger = NewTMLogger(os.Stdout) + _testingLogger = NewTMLogger(NewSyncWriter(os.Stdout)) } else { _testingLogger = NewNopLogger() } diff --git a/log/tm_logger.go b/log/tm_logger.go index d8550ea67..3a3c9dde5 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -17,9 +17,14 @@ type tmLogger struct { srcLogger kitlog.Logger } +// Interface assertions +var _ Logger = (*tmLogger)(nil) + // NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer // using go-kit's log as an underlying logger and our custom formatter. Note // that underlying logger could be swapped with something else. +// +// Default logging level is info. You can change it using SetLevel(). func NewTMLogger(w io.Writer) Logger { // Color by level value colorFn := func(keyvals ...interface{}) term.FgBgColor { @@ -58,3 +63,23 @@ func (l *tmLogger) Error(msg string, keyvals ...interface{}) error { lWithLevel := level.Error(l.srcLogger) return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) 
} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. +func (l *tmLogger) With(keyvals ...interface{}) Logger { + return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} +} + +// WithLevel returns a new logger with the level set to lvl. +func (l *tmLogger) WithLevel(lvl string) Logger { + switch lvl { + case "info": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowInfo())} + case "debug": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowDebug())} + case "error": + return &tmLogger{level.NewFilter(l.srcLogger, level.AllowError())} + default: + panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) + } +} diff --git a/log/tm_logger_test.go b/log/tm_logger_test.go index 898316c42..15c940ce8 100644 --- a/log/tm_logger_test.go +++ b/log/tm_logger_test.go @@ -13,7 +13,7 @@ func TestTMLogger(t *testing.T) { if err := logger.Info("Hello", "abc", 123); err != nil { t.Error(err) } - if err := log.With(logger, "def", "ghi").Debug(""); err != nil { + if err := logger.With("def", "ghi").Debug(""); err != nil { t.Error(err) } } @@ -27,7 +27,7 @@ func BenchmarkTMLoggerContextual(b *testing.B) { } func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { - lc := log.With(logger, "common_key", "common_value") + lc := logger.With("common_key", "common_value") b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -37,5 +37,5 @@ func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { var ( baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } - withInfoMessage = func(logger log.Logger) { log.With(logger, "a", "b").Info("c", "d", "f") } + withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } ) From 2bf6ebf379bebebc456d755998e1bedd87ed9cae Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 5 May 2017 15:06:56 +0400 Subject: [PATCH 
145/515] filter --- log/filter.go | 97 +++++++++++++++++++++++++++++++++++++++ log/filter_test.go | 103 ++++++++++++++++++++++++++++++++++++++++++ log/logger.go | 1 - log/nop_logger.go | 4 -- log/tm_json_logger.go | 15 ++++++ log/tm_logger.go | 32 +++---------- log/tmfmt_logger.go | 8 ++-- 7 files changed, 226 insertions(+), 34 deletions(-) create mode 100644 log/filter.go create mode 100644 log/filter_test.go create mode 100644 log/tm_json_logger.go diff --git a/log/filter.go b/log/filter.go new file mode 100644 index 000000000..e7bec982e --- /dev/null +++ b/log/filter.go @@ -0,0 +1,97 @@ +package log + +// NewFilter wraps next and implements filtering. See the commentary on the +// Option functions for a detailed description of how to configure levels. If +// no options are provided, all leveled log events created with Debug, Info or +// Error helper methods are squelched. +func NewFilter(next Logger, options ...Option) Logger { + l := &filter{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type filter struct { + next Logger + allowed level + errNotAllowed error +} + +func (l *filter) Info(msg string, keyvals ...interface{}) error { + levelAllowed := l.allowed&levelInfo != 0 + if !levelAllowed { + return l.errNotAllowed + } + return l.next.Info(msg, keyvals...) +} + +func (l *filter) Debug(msg string, keyvals ...interface{}) error { + levelAllowed := l.allowed&levelDebug != 0 + if !levelAllowed { + return l.errNotAllowed + } + return l.next.Debug(msg, keyvals...) +} + +func (l *filter) Error(msg string, keyvals ...interface{}) error { + levelAllowed := l.allowed&levelError != 0 + if !levelAllowed { + return l.errNotAllowed + } + return l.next.Error(msg, keyvals...) +} + +func (l *filter) With(keyvals ...interface{}) Logger { + return l.next.With(keyvals...) +} + +// Option sets a parameter for the filter. +type Option func(*filter) + +// AllowAll is an alias for AllowDebug. 
+func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelInfo) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *filter) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *filter) { l.errNotAllowed = err } +} + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) diff --git a/log/filter_test.go b/log/filter_test.go new file mode 100644 index 000000000..5e1fb167e --- /dev/null +++ b/log/filter_test.go @@ -0,0 +1,103 @@ +package log_test + +import ( + "bytes" + "errors" + "strings" + "testing" + + "github.com/tendermint/tmlibs/log" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed log.Option + want string + }{ + { + "AllowAll", + log.AllowAll(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + log.AllowDebug(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + 
{ + "AllowInfo", + log.AllowInfo(), + strings.Join([]string{ + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + log.AllowError(), + strings.Join([]string{ + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + log.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) + + logger.Debug("here", "this is", "debug log") + logger.Info("here", "this is", "info log") + logger.Error("here", "this is", "error log") + + if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestErrNotAllowed(t *testing.T) { + myError := errors.New("squelched!") + opts := []log.Option{ + log.AllowError(), + log.ErrNotAllowed(myError), + } + logger := log.NewFilter(log.NewNopLogger(), opts...) 
+ + if want, have := myError, logger.Info("foo", "bar", "baz"); want != have { + t.Errorf("want %#+v, have %#+v", want, have) + } + + if want, have := error(nil), logger.Error("foo", "bar", "baz"); want != have { + t.Errorf("want %#+v, have %#+v", want, have) + } +} + +func TestLevelContext(t *testing.T) { + var buf bytes.Buffer + + var logger log.Logger + logger = log.NewTMJSONLogger(&buf) + logger = log.NewFilter(logger, log.AllowAll()) + logger = logger.With("context", "value") + + logger.Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/log/logger.go b/log/logger.go index bf6a3ff65..be273f484 100644 --- a/log/logger.go +++ b/log/logger.go @@ -13,7 +13,6 @@ type Logger interface { Error(msg string, keyvals ...interface{}) error With(keyvals ...interface{}) Logger - WithLevel(lvl string) Logger } // NewSyncWriter returns a new writer that is safe for concurrent use by diff --git a/log/nop_logger.go b/log/nop_logger.go index 219998175..306a8405f 100644 --- a/log/nop_logger.go +++ b/log/nop_logger.go @@ -23,7 +23,3 @@ func (nopLogger) Error(string, ...interface{}) error { func (l *nopLogger) With(...interface{}) Logger { return l } - -func (l *nopLogger) WithLevel(lvl string) Logger { - return l -} diff --git a/log/tm_json_logger.go b/log/tm_json_logger.go new file mode 100644 index 000000000..a71ac1034 --- /dev/null +++ b/log/tm_json_logger.go @@ -0,0 +1,15 @@ +package log + +import ( + "io" + + kitlog "github.com/go-kit/kit/log" +) + +// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. 
+func NewTMJSONLogger(w io.Writer) Logger { + return &tmLogger{kitlog.NewJSONLogger(w)} +} diff --git a/log/tm_logger.go b/log/tm_logger.go index 3a3c9dde5..a6a6f6d39 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -5,7 +5,7 @@ import ( "io" kitlog "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + kitlevel "github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/term" ) @@ -23,15 +23,13 @@ var _ Logger = (*tmLogger)(nil) // NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer // using go-kit's log as an underlying logger and our custom formatter. Note // that underlying logger could be swapped with something else. -// -// Default logging level is info. You can change it using SetLevel(). func NewTMLogger(w io.Writer) Logger { // Color by level value colorFn := func(keyvals ...interface{}) term.FgBgColor { - if keyvals[0] != level.Key() { + if keyvals[0] != kitlevel.Key() { panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) } - switch keyvals[1].(level.Value).String() { + switch keyvals[1].(kitlevel.Value).String() { case "debug": return term.FgBgColor{Fg: term.DarkGray} case "error": @@ -41,26 +39,24 @@ func NewTMLogger(w io.Writer) Logger { } } - srcLogger := term.NewLogger(w, NewTMFmtLogger, colorFn) - srcLogger = level.NewFilter(srcLogger, level.AllowInfo()) - return &tmLogger{srcLogger} + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} } // Info logs a message at level Info. func (l *tmLogger) Info(msg string, keyvals ...interface{}) error { - lWithLevel := level.Info(l.srcLogger) + lWithLevel := kitlevel.Info(l.srcLogger) return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } // Debug logs a message at level Debug. func (l *tmLogger) Debug(msg string, keyvals ...interface{}) error { - lWithLevel := level.Debug(l.srcLogger) + lWithLevel := kitlevel.Debug(l.srcLogger) return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } // Error logs a message at level Error. 
func (l *tmLogger) Error(msg string, keyvals ...interface{}) error { - lWithLevel := level.Error(l.srcLogger) + lWithLevel := kitlevel.Error(l.srcLogger) return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } @@ -69,17 +65,3 @@ func (l *tmLogger) Error(msg string, keyvals ...interface{}) error { func (l *tmLogger) With(keyvals ...interface{}) Logger { return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} } - -// WithLevel returns a new logger with the level set to lvl. -func (l *tmLogger) WithLevel(lvl string) Logger { - switch lvl { - case "info": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowInfo())} - case "debug": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowDebug())} - case "error": - return &tmLogger{level.NewFilter(l.srcLogger, level.AllowError())} - default: - panic(fmt.Sprintf("Unexpected level %v, expect either \"info\" or \"debug\" or \"error\"", lvl)) - } -} diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index 2f299e274..4e36de898 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -8,7 +8,7 @@ import ( "time" kitlog "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + kitlevel "github.com/go-kit/kit/log/level" "github.com/go-logfmt/logfmt" ) @@ -56,13 +56,13 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { for i := 0; i < len(keyvals)-1; i += 2 { // Extract level - if keyvals[i] == level.Key() { + if keyvals[i] == kitlevel.Key() { lvlIndex = i switch keyvals[i+1].(type) { case string: lvl = keyvals[i+1].(string) - case level.Value: - lvl = keyvals[i+1].(level.Value).String() + case kitlevel.Value: + lvl = keyvals[i+1].(kitlevel.Value).String() default: panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) } From f4be75cb1e5fc7259195f2a2ae5f4b995847b9f5 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 5 May 2017 20:33:15 +0400 Subject: [PATCH 146/515] remove warn mentions --- log/filter.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/log/filter.go b/log/filter.go index e7bec982e..102e2d900 100644 --- a/log/filter.go +++ b/log/filter.go @@ -56,12 +56,12 @@ func AllowAll() Option { return AllowDebug() } -// AllowDebug allows error, warn, info and debug level log events to pass. +// AllowDebug allows error, info and debug level log events to pass. func AllowDebug() Option { return allowed(levelError | levelInfo | levelDebug) } -// AllowInfo allows error, warn and info level log events to pass. +// AllowInfo allows error and info level log events to pass. func AllowInfo() Option { return allowed(levelError | levelInfo) } From 240215f2aa26d53223c618a5a2127a08d37868f0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 5 May 2017 21:52:01 +0400 Subject: [PATCH 147/515] return back all in Makefile --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index d5e5aa2c1..8e43dd11a 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,11 @@ -.PHONY: all test install get_vendor_deps ensure_tools +.PHONY: all test get_vendor_deps ensure_tools GOTOOLS = \ github.com/Masterminds/glide REPO:=github.com/tendermint/tmlibs +all: test + test: go test `glide novendor` @@ -14,5 +16,3 @@ get_vendor_deps: ensure_tools ensure_tools: go get $(GOTOOLS) - - From bc6baf677420b4d718848a6559ded96a97868a63 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 6 May 2017 11:21:07 +0400 Subject: [PATCH 148/515] [log] proper impl of With for filter --- log/filter.go | 2 +- log/filter_test.go | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/log/filter.go b/log/filter.go index 102e2d900..afb2e920b 100644 --- a/log/filter.go +++ b/log/filter.go @@ -45,7 +45,7 @@ func (l *filter) Error(msg string, keyvals ...interface{}) error { } func (l *filter) With(keyvals ...interface{}) Logger { - return l.next.With(keyvals...) 
+ return &filter{next: l.next.With(keyvals...), allowed: l.allowed, errNotAllowed: l.errNotAllowed} } // Option sets a parameter for the filter. diff --git a/log/filter_test.go b/log/filter_test.go index 5e1fb167e..3840b12a7 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -93,11 +93,17 @@ func TestLevelContext(t *testing.T) { var logger log.Logger logger = log.NewTMJSONLogger(&buf) - logger = log.NewFilter(logger, log.AllowAll()) + logger = log.NewFilter(logger, log.AllowError()) logger = logger.With("context", "value") + logger.Error("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() logger.Info("foo", "bar", "baz") - if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + if want, have := ``, strings.TrimSpace(buf.String()); want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) } } From b2a116863cbf080a5f80201c9ea843c25919d061 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 8 May 2017 17:30:21 +0400 Subject: [PATCH 149/515] squash module key if multiple keyvals were provided last keyvalue wins --- log/tm_logger.go | 3 ++- log/tmfmt_logger.go | 38 ++++++++++++++++++++++---------------- log/tmfmt_logger_test.go | 6 ++++++ 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/log/tm_logger.go b/log/tm_logger.go index a6a6f6d39..370aa7c0a 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -10,7 +10,8 @@ import ( ) const ( - msgKey = "_msg" // "_" prefixed to avoid collisions + msgKey = "_msg" // "_" prefixed to avoid collisions + moduleKey = "module" ) type tmLogger struct { diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index 4e36de898..362fbf28d 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -51,13 +51,15 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) 
error { lvl := "none" msg := "unknown" - lvlIndex := -1 - msgIndex := -1 + module := "unknown" + + // indexes of keys to skip while encoding later + excludeIndexes := make([]int, 0) for i := 0; i < len(keyvals)-1; i += 2 { // Extract level if keyvals[i] == kitlevel.Key() { - lvlIndex = i + excludeIndexes = append(excludeIndexes, i) switch keyvals[i+1].(type) { case string: lvl = keyvals[i+1].(string) @@ -66,18 +68,14 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { default: panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) } - continue - } - - // and message - if keyvals[i] == msgKey { - msgIndex = i + // and message + } else if keyvals[i] == msgKey { + excludeIndexes = append(excludeIndexes, i) msg = keyvals[i+1].(string) - continue - } - - if lvlIndex > 0 && msgIndex > 0 { // found all we're looking for - break + // and module (could be multiple keyvals; if such case last keyvalue wins) + } else if keyvals[i] == moduleKey { + excludeIndexes = append(excludeIndexes, i) + module = keyvals[i+1].(string) } } @@ -92,10 +90,18 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { // Stopping ... 
- message enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + if module != "unknown" { + enc.buf.WriteString("module=" + module + " ") + } + +KeyvalueLoop: for i := 0; i < len(keyvals)-1; i += 2 { - if i == lvlIndex || i == msgIndex { - continue + for _, j := range excludeIndexes { + if i == j { + continue KeyvalueLoop + } } + if err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]); err != nil { return err } diff --git a/log/tmfmt_logger_test.go b/log/tmfmt_logger_test.go index c5ec41274..62eb32a03 100644 --- a/log/tmfmt_logger_test.go +++ b/log/tmfmt_logger_test.go @@ -44,6 +44,12 @@ func TestTMFmtLogger(t *testing.T) { t.Fatal(err) } assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) } func BenchmarkTMFmtLoggerSimple(b *testing.B) { From f202d02d0da8172c1d8e9d630cedf8c91f5fd4f8 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 12 May 2017 17:39:00 +0200 Subject: [PATCH 150/515] [log] add separator --- log/tmfmt_logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index 362fbf28d..14028d756 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -88,7 +88,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { // D - first character of the level, uppercase (ASCII only) // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) // Stopping ... 
- message - enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) if module != "unknown" { enc.buf.WriteString("module=" + module + " ") From 300766827437f4d4333b40e072fad26c21e19dc0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 12 May 2017 22:47:02 +0200 Subject: [PATCH 151/515] [log] add NewFilterByLevel helper func --- log/filter.go | 18 ++++++++++++++++++ log/filter_test.go | 13 +++++++++++++ 2 files changed, 31 insertions(+) diff --git a/log/filter.go b/log/filter.go index afb2e920b..a5d15c7eb 100644 --- a/log/filter.go +++ b/log/filter.go @@ -1,5 +1,7 @@ package log +import "fmt" + // NewFilter wraps next and implements filtering. See the commentary on the // Option functions for a detailed description of how to configure levels. If // no options are provided, all leveled log events created with Debug, Info or @@ -14,6 +16,22 @@ func NewFilter(next Logger, options ...Option) Logger { return l } +// NewFilterByLevel wraps next and implements filtering based on a given level. 
+func NewFilterByLevel(next Logger, lvl string) Logger { + var option Option + switch lvl { + case "info": + option = AllowInfo() + case "debug": + option = AllowDebug() + case "error": + option = AllowError() + default: + panic(fmt.Sprintf("Expected either \"info\", \"debug\" or \"error\" log level, given %v", lvl)) + } + return NewFilter(next, option) +} + type filter struct { next Logger allowed level diff --git a/log/filter_test.go b/log/filter_test.go index 3840b12a7..cae10c145 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/tendermint/tmlibs/log" ) @@ -107,3 +108,15 @@ func TestLevelContext(t *testing.T) { t.Errorf("\nwant '%s'\nhave '%s'", want, have) } } + +func TestNewFilterByLevel(t *testing.T) { + assert := assert.New(t) + var logger log.Logger + logger = log.NewNopLogger() + assert.NotPanics(func() { + logger = log.NewFilterByLevel(logger, "info") + }) + assert.Panics(func() { + logger = log.NewFilterByLevel(logger, "smth") + }) +} From dd3e433d32da1faddf66183678f760ccb023e4a2 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 13 May 2017 15:54:11 +0200 Subject: [PATCH 152/515] [log] NewFilterByLevel returns an error --- log/filter.go | 7 ++++--- log/filter_test.go | 14 ++++++-------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/log/filter.go b/log/filter.go index a5d15c7eb..f410d2228 100644 --- a/log/filter.go +++ b/log/filter.go @@ -17,7 +17,8 @@ func NewFilter(next Logger, options ...Option) Logger { } // NewFilterByLevel wraps next and implements filtering based on a given level. -func NewFilterByLevel(next Logger, lvl string) Logger { +// Error is returned if level is not info, error or debug. 
+func NewFilterByLevel(next Logger, lvl string) (Logger, error) { var option Option switch lvl { case "info": @@ -27,9 +28,9 @@ func NewFilterByLevel(next Logger, lvl string) Logger { case "error": option = AllowError() default: - panic(fmt.Sprintf("Expected either \"info\", \"debug\" or \"error\" log level, given %v", lvl)) + return nil, fmt.Errorf("Expected either \"info\", \"debug\" or \"error\" log level, given %v", lvl) } - return NewFilter(next, option) + return NewFilter(next, option), nil } type filter struct { diff --git a/log/filter_test.go b/log/filter_test.go index cae10c145..edde86249 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -6,7 +6,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/tendermint/tmlibs/log" ) @@ -110,13 +109,12 @@ func TestLevelContext(t *testing.T) { } func TestNewFilterByLevel(t *testing.T) { - assert := assert.New(t) var logger log.Logger logger = log.NewNopLogger() - assert.NotPanics(func() { - logger = log.NewFilterByLevel(logger, "info") - }) - assert.Panics(func() { - logger = log.NewFilterByLevel(logger, "smth") - }) + if _, err := log.NewFilterByLevel(logger, "info"); err != nil { + t.Fatal(err) + } + if _, err := log.NewFilterByLevel(logger, "other"); err == nil { + t.Fatal(err) + } } From a5fcc94a3b8dc1260da7ec956d6111ddfc0c67c1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sun, 14 May 2017 12:03:45 +0200 Subject: [PATCH 153/515] [log] allow for custom color funcs --- log/testing_logger.go | 18 ++++++++++++++++++ log/tm_logger.go | 6 ++++++ 2 files changed, 24 insertions(+) diff --git a/log/testing_logger.go b/log/testing_logger.go index 319136337..81482bef5 100644 --- a/log/testing_logger.go +++ b/log/testing_logger.go @@ -3,6 +3,8 @@ package log import ( "os" "testing" + + "github.com/go-kit/kit/log/term" ) var ( @@ -29,3 +31,19 @@ func TestingLogger() Logger { return _testingLogger } + +// TestingLoggerWithColorFn allow you to provide your own color function. 
See +// TestingLogger for documentation. +func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} diff --git a/log/tm_logger.go b/log/tm_logger.go index 370aa7c0a..ae8f88c6a 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -43,6 +43,12 @@ func NewTMLogger(w io.Writer) Logger { return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} } +// NewTMLoggerWithColorFn allow you to provide your own color function. See +// NewTMLogger for documentation. +func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + // Info logs a message at level Info. func (l *tmLogger) Info(msg string, keyvals ...interface{}) error { lWithLevel := kitlevel.Info(l.srcLogger) From 74a7f8c92b66abe75fc04c7b0a17ba52e101caef Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 6 May 2017 22:48:08 +0400 Subject: [PATCH 154/515] [autofile] close file before renaming it this might fix our windows bug https://github.com/tendermint/tendermint/issues/444 https://github.com/Netflix-Skunkworks/go-jira/commit/0980f8e1972a942e505b1939935adf0f7a71f387 --- autofile/group.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/autofile/group.go b/autofile/group.go index 0f829309a..39f274e04 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -224,15 +224,17 @@ func (g *Group) RotateFile() { g.mtx.Lock() defer g.mtx.Unlock() - dstPath := filePathForIndex(g.Head.Path, g.maxIndex, g.maxIndex+1) - err := os.Rename(g.Head.Path, dstPath) - if err != nil { + headPath := g.Head.Path + + if err := g.Head.closeFile(); err != nil { panic(err) } - err = g.Head.closeFile() - if err != nil { + + 
indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) + if err := os.Rename(headPath, indexPath); err != nil { panic(err) } + g.maxIndex += 1 } From 812d9f9b84d1dfe4cb46ce021b3a2d97b48d1292 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 15 May 2017 09:09:42 -0400 Subject: [PATCH 155/515] add changelog --- CHANGELOG.md | 196 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..34eeffc05 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,196 @@ +# Changelog + +## 0.2.0 (May 15, 2017) + +BREAKING CHANGES: + +- [common] NewBaseService takes the new logger + + +FEATURES: + +- [cli] New library to standardize building command line tools +- [log] New logging library + +BUG FIXES: + +- [autofile] Close file before rotating + +## 0.1.0 (May 1, 2017) + +Initial release, combines what were previously independent repos: + +- go-autofile +- go-clist +- go-common +- go-db +- go-events +- go-flowrate +- go-logger +- go-merkle +- go-process + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 761b1553aa2316a41e102371ab3643dddc56608a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 15 May 2017 20:08:02 +0200 Subject: [PATCH 156/515] [log] allow filtering with fields --- log/filter.go | 59 ++++++++++++++++++++++++++++++++++++++++++---- log/filter_test.go | 30 ++++++++++++++++++----- 2 files changed, 78 insertions(+), 11 deletions(-) diff --git a/log/filter.go b/log/filter.go index f410d2228..9633d88a8 100644 --- a/log/filter.go +++ b/log/filter.go @@ -8,7 +8,8 @@ import "fmt" // Error helper methods are squelched. 
func NewFilter(next Logger, options ...Option) Logger { l := &filter{ - next: next, + next: next, + allowedKeyvals: make(map[keyval]level), } for _, option := range options { option(l) @@ -34,9 +35,15 @@ func NewFilterByLevel(next Logger, lvl string) (Logger, error) { } type filter struct { - next Logger - allowed level - errNotAllowed error + next Logger + allowed level + allowedKeyvals map[keyval]level + errNotAllowed error +} + +type keyval struct { + key interface{} + value interface{} } func (l *filter) Info(msg string, keyvals ...interface{}) error { @@ -63,8 +70,30 @@ func (l *filter) Error(msg string, keyvals ...interface{}) error { return l.next.Error(msg, keyvals...) } +// With implements Logger by constructing a new filter with a keyvals appended +// to the logger. +// +// If custom level was set for a keyval pair using one of the +// Allow*With methods, it is used as the logger's level. +// +// Examples: +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) +// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... 
Hello module=crypto user=Sam" func (l *filter) With(keyvals ...interface{}) Logger { - return &filter{next: l.next.With(keyvals...), allowed: l.allowed, errNotAllowed: l.errNotAllowed} + for i := len(keyvals) - 2; i >= 0; i -= 2 { + for kv, allowed := range l.allowedKeyvals { + if keyvals[i] == kv.key && keyvals[i+1] == kv.value { + return &filter{next: l.next.With(keyvals...), allowed: allowed, errNotAllowed: l.errNotAllowed, allowedKeyvals: l.allowedKeyvals} + } + } + } + return &filter{next: l.next.With(keyvals...), allowed: l.allowed, errNotAllowed: l.errNotAllowed, allowedKeyvals: l.allowedKeyvals} } // Option sets a parameter for the filter. @@ -107,6 +136,26 @@ func ErrNotAllowed(err error) Option { return func(l *filter) { l.errNotAllowed = err } } +// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. +func AllowDebugWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } +} + +// AllowInfoWith allows error and info level log events to pass for a specific key value pair. +func AllowInfoWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } +} + +// AllowErrorWith allows only error level log events to pass for a specific key value pair. +func AllowErrorWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } +} + +// AllowNoneWith allows no leveled log events to pass for a specific key value pair. 
+func AllowNoneWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } +} + type level byte const ( diff --git a/log/filter_test.go b/log/filter_test.go index edde86249..4665db3df 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -108,13 +108,31 @@ func TestLevelContext(t *testing.T) { } } -func TestNewFilterByLevel(t *testing.T) { +func TestVariousAllowWith(t *testing.T) { + var buf bytes.Buffer + var logger log.Logger - logger = log.NewNopLogger() - if _, err := log.NewFilterByLevel(logger, "info"); err != nil { - t.Fatal(err) + logger = log.NewTMJSONLogger(&buf) + + logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) + logger1.With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) } - if _, err := log.NewFilterByLevel(logger, "other"); err == nil { - t.Fatal(err) + + buf.Reset() + + logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) } } From d0cae7b6edb4896390fd5ff82aea1bac98994bd0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 16 May 2017 11:51:29 +0200 Subject: [PATCH 157/515] [log] change 
helper func --- log/filter.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/log/filter.go b/log/filter.go index 9633d88a8..451088838 100644 --- a/log/filter.go +++ b/log/filter.go @@ -17,21 +17,21 @@ func NewFilter(next Logger, options ...Option) Logger { return l } -// NewFilterByLevel wraps next and implements filtering based on a given level. -// Error is returned if level is not info, error or debug. -func NewFilterByLevel(next Logger, lvl string) (Logger, error) { - var option Option +// AllowLevel returns an option for the given level or error if no option exist +// for such level. +func AllowLevel(lvl string) (Option, error) { switch lvl { - case "info": - option = AllowInfo() case "debug": - option = AllowDebug() + return AllowDebug(), nil + case "info": + return AllowInfo(), nil case "error": - option = AllowError() + return AllowError(), nil + case "none": + return AllowNone(), nil default: - return nil, fmt.Errorf("Expected either \"info\", \"debug\" or \"error\" log level, given %v", lvl) + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) } - return NewFilter(next, option), nil } type filter struct { From 8af1c70a8be17543eb33e9bfbbcdd8371e3201cc Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 17 May 2017 12:03:26 +0200 Subject: [PATCH 158/515] Renamed --debug to --trace, used for light-client and basecoin --- cli/setup.go | 6 +++--- cli/setup_test.go | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 7a4a2098e..21b29a491 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -16,7 +16,7 @@ import ( const ( RootFlag = "root" HomeFlag = "home" - DebugFlag = "debug" + TraceFlag = "trace" OutputFlag = "output" EncodingFlag = "encoding" ) @@ -36,7 +36,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { // also, default must be empty, so we can detect this 
unset and fall back // to --root / TM_ROOT / TMROOT cmd.PersistentFlags().String(HomeFlag, "", "root directory for config and data") - cmd.PersistentFlags().Bool(DebugFlag, false, "print out full stack trace on errors") + cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) return Executor{cmd} } @@ -92,7 +92,7 @@ func (e Executor) Execute() error { err := e.Command.Execute() if err != nil { // TODO: something cooler with log-levels - if viper.GetBool(DebugFlag) { + if viper.GetBool(TraceFlag) { fmt.Printf("ERROR: %+v\n", err) } else { fmt.Println("ERROR:", err.Error()) diff --git a/cli/setup_test.go b/cli/setup_test.go index 6396b769e..8fb4ce140 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -184,7 +184,7 @@ func TestSetupUnmarshal(t *testing.T) { } } -func TestSetupDebug(t *testing.T) { +func TestSetupTrace(t *testing.T) { assert, require := assert.New(t), require.New(t) cases := []struct { @@ -193,22 +193,22 @@ func TestSetupDebug(t *testing.T) { long bool expected string }{ - {nil, nil, false, "Debug flag = false"}, - {[]string{"--debug"}, nil, true, "Debug flag = true"}, + {nil, nil, false, "Trace flag = false"}, + {[]string{"--trace"}, nil, true, "Trace flag = true"}, {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, - {nil, map[string]string{"DBG_DEBUG": "true"}, true, "Debug flag = true"}, + {nil, map[string]string{"DBG_TRACE": "true"}, true, "Trace flag = true"}, } for idx, tc := range cases { i := strconv.Itoa(idx) // test command that store value of foobar in local variable - debug := &cobra.Command{ - Use: "debug", + trace := &cobra.Command{ + Use: "trace", RunE: func(cmd *cobra.Command, args []string) error { - return errors.Errorf("Debug flag = %t", viper.GetBool(DebugFlag)) + return errors.Errorf("Trace flag = %t", viper.GetBool(TraceFlag)) }, } - cmd := PrepareBaseCmd(debug, "DBG", 
"/qwerty/asdfgh") // some missing dir.. + cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. viper.Reset() args := append([]string{cmd.Use}, tc.args...) @@ -219,7 +219,7 @@ func TestSetupDebug(t *testing.T) { assert.Equal(desired, msg[0], i) if tc.long && assert.True(len(msg) > 2, i) { // the next line starts the stack trace... - assert.Contains(msg[1], "TestSetupDebug", i) + assert.Contains(msg[1], "TestSetupTrace", i) assert.Contains(msg[2], "setup_test.go", i) } } From 2733f5a7381b17c6227c83f33bd23959076ed7e3 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 May 2017 11:27:26 +0200 Subject: [PATCH 159/515] CHANGELOG: update release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34eeffc05..ab193688f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 0.2.0 (May 15, 2017) +## 0.2.0 (May 18, 2017) BREAKING CHANGES: From 5032b224bce25c2ecc630cd56833bab45795fb37 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 23 May 2017 00:43:12 +0200 Subject: [PATCH 160/515] copy log level parsing from tendemint API change due to me not wanting `flags` package to depend on tendermint's config package. 
Refs https://github.com/tendermint/tendermint/issues/504 --- cli/flags/log_level.go | 86 +++++++++++++++++++++++++++++++++++++ cli/flags/log_level_test.go | 68 +++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+) create mode 100644 cli/flags/log_level.go create mode 100644 cli/flags/log_level_test.go diff --git a/cli/flags/log_level.go b/cli/flags/log_level.go new file mode 100644 index 000000000..ee4825cf7 --- /dev/null +++ b/cli/flags/log_level.go @@ -0,0 +1,86 @@ +package flags + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/tendermint/tmlibs/log" +) + +const ( + defaultLogLevelKey = "*" +) + +// ParseLogLevel parses complex log level - comma-separated +// list of module:level pairs with an optional *:level pair (* means +// all other modules). +// +// Example: +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { + if lvl == "" { + return nil, errors.New("Empty log level") + } + + l := lvl + + // prefix simple one word levels (e.g. 
"info") with "*" + if !strings.Contains(l, ":") { + l = defaultLogLevelKey + ":" + l + } + + options := make([]log.Option, 0) + + isDefaultLogLevelSet := false + var option log.Option + var err error + + list := strings.Split(l, ",") + for _, item := range list { + moduleAndLevel := strings.Split(item, ":") + + if len(moduleAndLevel) != 2 { + return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list) + } + + module := moduleAndLevel[0] + level := moduleAndLevel[1] + + if module == defaultLogLevelKey { + option, err = log.AllowLevel(level) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) + } + options = append(options, option) + isDefaultLogLevelSet = true + } else { + switch level { + case "debug": + option = log.AllowDebugWith("module", module) + case "info": + option = log.AllowInfoWith("module", module) + case "error": + option = log.AllowErrorWith("module", module) + case "none": + option = log.AllowNoneWith("module", module) + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list) + } + options = append(options, option) + + } + } + + // if "*" is not provided, set default global level + if !isDefaultLogLevelSet { + option, err = log.AllowLevel(defaultLogLevelValue) + if err != nil { + return nil, err + } + options = append(options, option) + } + + return log.NewFilter(logger, options...), nil +} diff --git a/cli/flags/log_level_test.go b/cli/flags/log_level_test.go new file mode 100644 index 000000000..027249145 --- /dev/null +++ b/cli/flags/log_level_test.go @@ -0,0 +1,68 @@ +package flags_test + +import ( + "bytes" + "strings" + "testing" + + tmflags "github.com/tendermint/tmlibs/cli/flags" + "github.com/tendermint/tmlibs/log" +) + +const ( + defaultLogLevelValue = "info" +) + +func TestParseLogLevel(t *testing.T) { + var buf 
bytes.Buffer + jsonLogger := log.NewTMJSONLogger(&buf) + + correctLogLevels := []struct { + lvl string + expectedLogLines []string + }{ + {"mempool:error", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, + {"mempool:error,*:debug", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, + {"*:debug,wire:none", []string{ + `{"_msg":"Kingpin","level":"debug","module":"mempool"}`, + `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, + } + + for _, c := range correctLogLevels { + logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger, defaultLogLevelValue) + if err != nil { + t.Fatal(err) + } + + logger = logger.With("module", "mempool") + + buf.Reset() + + logger.Debug("Kingpin") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) + } + + buf.Reset() + + logger.Info("Kitty Pryde") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl) + } + + buf.Reset() + + logger.Error("Mesmero") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) + } + } + + incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} + for _, lvl := range incorrectLogLevel { + if _, err := tmflags.ParseLogLevel(lvl, jsonLogger, defaultLogLevelValue); err == nil { + t.Fatalf("Expected %s to produce error", lvl) + } + } +} From 6b1043246380a5c543b8aba4b9ea11da8550cb25 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 23 May 2017 15:19:48 +0200 Subject: [PATCH 161/515] [flowrate] refactor clock functions (Refs #16) this commit does not fix the original bug --- flowrate/util.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
diff --git a/flowrate/util.go b/flowrate/util.go index 4caac583f..b33ddc701 100644 --- a/flowrate/util.go +++ b/flowrate/util.go @@ -15,16 +15,16 @@ const clockRate = 20 * time.Millisecond // czero is the process start time rounded down to the nearest clockRate // increment. -var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate +var czero = time.Now().Round(clockRate) // clock returns a low resolution timestamp relative to the process start time. func clock() time.Duration { - return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero + return time.Now().Round(clockRate).Sub(czero) } // clockToTime converts a clock() timestamp to an absolute time.Time value. func clockToTime(c time.Duration) time.Time { - return time.Unix(0, int64(czero+c)) + return czero.Add(c) } // clockRound returns d rounded to the nearest clockRate increment. From b5c57967b71ed5c3c4667687df2a1e91317439b7 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 23 May 2017 15:24:00 +0200 Subject: [PATCH 162/515] [flowrate] improve error formatting (Refs #16) --- flowrate/io_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flowrate/io_test.go b/flowrate/io_test.go index fa7f4b4ae..f4049ed2a 100644 --- a/flowrate/io_test.go +++ b/flowrate/io_test.go @@ -91,7 +91,7 @@ func TestReader(t *testing.T) { } for i, s := range status { if !reflect.DeepEqual(&s, &want[i]) { - t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s) + t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) } } if !bytes.Equal(b[:20], in[:20]) { @@ -137,7 +137,7 @@ func TestWriter(t *testing.T) { } for i, s := range status { if !reflect.DeepEqual(&s, &want[i]) { - t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s) + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) } } if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { From de02488778f3636ded32ca10e4ed9afbe8616481 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 23 May 
2017 23:04:33 +0200 Subject: [PATCH 163/515] Enhance the tests to make it clearer how these levels work together --- cli/flags/log_level_test.go | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/cli/flags/log_level_test.go b/cli/flags/log_level_test.go index 027249145..91f49b69c 100644 --- a/cli/flags/log_level_test.go +++ b/cli/flags/log_level_test.go @@ -21,12 +21,22 @@ func TestParseLogLevel(t *testing.T) { lvl string expectedLogLines []string }{ - {"mempool:error", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, - {"mempool:error,*:debug", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, + {"mempool:error", []string{ + ``, // if no default is given, assume info + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`}}, // if no default is given, assume info + {"mempool:error,*:debug", []string{ + `{"_msg":"Kingpin","level":"debug","module":"wire"}`, + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`}}, + {"*:debug,wire:none", []string{ - `{"_msg":"Kingpin","level":"debug","module":"mempool"}`, + ``, `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, - `{"_msg":"Mesmero","level":"error","module":"mempool"}`}}, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`}}, } for _, c := range correctLogLevels { @@ -35,28 +45,36 @@ func TestParseLogLevel(t *testing.T) { t.Fatal(err) } - logger = logger.With("module", "mempool") + logger = logger buf.Reset() - logger.Debug("Kingpin") + logger.With("module", "wire").Debug("Kingpin") if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have { t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) } buf.Reset() - logger.Info("Kitty Pryde") + logger.With("module", "mempool").Info("Kitty 
Pryde") if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl) } buf.Reset() - logger.Error("Mesmero") + logger.With("module", "mempool").Error("Mesmero") if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) } + + buf.Reset() + + logger.With("module", "state").Info("Mind") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[3] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) + } + } incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} From ddaa4d9b4cdb7c3090c780ede636b496390a073a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 23 May 2017 17:37:13 +0200 Subject: [PATCH 164/515] [log] tracing logger --- log/tm_logger.go | 2 +- log/tracing_logger.go | 76 ++++++++++++++++++++++++++++++++++++++ log/tracing_logger_test.go | 42 +++++++++++++++++++++ 3 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 log/tracing_logger.go create mode 100644 log/tracing_logger_test.go diff --git a/log/tm_logger.go b/log/tm_logger.go index ae8f88c6a..a903dbe8d 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -43,7 +43,7 @@ func NewTMLogger(w io.Writer) Logger { return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} } -// NewTMLoggerWithColorFn allow you to provide your own color function. See +// NewTMLoggerWithColorFn allows you to provide your own color function. See // NewTMLogger for documentation. 
func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} diff --git a/log/tracing_logger.go b/log/tracing_logger.go new file mode 100644 index 000000000..794bdaeb8 --- /dev/null +++ b/log/tracing_logger.go @@ -0,0 +1,76 @@ +package log + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// NewTracingLogger enables tracing by wrapping all errors (if they +// implement stackTracer interface) in tracedError. +// +// All errors returned by https://github.com/pkg/errors implement stackTracer +// interface. +// +// For debugging purposes only as it doubles the amount of allocations. +func NewTracingLogger(next Logger) Logger { + return &tracingLogger{ + next: next, + } +} + +type stackTracer interface { + error + StackTrace() errors.StackTrace +} + +type tracingLogger struct { + next Logger +} + +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) error { + return l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) error { + return l.next.Debug(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) error { + return l.next.Error(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) With(keyvals ...interface{}) Logger { + return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} +} + +func formatErrors(keyvals []interface{}) []interface{} { + newKeyvals := make([]interface{}, len(keyvals)) + copy(newKeyvals, keyvals) + for i := 0; i < len(newKeyvals)-1; i += 2 { + if err, ok := newKeyvals[i+1].(stackTracer); ok { + newKeyvals[i+1] = tracedError{err} + } + } + return newKeyvals +} + +// tracedError wraps a stackTracer and just makes the Error() result +// always return a full stack trace. 
+type tracedError struct { + wrapped stackTracer +} + +var _ stackTracer = tracedError{} + +func (t tracedError) StackTrace() errors.StackTrace { + return t.wrapped.StackTrace() +} + +func (t tracedError) Cause() error { + return t.wrapped +} + +func (t tracedError) Error() string { + return fmt.Sprintf("%+v", t.wrapped) +} diff --git a/log/tracing_logger_test.go b/log/tracing_logger_test.go new file mode 100644 index 000000000..584b34bef --- /dev/null +++ b/log/tracing_logger_test.go @@ -0,0 +1,42 @@ +package log_test + +import ( + "bytes" + stderr "errors" + "fmt" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/tendermint/tmlibs/log" +) + +func TestTracingLogger(t *testing.T) { + var buf bytes.Buffer + + var logger log.Logger + logger = log.NewTMJSONLogger(&buf) + + logger1 := log.NewTracingLogger(logger) + err1 := errors.New("Courage is grace under pressure.") + err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.") + logger1.With("err1", err1).Info("foo", "err2", err2) + have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) + if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible.")) + if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. 
You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} From b36203bb02857ce7c2dac66244ec9a2760c650d9 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 23 May 2017 23:27:26 +0200 Subject: [PATCH 165/515] [cli] add a test case to TestParseLogLevel where there is no module key --- cli/flags/log_level_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/cli/flags/log_level_test.go b/cli/flags/log_level_test.go index 91f49b69c..458a9e24d 100644 --- a/cli/flags/log_level_test.go +++ b/cli/flags/log_level_test.go @@ -25,18 +25,22 @@ func TestParseLogLevel(t *testing.T) { ``, // if no default is given, assume info ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`}}, // if no default is given, assume info + `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info + ``}}, + {"mempool:error,*:debug", []string{ `{"_msg":"Kingpin","level":"debug","module":"wire"}`, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`}}, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, {"*:debug,wire:none", []string{ ``, `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`}}, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, } for _, c := range correctLogLevels { @@ -75,6 
+79,12 @@ func TestParseLogLevel(t *testing.T) { t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) } + buf.Reset() + + logger.Debug("Gideon") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[4] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[4], have, c.lvl) + } } incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} From 5f20b3323e6afa49eb7c681a953611af08f43206 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 25 May 2017 13:06:42 +0200 Subject: [PATCH 166/515] don't do DeepEqual, compare ranges for durations and rates (Refs #16) --- flowrate/io_test.go | 49 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/flowrate/io_test.go b/flowrate/io_test.go index f4049ed2a..6d4934a8a 100644 --- a/flowrate/io_test.go +++ b/flowrate/io_test.go @@ -6,7 +6,6 @@ package flowrate import ( "bytes" - "reflect" "testing" "time" ) @@ -90,7 +89,7 @@ func TestReader(t *testing.T) { Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, } for i, s := range status { - if !reflect.DeepEqual(&s, &want[i]) { + if !statusesAreEqual(&s, &want[i]) { t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) } } @@ -136,7 +135,7 @@ func TestWriter(t *testing.T) { Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, } for i, s := range status { - if !reflect.DeepEqual(&s, &want[i]) { + if !statusesAreEqual(&s, &want[i]) { t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) } } @@ -144,3 +143,47 @@ func TestWriter(t *testing.T) { t.Errorf("w.Write() input doesn't match output") } } + +const maxDeviationForDuration = 50 * time.Millisecond +const maxDeviationForRate int64 = 50 + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. 
thread wakes up 25ms after +// `time.Sleep` has ended). +func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start == s2.Start && + durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && + s1.Idle == s2.Idle && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && + ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && + s1.Progress == s2.Progress { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { + if d2-d1 <= maxDeviation { + return true + } + return false +} + +func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} From 4ef77c008ced1f9518bffb2631a692230dd4f920 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 2 Jun 2017 11:55:43 +0300 Subject: [PATCH 167/515] update changelog --- CHANGELOG.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab193688f..a97aa1285 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.2.1 (June 2, 2017) + +FEATURES: + +- [cli] Log level parsing moved here from tendermint repo + ## 0.2.0 (May 18, 2017) BREAKING CHANGES: @@ -14,7 +20,7 @@ FEATURES: BUG FIXES: -- [autofile] Close file before rotating +- [autofile] Close file before rotating ## 0.1.0 (May 1, 2017) From 295f6c2cc60f290e4e7968a9d0a265de010d9238 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Mon, 5 Jun 2017 15:50:11 -0400 Subject: [PATCH 168/515] IsHex and StripHex --- common/string.go | 20 ++++++++++++++++++++ 1 file 
changed, 20 insertions(+) diff --git a/common/string.go b/common/string.go index a4d221b74..f053f0e87 100644 --- a/common/string.go +++ b/common/string.go @@ -1,6 +1,7 @@ package common import ( + "encoding/hex" "fmt" "strings" ) @@ -22,3 +23,22 @@ func LeftPadString(s string, totalLength int) string { } return s } + +// Returns true for non-empty hex-string prefixed with "0x" +func IsHex(s string) bool { + if len(s) > 2 && s[:2] == "0x" { + _, err := hex.DecodeString(s[2:]) + if err != nil { + return false + } + return true + } + return false +} + +func StripHex(s string) string { + if IsHex(s) { + return s[2:] + } + return s +} From 925f2b33504bc2f19932be1d474ade0c33619c96 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Mon, 5 Jun 2017 16:22:01 -0400 Subject: [PATCH 169/515] golint corrections --- common/string.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/common/string.go b/common/string.go index f053f0e87..2818f5ed5 100644 --- a/common/string.go +++ b/common/string.go @@ -6,8 +6,10 @@ import ( "strings" ) +// Fmt shorthand, XXX DEPRECATED var Fmt = fmt.Sprintf +// RightPadString adds spaces to the right of a string to make it length totalLength func RightPadString(s string, totalLength int) string { remaining := totalLength - len(s) if remaining > 0 { @@ -16,6 +18,7 @@ func RightPadString(s string, totalLength int) string { return s } +// LeftPadString adds spaces to the left of a string to make it length totalLength func LeftPadString(s string, totalLength int) string { remaining := totalLength - len(s) if remaining > 0 { @@ -24,7 +27,7 @@ func LeftPadString(s string, totalLength int) string { return s } -// Returns true for non-empty hex-string prefixed with "0x" +// IsHex returns true for non-empty hex-string prefixed with "0x" func IsHex(s string) bool { if len(s) > 2 && s[:2] == "0x" { _, err := hex.DecodeString(s[2:]) @@ -36,6 +39,7 @@ func IsHex(s string) bool { return false } +// StripHex returns hex string without 
leading "0x" func StripHex(s string) string { if IsHex(s) { return s[2:] From 304be4ec2fe153fac1a4a240c40f368c4763d760 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 6 Jun 2017 04:00:36 -0400 Subject: [PATCH 170/515] date parse functionality --- common/date.go | 67 +++++++++++++++++++++++++++++++++++++++ common/date_test.go | 76 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 common/date.go create mode 100644 common/date_test.go diff --git a/common/date.go b/common/date.go new file mode 100644 index 000000000..05f207f7c --- /dev/null +++ b/common/date.go @@ -0,0 +1,67 @@ +package common + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" +) + +// ParseDate parses a date string of the format YYYY-MM-DD +func ParseDate(date string) (t time.Time, err error) { + + //get the time of invoice + str := strings.Split(date, "-") + var ymd = []int{} + for _, i := range str { + j, err := strconv.Atoi(i) + if err != nil { + return t, err + } + ymd = append(ymd, j) + } + if len(ymd) != 3 { + return t, fmt.Errorf("Bad date parsing, not 3 segments") //never stack trace + } + if ymd[1] < 1 || ymd[1] > 12 { + return t, fmt.Errorf("Month not between 1 and 12") //never stack trace + } + if ymd[2] > 31 { + return t, fmt.Errorf("Day over 31") //never stack trace + } + + t = time.Date(ymd[0], time.Month(ymd[1]), ymd[2], 0, 0, 0, 0, time.UTC) + + return t, nil +} + +// ParseDateRange parses a date range string of the format start:end +// where the start and end date are of the format YYYY-MM-DD. 
+// The parsed dates are *time.Time and will return nil pointers for +// unbounded dates, ex: +// unbounded start: :2000-12-31 +// unbounded end: 2000-12-31: +func ParseDateRange(dateRange string) (startDate, endDate *time.Time, err error) { + dates := strings.Split(dateRange, ":") + if len(dates) != 2 { + return nil, nil, errors.New("bad date range, must be in format date:date") + } + parseDate := func(date string) (*time.Time, error) { + if len(date) == 0 { + return nil, nil + } + d, err := ParseDate(date) + return &d, err + } + startDate, err = parseDate(dates[0]) + if err != nil { + return nil, nil, err + } + endDate, err = parseDate(dates[1]) + if err != nil { + return nil, nil, err + } + return +} diff --git a/common/date_test.go b/common/date_test.go new file mode 100644 index 000000000..42fd91aa3 --- /dev/null +++ b/common/date_test.go @@ -0,0 +1,76 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) +) + +func TestParseDate(t *testing.T) { + assert := assert.New(t) + + var testDates = []struct { + dateStr string + date time.Time + errNil bool + }{ + {"2015-12-31", date, true}, + {"2015-31-12", date, false}, + {"12-31-2015", date, false}, + {"31-12-2015", date, false}, + } + + for _, test := range testDates { + parsed, err := ParseDate(test.dateStr) + switch test.errNil { + case true: + assert.Nil(err) + assert.True(parsed.Equal(test.date), "parsed: %v, want %v", parsed, test.date) + case false: + assert.NotNil(err, "parsed %v, expected err %v", parsed, err) + } + } +} + +func TestParseDateRange(t *testing.T) { + assert := assert.New(t) + + var testDates = []struct { + dateStr string + start *time.Time + end *time.Time + errNil bool + }{ + {"2015-12-31:2016-12-31", &date, &date2, true}, + {"2015-12-31:", &date, nil, true}, + {":2016-12-31", nil, &date2, true}, + 
{"2016-12-31", nil, nil, false}, + {"2016-31-12:", nil, nil, false}, + {":2016-31-12", nil, nil, false}, + } + + for _, test := range testDates { + start, end, err := ParseDateRange(test.dateStr) + switch test.errNil { + case true: + assert.Nil(err) + testPtr := func(want, have *time.Time) { + if want == nil { + assert.Nil(have) + } else { + assert.True((*have).Equal(*want)) + } + } + testPtr(test.start, start) + testPtr(test.end, end) + case false: + assert.NotNil(err) + } + } +} From f913ed8134448f65f572014b90ca78b24c12c03a Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 6 Jun 2017 16:08:57 -0400 Subject: [PATCH 171/515] date simplify --- common/date.go | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/common/date.go b/common/date.go index 05f207f7c..7b7b05b22 100644 --- a/common/date.go +++ b/common/date.go @@ -1,8 +1,6 @@ package common import ( - "fmt" - "strconv" "strings" "time" @@ -11,30 +9,8 @@ import ( // ParseDate parses a date string of the format YYYY-MM-DD func ParseDate(date string) (t time.Time, err error) { - - //get the time of invoice - str := strings.Split(date, "-") - var ymd = []int{} - for _, i := range str { - j, err := strconv.Atoi(i) - if err != nil { - return t, err - } - ymd = append(ymd, j) - } - if len(ymd) != 3 { - return t, fmt.Errorf("Bad date parsing, not 3 segments") //never stack trace - } - if ymd[1] < 1 || ymd[1] > 12 { - return t, fmt.Errorf("Month not between 1 and 12") //never stack trace - } - if ymd[2] > 31 { - return t, fmt.Errorf("Day over 31") //never stack trace - } - - t = time.Date(ymd[0], time.Month(ymd[1]), ymd[2], 0, 0, 0, 0, time.UTC) - - return t, nil + layout := "2006-01-02" //this represents YYYY-MM-DD + return time.Parse(layout, date) } // ParseDateRange parses a date range string of the format start:end From c76dca0456bb38c16c10bc69088d0bf9ace157c3 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Fri, 26 May 2017 13:48:37 -0400 Subject: [PATCH 
172/515] cli stderr output for Execute --- cli/setup.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 21b29a491..30fb5e78e 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -93,9 +93,9 @@ func (e Executor) Execute() error { if err != nil { // TODO: something cooler with log-levels if viper.GetBool(TraceFlag) { - fmt.Printf("ERROR: %+v\n", err) + fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err) } else { - fmt.Println("ERROR:", err.Error()) + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) } } return err From 33d0dd0bfc3451643a78eb267faafd704c224a79 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 6 Jun 2017 18:36:28 -0400 Subject: [PATCH 173/515] add stderr to tests --- .gitignore | 1 + cli/helper.go | 33 ++++++++++++++++++++++----------- cli/setup_test.go | 6 ++++-- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index 62f28681c..6e0986855 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ +*.swp vendor .glide diff --git a/cli/helper.go b/cli/helper.go index 79654bc34..959f8a5f6 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -57,28 +57,39 @@ func RunWithArgs(cmd Executable, args []string, env map[string]string) error { // RunCaptureWithArgs executes the given command with the specified command line args // and environmental variables set. 
It returns whatever was writen to // stdout along with any error returned from cmd.Execute() -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) { - old := os.Stdout // keep backup of the real stdout - r, w, _ := os.Pipe() - os.Stdout = w +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { + oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout + rOut, wOut, _ := os.Pipe() + rErr, wErr, _ := os.Pipe() + os.Stdout, os.Stderr = wOut, wErr defer func() { - os.Stdout = old // restoring the real stdout + os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout }() - outC := make(chan string) // copy the output in a separate goroutine so printing can't block indefinitely + outC := make(chan string) go func() { var buf bytes.Buffer - // io.Copy will end when we call w.Close() below - io.Copy(&buf, r) + // io.Copy will end when we call wOut.Close() below + io.Copy(&buf, rOut) outC <- buf.String() }() + errC := make(chan string) + go func() { + var buf bytes.Buffer + // io.Copy will end when we call wErr.Close() below + io.Copy(&buf, rErr) + errC <- buf.String() + }() + // now run the command err = RunWithArgs(cmd, args, env) // and grab the stdout to return - w.Close() - output = <-outC - return output, err + wOut.Close() + wErr.Close() + stdout = <-outC + stderr = <-errC + return stdout, stderr, err } diff --git a/cli/setup_test.go b/cli/setup_test.go index 8fb4ce140..791bc7993 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -212,9 +212,11 @@ func TestSetupTrace(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
- out, err := RunCaptureWithArgs(cmd, args, tc.env) + stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) require.NotNil(err, i) - msg := strings.Split(out, "\n") + require.Equal("", stdout, i) + require.NotEqual("", stderr, i) + msg := strings.Split(stderr, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) assert.Equal(desired, msg[0], i) if tc.long && assert.True(len(msg) > 2, i) { From 94c0172618a5d2d856edd2568034dd6b3b69b6c0 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Wed, 7 Jun 2017 05:05:11 -0400 Subject: [PATCH 174/515] doc update --- cli/helper.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cli/helper.go b/cli/helper.go index 959f8a5f6..12f94ec72 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -54,9 +54,10 @@ func RunWithArgs(cmd Executable, args []string, env map[string]string) error { return cmd.Execute() } -// RunCaptureWithArgs executes the given command with the specified command line args -// and environmental variables set. It returns whatever was writen to -// stdout along with any error returned from cmd.Execute() +// RunCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. 
It returns string fields +// representing output written to stdout and stderr, additionally any error +// from cmd.Execute() is also returned func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout rOut, wOut, _ := os.Pipe() From 0ecb38c6da95a1e8f60117b2bd4a6f76c7a0f944 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 14 Jun 2017 16:51:33 +0200 Subject: [PATCH 175/515] Return exit code on error, disable in tests --- cli/setup.go | 16 ++++++++++++++-- cli/setup_test.go | 4 ++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 21b29a491..4a64f00fd 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -38,7 +38,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { cmd.PersistentFlags().String(HomeFlag, "", "root directory for config and data") cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) - return Executor{cmd} + return Executor{cmd, os.Exit} } // PrepareMainCmd is meant for client side libs that want some more flags @@ -82,6 +82,11 @@ func copyEnvVars(prefix string) { // Executor wraps the cobra Command with a nicer Execute method type Executor struct { *cobra.Command + Exit func(int) // this is os.Exit by default, override in tests +} + +type ExitCoder interface { + ExitCode() int } // execute adds all child commands to the root command sets flags appropriately. 
@@ -91,12 +96,19 @@ func (e Executor) Execute() error { e.SilenceErrors = true err := e.Command.Execute() if err != nil { - // TODO: something cooler with log-levels if viper.GetBool(TraceFlag) { fmt.Printf("ERROR: %+v\n", err) } else { fmt.Println("ERROR:", err.Error()) } + + fmt.Printf("%#v\n", e) + // return error code 1 by default, can override it with a special error type + exitCode := 1 + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + e.Exit(exitCode) } return err } diff --git a/cli/setup_test.go b/cli/setup_test.go index 8fb4ce140..538797c9e 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -46,6 +46,7 @@ func TestSetupEnv(t *testing.T) { } demo.Flags().String("foobar", "", "Some test value from config") cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) @@ -98,6 +99,7 @@ func TestSetupConfig(t *testing.T) { } boo.Flags().String("boo", "", "Some test value from config") cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) @@ -175,6 +177,7 @@ func TestSetupUnmarshal(t *testing.T) { // from the default config here marsh.Flags().Int("age", base.Age, "Some test value from config") cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) @@ -209,6 +212,7 @@ func TestSetupTrace(t *testing.T) { }, } cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
From 59a77e7bef092eef0e1f9b44c983dc9e35eed0d6 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 14 Jun 2017 17:01:15 +0200 Subject: [PATCH 176/515] Remove Printf --- cli/setup.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cli/setup.go b/cli/setup.go index 4a64f00fd..148ac7b88 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -102,7 +102,6 @@ func (e Executor) Execute() error { fmt.Println("ERROR:", err.Error()) } - fmt.Printf("%#v\n", e) // return error code 1 by default, can override it with a special error type exitCode := 1 if ec, ok := err.(ExitCoder); ok { From 3400cee845a48508198845b3c227e6e1577c32a5 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Thu, 15 Jun 2017 20:16:22 +0200 Subject: [PATCH 177/515] Handle --two-words as TMTWO_WORDS env var --- cli/setup.go | 2 +- cli/setup_test.go | 35 +++++++++++++++++++++-------------- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 148ac7b88..35362ed81 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -58,7 +58,7 @@ func initEnv(prefix string) { // env variables with TM prefix (eg. 
TM_ROOT) viper.SetEnvPrefix(prefix) - viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) viper.AutomaticEnv() } diff --git a/cli/setup_test.go b/cli/setup_test.go index 538797c9e..36cbbcc90 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -64,40 +64,46 @@ func TestSetupConfig(t *testing.T) { cval1, cval2 := "fubble", "wubble" conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) require.Nil(err) - // even with some ignored fields, should be no problem - conf2, err := WriteDemoConfig(map[string]string{"boo": cval2, "foo": "bar"}) + // make sure it handles dashed-words in the config, and ignores random info + conf2, err := WriteDemoConfig(map[string]string{"boo": cval2, "foo": "bar", "two-words": "WORD"}) require.Nil(err) cases := []struct { - args []string - env map[string]string - expected string + args []string + env map[string]string + expected string + expectedTwo string }{ - {nil, nil, ""}, + {nil, nil, "", ""}, // setting on the command line - {[]string{"--boo", "haha"}, nil, "haha"}, - {[]string{"--root", conf1}, nil, cval1}, + {[]string{"--boo", "haha"}, nil, "haha", ""}, + {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, + {[]string{"--root", conf1}, nil, cval1, ""}, // test both variants of the prefix - {nil, map[string]string{"RD_BOO": "bang"}, "bang"}, - {nil, map[string]string{"RD_ROOT": conf1}, cval1}, - {nil, map[string]string{"RDROOT": conf2}, cval2}, - {nil, map[string]string{"RDHOME": conf1}, cval1}, + {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RD_ROOT": conf1}, cval1, ""}, + {nil, map[string]string{"RDROOT": conf2}, cval2, "WORD"}, + {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, // and when both are set??? HOME wins every time! 
- {[]string{"--root", conf1}, map[string]string{"RDHOME": conf2}, cval2}, + {[]string{"--root", conf1}, map[string]string{"RDHOME": conf2}, cval2, "WORD"}, } for idx, tc := range cases { i := strconv.Itoa(idx) // test command that store value of foobar in local variable - var foo string + var foo, two string boo := &cobra.Command{ Use: "reader", RunE: func(cmd *cobra.Command, args []string) error { foo = viper.GetString("boo") + two = viper.GetString("two-words") return nil }, } boo.Flags().String("boo", "", "Some test value from config") + boo.Flags().String("two-words", "", "Check out env handling -") cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... cmd.Exit = func(int) {} @@ -106,6 +112,7 @@ func TestSetupConfig(t *testing.T) { err := RunWithArgs(cmd, args, tc.env) require.Nil(err, i) assert.Equal(tc.expected, foo, i) + assert.Equal(tc.expectedTwo, two, i) } } From bd9d0d1637dadf1330e167189d5e5031aadcda6f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 16 Jun 2017 11:40:14 -0400 Subject: [PATCH 178/515] changelog and version --- CHANGELOG.md | 17 +++++++++++++++++ version/version.go | 3 +++ 2 files changed, 20 insertions(+) create mode 100644 version/version.go diff --git a/CHANGELOG.md b/CHANGELOG.md index a97aa1285..e6783601a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## 0.2.2 (June 16, 2017) + +FEATURES: + +- [common] IsHex and StripHex for handling `0x` prefixed hex strings +- [log] NewTracingLogger returns a logger that output error traces, ala `github.com/pkg/errors` + +IMPROVEMENTS: + +- [cli] Error handling for tests +- [cli] Support dashes in ENV variables + +BUG FIXES: + +- [flowrate] Fix non-deterministic test failures + + ## 0.2.1 (June 2, 2017) FEATURES: diff --git a/version/version.go b/version/version.go new file mode 100644 index 000000000..42af8ff7a --- /dev/null +++ b/version/version.go @@ -0,0 +1,3 @@ +package version + +const Version = "0.2.2" From 
0a3a08a3bc3d3397c259dc79f6e9bc15dac2bba2 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Sat, 17 Jun 2017 18:35:05 -0400 Subject: [PATCH 179/515] stderr PR revisions --- CHANGELOG.md | 6 ++++++ cli/helper.go | 27 ++++++++++++--------------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a97aa1285..c5b2a1551 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## Develop-Branch changes (unreleased) + +BREAKING CHANGES: + +- [run] NewBaseService takes the new logger + ## 0.2.1 (June 2, 2017) FEATURES: diff --git a/cli/helper.go b/cli/helper.go index 12f94ec72..8bdb9e496 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -68,21 +68,18 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s }() // copy the output in a separate goroutine so printing can't block indefinitely - outC := make(chan string) - go func() { - var buf bytes.Buffer - // io.Copy will end when we call wOut.Close() below - io.Copy(&buf, rOut) - outC <- buf.String() - }() - - errC := make(chan string) - go func() { - var buf bytes.Buffer - // io.Copy will end when we call wErr.Close() below - io.Copy(&buf, rErr) - errC <- buf.String() - }() + copyStd := func(reader *os.File) *(chan string) { + stdC := make(chan string) + go func() { + var buf bytes.Buffer + // io.Copy will end when we call reader.Close() below + io.Copy(&buf, *reader) + stdC <- buf.String() + }() + return stdC + } + outC := copyStd(&rOut) + errC := copyStd(&rErr) // now run the command err = RunWithArgs(cmd, args, env) From 34bcb30f1cb9d14d141fe02be45ced9b082fd403 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 20 Jun 2017 16:40:32 -0400 Subject: [PATCH 180/515] changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5b2a1551..3db410cca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,14 @@ # Changelog + ## Develop-Branch changes (unreleased) BREAKING 
CHANGES: - [run] NewBaseService takes the new logger +- [cli] RunCaptureWithArgs now captures stderr and stdout + - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) + - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) ## 0.2.1 (June 2, 2017) From f3eaf9b8703db67ddae0c7979a1610d340d6d382 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 20 Jun 2017 16:52:22 -0400 Subject: [PATCH 181/515] quickfix --- cli/helper.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cli/helper.go b/cli/helper.go index 8bdb9e496..845c17dbf 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -73,13 +73,13 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s go func() { var buf bytes.Buffer // io.Copy will end when we call reader.Close() below - io.Copy(&buf, *reader) + io.Copy(&buf, reader) stdC <- buf.String() }() - return stdC + return &stdC } - outC := copyStd(&rOut) - errC := copyStd(&rErr) + outC := copyStd(rOut) + errC := copyStd(rErr) // now run the command err = RunWithArgs(cmd, args, env) @@ -87,7 +87,7 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s // and grab the stdout to return wOut.Close() wErr.Close() - stdout = <-outC - stderr = <-errC + stdout = <-*outC + stderr = <-*errC return stdout, stderr, err } From cc364b14e20965a82a59ba2061675eb98dd818c9 Mon Sep 17 00:00:00 2001 From: rigel rozanski Date: Tue, 20 Jun 2017 17:18:55 -0400 Subject: [PATCH 182/515] changelog and PR changes --- CHANGELOG.md | 7 ++++++ common/date.go | 28 +++++++++++------------ common/date_test.go | 56 +++++++++++---------------------------------- 3 files changed, 34 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a97aa1285..4261c466a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog + +## Develop-Branch changes (unreleased) + 
+FEATURES: + +- [common] Date range parsing from string (ex. "2015-12-31:2017-12-31") + ## 0.2.1 (June 2, 2017) FEATURES: diff --git a/common/date.go b/common/date.go index 7b7b05b22..e017a4b41 100644 --- a/common/date.go +++ b/common/date.go @@ -7,37 +7,37 @@ import ( "github.com/pkg/errors" ) -// ParseDate parses a date string of the format YYYY-MM-DD -func ParseDate(date string) (t time.Time, err error) { - layout := "2006-01-02" //this represents YYYY-MM-DD - return time.Parse(layout, date) -} +// TimeLayout helps to parse a date string of the format YYYY-MM-DD +// Intended to be used with the following function: +// time.Parse(TimeLayout, date) +var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD // ParseDateRange parses a date range string of the format start:end // where the start and end date are of the format YYYY-MM-DD. -// The parsed dates are *time.Time and will return nil pointers for +// The parsed dates are time.Time and will return the zero time for // unbounded dates, ex: // unbounded start: :2000-12-31 // unbounded end: 2000-12-31: -func ParseDateRange(dateRange string) (startDate, endDate *time.Time, err error) { +func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { dates := strings.Split(dateRange, ":") if len(dates) != 2 { - return nil, nil, errors.New("bad date range, must be in format date:date") + err = errors.New("bad date range, must be in format date:date") + return } - parseDate := func(date string) (*time.Time, error) { + parseDate := func(date string) (out time.Time, err error) { if len(date) == 0 { - return nil, nil + return } - d, err := ParseDate(date) - return &d, err + out, err = time.Parse(TimeLayout, date) + return } startDate, err = parseDate(dates[0]) if err != nil { - return nil, nil, err + return } endDate, err = parseDate(dates[1]) if err != nil { - return nil, nil, err + return } return } diff --git a/common/date_test.go b/common/date_test.go index 42fd91aa3..2c0632477 100644 --- 
a/common/date_test.go +++ b/common/date_test.go @@ -10,66 +10,36 @@ import ( var ( date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + zero time.Time ) -func TestParseDate(t *testing.T) { - assert := assert.New(t) - - var testDates = []struct { - dateStr string - date time.Time - errNil bool - }{ - {"2015-12-31", date, true}, - {"2015-31-12", date, false}, - {"12-31-2015", date, false}, - {"31-12-2015", date, false}, - } - - for _, test := range testDates { - parsed, err := ParseDate(test.dateStr) - switch test.errNil { - case true: - assert.Nil(err) - assert.True(parsed.Equal(test.date), "parsed: %v, want %v", parsed, test.date) - case false: - assert.NotNil(err, "parsed %v, expected err %v", parsed, err) - } - } -} - func TestParseDateRange(t *testing.T) { assert := assert.New(t) var testDates = []struct { dateStr string - start *time.Time - end *time.Time + start time.Time + end time.Time errNil bool }{ - {"2015-12-31:2016-12-31", &date, &date2, true}, - {"2015-12-31:", &date, nil, true}, - {":2016-12-31", nil, &date2, true}, - {"2016-12-31", nil, nil, false}, - {"2016-31-12:", nil, nil, false}, - {":2016-31-12", nil, nil, false}, + {"2015-12-31:2016-12-31", date, date2, true}, + {"2015-12-31:", date, zero, true}, + {":2016-12-31", zero, date2, true}, + {"2016-12-31", zero, zero, false}, + {"2016-31-12:", zero, zero, false}, + {":2016-31-12", zero, zero, false}, } for _, test := range testDates { start, end, err := ParseDateRange(test.dateStr) - switch test.errNil { - case true: + if test.errNil { assert.Nil(err) - testPtr := func(want, have *time.Time) { - if want == nil { - assert.Nil(have) - } else { - assert.True((*have).Equal(*want)) - } + testPtr := func(want, have time.Time) { + assert.True(have.Equal(want)) } testPtr(test.start, start) testPtr(test.end, end) - case false: + } else { assert.NotNil(err) } } From a99b8a6210071060600a43b04daed800d8f4d125 Mon Sep 17 00:00:00 2001 
From: Anton Kaliaev Date: Tue, 20 Jun 2017 17:25:42 +0400 Subject: [PATCH 183/515] new events package query parser use parser compiler to generate query parser I used https://github.com/pointlander/peg which has a nice API and seems to be the most popular Golang compiler parser using PEG on Github. More about PEG: - https://en.wikipedia.org/wiki/Parsing_expression_grammar - https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics - https://github.com/PhilippeSigaud/Pegged/wiki/Grammar-Examples rename implement query match function match function uncomment test lines add more test cases for query#Matches fix int case rename events to pubsub add comment about cache assertReceive helper to not block on receive in tests fix bug with multiple conditions uncomment benchmark first results: ``` Benchmark10Clients-2 1000 1305493 ns/op 3957519 B/op 355 allocs/op Benchmark100Clients-2 100 12278304 ns/op 39571751 B/op 3505 allocs/op Benchmark1000Clients-2 10 124120909 ns/op 395714004 B/op 35005 allocs/op ``` 124ms to publish message to 1000 clients. A lot. use AST from query.peg.go separate pubsub and query packages by using Query interface in pubsub wrote docs and refactor code updates from Frey's review refactor type assertion to use type switch cleanup during shutdown subscriber should create output channel, not the server overflow strategies, server buffer capacity context as the first argument for Publish log error introduce Option type update NewServer comment move helpers into pubsub_test increase assertReceive timeout add query.MustParse add more false tests for parser add more false tests for query.Matches parse numbers as int64 / float64 try our best to convert from other types add number to panic output add more comments save commit introduce client argument as first argument to Subscribe > Why we do not specify buffer size on the output channel in Subscribe? 
The choice of buffer size of N here depends on knowing the number of messages server will receive and the number of messages downstream subscribers will consume. This is fragile: if we publish an additional message, or if one of the downstream subscribers reads any fewer messages, we will again have blocked goroutines. save commit remove reference counting fix test test client resubscribe test UnsubscribeAll client options [pubsub/query] fuzzy testing do not print msg as it creates data race! --- .gitignore | 2 + pubsub/example_test.go | 24 + pubsub/pubsub.go | 314 ++++++ pubsub/pubsub_test.go | 227 +++++ pubsub/query/Makefile | 11 + pubsub/query/empty.go | 14 + pubsub/query/empty_test.go | 16 + pubsub/query/fuzz_test/main.go | 30 + pubsub/query/parser_test.go | 81 ++ pubsub/query/query.go | 258 +++++ pubsub/query/query.peg | 33 + pubsub/query/query.peg.go | 1668 ++++++++++++++++++++++++++++++++ pubsub/query/query_test.go | 64 ++ 13 files changed, 2742 insertions(+) create mode 100644 pubsub/example_test.go create mode 100644 pubsub/pubsub.go create mode 100644 pubsub/pubsub_test.go create mode 100644 pubsub/query/Makefile create mode 100644 pubsub/query/empty.go create mode 100644 pubsub/query/empty_test.go create mode 100644 pubsub/query/fuzz_test/main.go create mode 100644 pubsub/query/parser_test.go create mode 100644 pubsub/query/query.go create mode 100644 pubsub/query/query.peg create mode 100644 pubsub/query/query.peg.go create mode 100644 pubsub/query/query_test.go diff --git a/.gitignore b/.gitignore index 62f28681c..34d0bf1f9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ vendor .glide + +pubsub/query/fuzz_test/output diff --git a/pubsub/example_test.go b/pubsub/example_test.go new file mode 100644 index 000000000..d64b96eab --- /dev/null +++ b/pubsub/example_test.go @@ -0,0 +1,24 @@ +package pubsub_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tmlibs/log" + 
"github.com/tendermint/tmlibs/pubsub" + "github.com/tendermint/tmlibs/pubsub/query" +) + +func TestExample(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ch := make(chan interface{}, 1) + s.Subscribe("example-client", query.MustParse("abci.account.name=John"), ch) + err := s.PublishWithTags("Tombstone", map[string]interface{}{"abci.account.name": "John"}) + require.NoError(t, err) + assertReceive(t, "Tombstone", ch) +} diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go new file mode 100644 index 000000000..264848161 --- /dev/null +++ b/pubsub/pubsub.go @@ -0,0 +1,314 @@ +// Package pubsub implements a pub-sub model with a single publisher (Server) +// and multiple subscribers (clients). +// +// Though you can have multiple publishers by sharing a pointer to a server or +// by giving the same channel to each publisher and publishing messages from +// that channel (fan-in). +// +// Clients subscribe for messages, which could be of any type, using a query. +// When some message is published, we match it with all queries. If there is a +// match, this message will be pushed to all clients, subscribed to that query. +// See query subpackage for our implementation. +// +// Overflow strategies (incoming publish requests): +// +// 1) drop - drops publish requests when there are too many of them +// 2) wait - blocks until the server is ready to accept more publish requests (default) +// +// Subscribe/Unsubscribe calls are always blocking. 
+// +// Overflow strategies (outgoing messages): +// +// 1) skip - do not send a message if the client is busy or slow (default) +// 2) wait - wait until the client is ready to accept new messages +// +package pubsub + +import ( + "errors" + + cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" +) + +type operation int + +const ( + sub operation = iota + pub + unsub + shutdown +) + +type overflowStrategy int + +const ( + drop overflowStrategy = iota + wait +) + +var ( + ErrorOverflow = errors.New("Server overflowed") +) + +type cmd struct { + op operation + query Query + ch chan<- interface{} + clientID string + msg interface{} + tags map[string]interface{} +} + +// Query defines an interface for a query to be used for subscribing. +type Query interface { + Matches(tags map[string]interface{}) bool +} + +// Server allows clients to subscribe/unsubscribe for messages, pubsling +// messages with or without tags, and manages internal state. +type Server struct { + cmn.BaseService + + cmds chan cmd + + overflowStrategy overflowStrategy + slowClientStrategy overflowStrategy +} + +// Option sets a parameter for the server. +type Option func(*Server) + +// NewServer returns a new server. See the commentary on the Option functions +// for a detailed description of how to configure buffering and overflow +// behavior. If no options are provided, the resulting server's queue is +// unbuffered and it blocks when overflowed. +func NewServer(options ...Option) *Server { + s := &Server{overflowStrategy: wait, slowClientStrategy: drop} + s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) + + for _, option := range options { + option(s) + } + + if s.cmds == nil { // if BufferCapacity was not set, create unbuffered channel + s.cmds = make(chan cmd) + } + + return s +} + +// BufferCapacity allows you to specify capacity for the internal server's +// queue. 
Since the server, given Y subscribers, could only process X messages,
+// this option could be used to survive spikes (e.g. high amount of
+// transactions during peak hours).
+func BufferCapacity(cap int) Option {
+	return func(s *Server) {
+		if cap > 0 {
+			s.cmds = make(chan cmd, cap)
+		}
+	}
+}
+
+// OverflowStrategyDrop will tell the server to drop messages when it can't
+// process more messages.
+func OverflowStrategyDrop() Option {
+	return func(s *Server) {
+		s.overflowStrategy = drop
+	}
+}
+
+// OverflowStrategyWait will tell the server to block and wait for some time
+// for server to process other messages. Default strategy.
+func OverflowStrategyWait() func(*Server) {
+	return func(s *Server) {
+		s.overflowStrategy = wait
+	}
+}
+
+// WaitSlowClients will tell the server to block and wait until the subscriber
+// reads a message even if it is not fast enough to process it.
+func WaitSlowClients() func(*Server) {
+	return func(s *Server) {
+		s.slowClientStrategy = wait
+	}
+}
+
+// SkipSlowClients will tell the server to skip subscriber if it is busy
+// processing previous message(s). Default strategy.
+func SkipSlowClients() func(*Server) {
+	return func(s *Server) {
+		s.slowClientStrategy = drop
+	}
+}
+
+// Subscribe returns a channel on which messages matching the given query can
+// be received. If the subscription already exists old channel will be closed
+// and new one returned.
+func (s *Server) Subscribe(clientID string, query Query, out chan<- interface{}) {
+	s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}
+}
+
+// Unsubscribe unsubscribes the given client from the query.
+func (s *Server) Unsubscribe(clientID string, query Query) {
+	s.cmds <- cmd{op: unsub, clientID: clientID, query: query}
+}
+
+// UnsubscribeAll unsubscribes the given client from all queries.
+func (s *Server) UnsubscribeAll(clientID string) {
+	s.cmds <- cmd{op: unsub, clientID: clientID}
+}
+
+// Publish publishes the given message. 
+func (s *Server) Publish(msg interface{}) error { + return s.PublishWithTags(msg, make(map[string]interface{})) +} + +// PublishWithTags publishes the given message with a set of tags. This set of +// tags will be matched with client queries. If there is a match, the message +// will be sent to a client. +func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) error { + pubCmd := cmd{op: pub, msg: msg, tags: tags} + switch s.overflowStrategy { + case drop: + select { + case s.cmds <- pubCmd: + default: + s.Logger.Error("Server overflowed, dropping message...", "msg", msg) + return ErrorOverflow + } + case wait: + s.cmds <- pubCmd + } + return nil +} + +// OnStop implements Service.OnStop by shutting down the server. +func (s *Server) OnStop() { + s.cmds <- cmd{op: shutdown} +} + +// NOTE: not goroutine safe +type state struct { + // query -> client -> ch + queries map[Query]map[string]chan<- interface{} + // client -> query -> struct{} + clients map[string]map[Query]struct{} +} + +// OnStart implements Service.OnStart by creating a main loop. 
+func (s *Server) OnStart() error {
+	go s.loop(state{
+		queries: make(map[Query]map[string]chan<- interface{}),
+		clients: make(map[string]map[Query]struct{}),
+	})
+	return nil
+}
+
+func (s *Server) loop(state state) {
+loop:
+	for cmd := range s.cmds {
+		switch cmd.op {
+		case unsub:
+			if cmd.query != nil {
+				state.remove(cmd.clientID, cmd.query)
+			} else {
+				state.removeAll(cmd.clientID)
+			}
+		case shutdown:
+			state.reset()
+			break loop
+		case sub:
+			state.add(cmd.clientID, cmd.query, cmd.ch)
+		case pub:
+			state.send(cmd.msg, cmd.tags, s.slowClientStrategy, s.Logger)
+		}
+	}
+}
+
+func (state *state) add(clientID string, q Query, ch chan<- interface{}) {
+	// add query if needed
+	if clientToChannelMap, ok := state.queries[q]; !ok {
+		state.queries[q] = make(map[string]chan<- interface{})
+	} else {
+		// check if already subscribed
+		if oldCh, ok := clientToChannelMap[clientID]; ok {
+			close(oldCh)
+		}
+	}
+	state.queries[q][clientID] = ch
+
+	// add client if needed
+	if _, ok := state.clients[clientID]; !ok {
+		state.clients[clientID] = make(map[Query]struct{})
+	}
+	state.clients[clientID][q] = struct{}{}
+
+	// create subscription
+	clientToChannelMap := state.queries[q]
+	clientToChannelMap[clientID] = ch
+}
+
+func (state *state) remove(clientID string, q Query) {
+	clientToChannelMap, ok := state.queries[q]
+	if !ok {
+		return
+	}
+
+	ch, ok := clientToChannelMap[clientID]
+	if ok {
+		close(ch)
+
+		delete(state.clients[clientID], q)
+
+		// if it is not subscribed to anything else, remove the client
+		if len(state.clients[clientID]) == 0 {
+			delete(state.clients, clientID)
+		}
+
+		delete(state.queries[q], clientID)
+	}
+}
+
+func (state *state) removeAll(clientID string) {
+	queryMap, ok := state.clients[clientID]
+	if !ok {
+		return
+	}
+
+	for q, _ := range queryMap {
+		ch := state.queries[q][clientID]
+		close(ch)
+
+		delete(state.queries[q], clientID)
+	}
+
+	delete(state.clients, clientID)
+}
+
+func (state *state) reset() {
+	state.queries = 
make(map[Query]map[string]chan<- interface{}) + state.clients = make(map[string]map[Query]struct{}) +} + +func (state *state) send(msg interface{}, tags map[string]interface{}, slowClientStrategy overflowStrategy, logger log.Logger) { + for q, clientToChannelMap := range state.queries { + // NOTE we can use LRU cache to speed up common cases like query = " + // tm.events.type=NewBlock" and tags = {"tm.events.type": "NewBlock"} + if q.Matches(tags) { + for clientID, ch := range clientToChannelMap { + logger.Info("Sending message to client", "msg", msg, "client", clientID) + switch slowClientStrategy { + case drop: + select { + case ch <- msg: + default: + logger.Error("Client is busy, skipping...", "clientID", clientID) + } + case wait: + ch <- msg + } + } + } + } +} diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go new file mode 100644 index 000000000..570f76a82 --- /dev/null +++ b/pubsub/pubsub_test.go @@ -0,0 +1,227 @@ +package pubsub_test + +import ( + "fmt" + "runtime/debug" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tmlibs/pubsub" + "github.com/tendermint/tmlibs/pubsub/query" +) + +const ( + clientID = "test-client" +) + +func TestSubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ch := make(chan interface{}, 1) + s.Subscribe(clientID, query.Empty{}, ch) + err := s.Publish("Ka-Zar") + require.NoError(t, err) + assertReceive(t, "Ka-Zar", ch) + + err = s.Publish("Quicksilver") + require.NoError(t, err) + assertReceive(t, "Quicksilver", ch) +} + +func TestDifferentClients(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + ch1 := make(chan interface{}, 1) + s.Subscribe("client-1", query.MustParse("tm.events.type=NewBlock"), ch1) + err := s.PublishWithTags("Iceman", map[string]interface{}{"tm.events.type": 
"NewBlock"}) + require.NoError(t, err) + assertReceive(t, "Iceman", ch1) + + ch2 := make(chan interface{}, 1) + s.Subscribe("client-2", query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) + err = s.PublishWithTags("Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) + require.NoError(t, err) + assertReceive(t, "Ultimo", ch1) + assertReceive(t, "Ultimo", ch2) + + ch3 := make(chan interface{}, 1) + s.Subscribe("client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) + err = s.PublishWithTags("Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) + require.NoError(t, err) + assert.Zero(t, len(ch3)) +} + +func TestClientResubscribes(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + q := query.MustParse("tm.events.type=NewBlock") + + ch1 := make(chan interface{}, 1) + s.Subscribe(clientID, q, ch1) + err := s.PublishWithTags("Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) + require.NoError(t, err) + assertReceive(t, "Goblin Queen", ch1) + + ch2 := make(chan interface{}, 1) + s.Subscribe(clientID, q, ch2) + + _, ok := <-ch1 + assert.False(t, ok) + + err = s.PublishWithTags("Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) + require.NoError(t, err) + assertReceive(t, "Spider-Man", ch2) +} + +func TestUnsubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ch := make(chan interface{}) + s.Subscribe(clientID, query.Empty{}, ch) + s.Unsubscribe(clientID, query.Empty{}) + + err := s.Publish("Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") + + _, ok := <-ch + assert.False(t, ok) +} + +func TestUnsubscribeAll(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer 
s.Stop() + + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) + s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch1) + s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) + + s.UnsubscribeAll(clientID) + + err := s.Publish("Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") + assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") + + _, ok := <-ch1 + assert.False(t, ok) + _, ok = <-ch2 + assert.False(t, ok) +} + +func TestOverflowStrategyDrop(t *testing.T) { + s := pubsub.NewServer(pubsub.OverflowStrategyDrop()) + s.SetLogger(log.TestingLogger()) + + err := s.Publish("Veda") + if assert.Error(t, err) { + assert.Equal(t, pubsub.ErrorOverflow, err) + } +} + +func TestOverflowStrategyWait(t *testing.T) { + s := pubsub.NewServer(pubsub.OverflowStrategyWait()) + s.SetLogger(log.TestingLogger()) + + go func() { + time.Sleep(1 * time.Second) + s.Start() + defer s.Stop() + }() + + err := s.Publish("Veda") + assert.NoError(t, err) +} + +func TestBufferCapacity(t *testing.T) { + s := pubsub.NewServer(pubsub.BufferCapacity(2)) + s.SetLogger(log.TestingLogger()) + + err := s.Publish("Nighthawk") + require.NoError(t, err) + err = s.Publish("Sage") + require.NoError(t, err) +} + +func TestWaitSlowClients(t *testing.T) { + s := pubsub.NewServer(pubsub.WaitSlowClients()) + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ch := make(chan interface{}) + s.Subscribe(clientID, query.Empty{}, ch) + err := s.Publish("Wonderwoman") + require.NoError(t, err) + + time.Sleep(1 * time.Second) + + assertReceive(t, "Wonderwoman", ch) +} + +func TestSkipSlowClients(t *testing.T) { + s := pubsub.NewServer(pubsub.SkipSlowClients()) + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ch := make(chan interface{}) + s.Subscribe(clientID, query.Empty{}, ch) + err := s.Publish("Cyclops") + require.NoError(t, err) + 
assert.Zero(t, len(ch)) +} + +func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } +func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } +func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } + +func benchmarkNClients(n int, b *testing.B) { + s := pubsub.NewServer(pubsub.BufferCapacity(b.N)) + s.Start() + defer s.Stop() + + for i := 0; i < n; i++ { + ch := make(chan interface{}) + s.Subscribe(clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = Ivan AND abci.Invoices.Number = %d", i)), ch) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags("Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}) + } +} + +/////////////////////////////////////////////////////////////////////////////// +/// HELPERS +/////////////////////////////////////////////////////////////////////////////// + +func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) { + select { + case actual := <-ch: + if actual != nil { + assert.Equal(t, expected, actual, msgAndArgs...) 
+ } + case <-time.After(1 * time.Second): + t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected) + debug.PrintStack() + } +} diff --git a/pubsub/query/Makefile b/pubsub/query/Makefile new file mode 100644 index 000000000..ca3ff5b56 --- /dev/null +++ b/pubsub/query/Makefile @@ -0,0 +1,11 @@ +gen_query_parser: + @go get github.com/pointlander/peg + peg -inline -switch query.peg + +fuzzy_test: + @go get github.com/dvyukov/go-fuzz/go-fuzz + @go get github.com/dvyukov/go-fuzz/go-fuzz-build + go-fuzz-build github.com/tendermint/tmlibs/pubsub/query/fuzz_test + go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output + +.PHONY: gen_query_parser fuzzy_test diff --git a/pubsub/query/empty.go b/pubsub/query/empty.go new file mode 100644 index 000000000..2d60a8923 --- /dev/null +++ b/pubsub/query/empty.go @@ -0,0 +1,14 @@ +package query + +// Empty query matches any set of tags. +type Empty struct { +} + +// Matches always returns true. +func (Empty) Matches(tags map[string]interface{}) bool { + return true +} + +func (Empty) String() string { + return "empty" +} diff --git a/pubsub/query/empty_test.go b/pubsub/query/empty_test.go new file mode 100644 index 000000000..663acb191 --- /dev/null +++ b/pubsub/query/empty_test.go @@ -0,0 +1,16 @@ +package query_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tmlibs/pubsub/query" +) + +func TestEmptyQueryMatchesAnything(t *testing.T) { + q := query.Empty{} + assert.True(t, q.Matches(map[string]interface{}{})) + assert.True(t, q.Matches(map[string]interface{}{"Asher": "Roth"})) + assert.True(t, q.Matches(map[string]interface{}{"Route": 66})) + assert.True(t, q.Matches(map[string]interface{}{"Route": 66, "Billy": "Blue"})) +} diff --git a/pubsub/query/fuzz_test/main.go b/pubsub/query/fuzz_test/main.go new file mode 100644 index 000000000..3b0ef1473 --- /dev/null +++ b/pubsub/query/fuzz_test/main.go @@ -0,0 +1,30 @@ +package fuzz_test + +import ( + "fmt" 
+ + "github.com/tendermint/tmlibs/pubsub/query" +) + +func Fuzz(data []byte) int { + sdata := string(data) + q0, err := query.New(sdata) + if err != nil { + return 0 + } + + sdata1 := q0.String() + q1, err := query.New(sdata1) + if err != nil { + panic(err) + } + + sdata2 := q1.String() + if sdata1 != sdata2 { + fmt.Printf("q0: %q\n", sdata1) + fmt.Printf("q1: %q\n", sdata2) + panic("query changed") + } + + return 1 +} diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go new file mode 100644 index 000000000..194966664 --- /dev/null +++ b/pubsub/query/parser_test.go @@ -0,0 +1,81 @@ +package query_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tmlibs/pubsub/query" +) + +// TODO: fuzzy testing? +func TestParser(t *testing.T) { + cases := []struct { + query string + valid bool + }{ + {"tm.events.type=NewBlock", true}, + {"tm.events.type = NewBlock", true}, + {"tm.events.type=TIME", true}, + {"tm.events.type=DATE", true}, + {"tm.events.type==", false}, + {">==", false}, + {"tm.events.type NewBlock =", false}, + {"tm.events.type>NewBlock", false}, + {"", false}, + {"=", false}, + {"=NewBlock", false}, + {"tm.events.type=", false}, + + {"tm.events.typeNewBlock", false}, + {"NewBlock", false}, + {"", false}, + + {"tm.events.type=NewBlock AND abci.account.name=Igor", true}, + {"tm.events.type=NewBlock AND", false}, + {"tm.events.type=NewBlock AN", false}, + {"tm.events.type=NewBlock AN tm.events.type=NewBlockHeader", false}, + {"AND tm.events.type=NewBlock ", false}, + + {"abci.account.name CONTAINS Igor", true}, + + {"tx.date > DATE 2013-05-03", true}, + {"tx.date < DATE 2013-05-03", true}, + {"tx.date <= DATE 2013-05-03", true}, + {"tx.date >= DATE 2013-05-03", true}, + {"tx.date >= DAT 2013-05-03", false}, + {"tx.date <= DATE2013-05-03", false}, + {"tx.date <= DATE -05-03", false}, + {"tx.date >= DATE 20130503", false}, + {"tx.date >= DATE 2013+01-03", false}, + // incorrect year, month, day + {"tx.date 
>= DATE 0013-01-03", false}, + {"tx.date >= DATE 2013-31-03", false}, + {"tx.date >= DATE 2013-01-83", false}, + + {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, + {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, + {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME2013-05-03T14:45:00Z", false}, + {"tx.date = IME 2013-05-03T14:45:00Z", false}, + {"tx.date = TIME 2013-05-:45:00Z", false}, + {"tx.date >= TIME 2013-05-03T14:45:00", false}, + {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, + {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, + + {"account.balance=100", true}, + {"account.balance >= 200", true}, + {"account.balance >= -300", false}, + {"account.balance >>= 400", false}, + {"account.balance=33.22.1", false}, + } + + for _, c := range cases { + _, err := query.New(c.query) + if c.valid { + assert.NoError(t, err, "Query was '%s'", c.query) + } else { + assert.Error(t, err, "Query was '%s'", c.query) + } + } +} diff --git a/pubsub/query/query.go b/pubsub/query/query.go new file mode 100644 index 000000000..f084a3f98 --- /dev/null +++ b/pubsub/query/query.go @@ -0,0 +1,258 @@ +// Package query provides a parser for a custom query format: +// +// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// +// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. +// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics +// +// It has a support for numbers (integer and floating point), dates and times. +package query + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Query holds the query string and the query parser. +type Query struct { + str string + parser *QueryParser +} + +// New parses the given string and returns a query or error if the string is +// invalid. 
+func New(s string) (*Query, error) { + p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} + p.Init() + if err := p.Parse(); err != nil { + return nil, err + } + return &Query{str: s, parser: p}, nil +} + +// MustParse turns the given string into a query or panics; for tests or others +// cases where you know the string is valid. +func MustParse(s string) *Query { + q, err := New(s) + if err != nil { + panic(fmt.Sprintf("failed to parse %s: %v", s, err)) + } + return q +} + +// String returns the original string. +func (q *Query) String() string { + return q.str +} + +type operator uint8 + +const ( + opLessEqual operator = iota + opGreaterEqual + opLess + opGreater + opEqual + opContains +) + +// Matches returns true if the query matches the given set of tags, false otherwise. +// +// For example, query "name=John" matches tags = {"name": "John"}. More +// examples could be found in parser_test.go and query_test.go. +func (q *Query) Matches(tags map[string]interface{}) bool { + if len(tags) == 0 { + return false + } + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = opLessEqual + case rulege: + op = opGreaterEqual + case rulel: + op = opLess + case ruleg: + op = opGreater + case ruleequal: + op = opEqual + case rulecontains: + op = opContains + case rulevalue: + // see if the triplet (tag, operator, operand) matches any tag + // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } + if !match(tag, op, reflect.ValueOf(buffer[begin:end]), tags) { + return false + } + case rulenumber: + number := buffer[begin:end] + if strings.Contains(number, ".") { // if it looks like a floating-point number + value, err := 
strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + case ruletime: + value, err := time.Parse(time.RFC3339, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + } + + return true +} + +// match returns true if the given triplet (tag, operator, operand) matches any tag. +// +// First, it looks up the tag in tags and if it finds one, tries to compare the +// value from it to the operand using the operator. 
+// +// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } +func match(tag string, op operator, operand reflect.Value, tags map[string]interface{}) bool { + // look up the tag from the query in tags + value, ok := tags[tag] + if !ok { + return false + } + switch operand.Kind() { + case reflect.Struct: // time + operandAsTime := operand.Interface().(time.Time) + v, ok := value.(time.Time) + if !ok { // if value from tags is not time.Time + return false + } + switch op { + case opLessEqual: + return v.Before(operandAsTime) || v.Equal(operandAsTime) + case opGreaterEqual: + return v.Equal(operandAsTime) || v.After(operandAsTime) + case opLess: + return v.Before(operandAsTime) + case opGreater: + return v.After(operandAsTime) + case opEqual: + return v.Equal(operandAsTime) + } + case reflect.Float64: + operandFloat64 := operand.Interface().(float64) + var v float64 + // try our best to convert value from tags to float64 + switch vt := value.(type) { + case float64: + v = vt + case float32: + v = float64(vt) + case int: + v = float64(vt) + case int8: + v = float64(vt) + case int16: + v = float64(vt) + case int32: + v = float64(vt) + case int64: + v = float64(vt) + default: // fail for all other types + panic(fmt.Sprintf("Incomparable types: %T (%v) vs float64 (%v)", value, value, operandFloat64)) + } + switch op { + case opLessEqual: + return v <= operandFloat64 + case opGreaterEqual: + return v >= operandFloat64 + case opLess: + return v < operandFloat64 + case opGreater: + return v > operandFloat64 + case opEqual: + return v == operandFloat64 + } + case reflect.Int64: + operandInt := operand.Interface().(int64) + var v int64 + // try our best to convert value from tags to int64 + switch vt := value.(type) { + case int64: + v = vt + case int8: + v = int64(vt) + case int16: + v = int64(vt) + case int32: + v = int64(vt) + case int: + v = int64(vt) + case float64: + v = int64(vt) + case float32: + v = int64(vt) + default: // fail for all other types + 
panic(fmt.Sprintf("Incomparable types: %T (%v) vs int64 (%v)", value, value, operandInt)) + } + switch op { + case opLessEqual: + return v <= operandInt + case opGreaterEqual: + return v >= operandInt + case opLess: + return v < operandInt + case opGreater: + return v > operandInt + case opEqual: + return v == operandInt + } + case reflect.String: + v, ok := value.(string) + if !ok { // if value from tags is not string + return false + } + switch op { + case opEqual: + return v == operand.String() + case opContains: + return strings.Contains(v, operand.String()) + } + default: + panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) + } + + return false +} diff --git a/pubsub/query/query.peg b/pubsub/query/query.peg new file mode 100644 index 000000000..9654289c4 --- /dev/null +++ b/pubsub/query/query.peg @@ -0,0 +1,33 @@ +package query + +type QueryParser Peg { +} + +e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. + +condition <- tag ' '* (le ' '* (number / time / date) + / ge ' '* (number / time / date) + / l ' '* (number / time / date) + / g ' '* (number / time / date) + / equal ' '* (number / time / date / value) + / contains ' '* value + ) + +tag <- < (![ \t\n\r\\()"=><] .)+ > +value <- < (![ \t\n\r\\()"=><] .)+ > +number <- < ('0' + / [1-9] digit* ('.' digit*)?) 
> +digit <- [0-9] +time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > +date <- "DATE " < year '-' month '-' day > +year <- ('1' / '2') digit digit digit +month <- ('0' / '1') digit +day <- ('0' / '1' / '2' / '3') digit +and <- "AND" + +equal <- "=" +contains <- "CONTAINS" +le <- "<=" +ge <- ">=" +l <- "<" +g <- ">" diff --git a/pubsub/query/query.peg.go b/pubsub/query/query.peg.go new file mode 100644 index 000000000..5cd0a9e32 --- /dev/null +++ b/pubsub/query/query.peg.go @@ -0,0 +1,1668 @@ +package query + +import ( + "fmt" + "math" + "sort" + "strconv" +) + +const endSymbol rune = 1114112 + +/* The rule types inferred from the grammar are below. */ +type pegRule uint8 + +const ( + ruleUnknown pegRule = iota + rulee + rulecondition + ruletag + rulevalue + rulenumber + ruledigit + ruletime + ruledate + ruleyear + rulemonth + ruleday + ruleand + ruleequal + rulecontains + rulele + rulege + rulel + ruleg + rulePegText +) + +var rul3s = [...]string{ + "Unknown", + "e", + "condition", + "tag", + "value", + "number", + "digit", + "time", + "date", + "year", + "month", + "day", + "and", + "equal", + "contains", + "le", + "ge", + "l", + "g", + "PegText", +} + +type token32 struct { + pegRule + begin, end uint32 +} + +func (t *token32) String() string { + return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) +} + +type node32 struct { + token32 + up, next *node32 +} + +func (node *node32) print(pretty bool, buffer string) { + var print func(node *node32, depth int) + print = func(node *node32, depth int) { + for node != nil { + for c := 0; c < depth; c++ { + fmt.Printf(" ") + } + rule := rul3s[node.pegRule] + quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) + if !pretty { + fmt.Printf("%v %v\n", rule, quote) + } else { + fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) + } + if node.up != nil { + print(node.up, depth+1) + } + node = 
node.next + } + } + print(node, 0) +} + +func (node *node32) Print(buffer string) { + node.print(false, buffer) +} + +func (node *node32) PrettyPrint(buffer string) { + node.print(true, buffer) +} + +type tokens32 struct { + tree []token32 +} + +func (t *tokens32) Trim(length uint32) { + t.tree = t.tree[:length] +} + +func (t *tokens32) Print() { + for _, token := range t.tree { + fmt.Println(token.String()) + } +} + +func (t *tokens32) AST() *node32 { + type element struct { + node *node32 + down *element + } + tokens := t.Tokens() + var stack *element + for _, token := range tokens { + if token.begin == token.end { + continue + } + node := &node32{token32: token} + for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { + stack.node.next = node.up + node.up = stack.node + stack = stack.down + } + stack = &element{node: node, down: stack} + } + if stack != nil { + return stack.node + } + return nil +} + +func (t *tokens32) PrintSyntaxTree(buffer string) { + t.AST().Print(buffer) +} + +func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { + t.AST().PrettyPrint(buffer) +} + +func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { + if tree := t.tree; int(index) >= len(tree) { + expanded := make([]token32, 2*len(tree)) + copy(expanded, tree) + t.tree = expanded + } + t.tree[index] = token32{ + pegRule: rule, + begin: begin, + end: end, + } +} + +func (t *tokens32) Tokens() []token32 { + return t.tree +} + +type QueryParser struct { + Buffer string + buffer []rune + rules [20]func() bool + parse func(rule ...int) error + reset func() + Pretty bool + tokens32 +} + +func (p *QueryParser) Parse(rule ...int) error { + return p.parse(rule...) 
+} + +func (p *QueryParser) Reset() { + p.reset() +} + +type textPosition struct { + line, symbol int +} + +type textPositionMap map[int]textPosition + +func translatePositions(buffer []rune, positions []int) textPositionMap { + length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 + sort.Ints(positions) + +search: + for i, c := range buffer { + if c == '\n' { + line, symbol = line+1, 0 + } else { + symbol++ + } + if i == positions[j] { + translations[positions[j]] = textPosition{line, symbol} + for j++; j < length; j++ { + if i != positions[j] { + continue search + } + } + break search + } + } + + return translations +} + +type parseError struct { + p *QueryParser + max token32 +} + +func (e *parseError) Error() string { + tokens, error := []token32{e.max}, "\n" + positions, p := make([]int, 2*len(tokens)), 0 + for _, token := range tokens { + positions[p], p = int(token.begin), p+1 + positions[p], p = int(token.end), p+1 + } + translations := translatePositions(e.p.buffer, positions) + format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" + if e.p.Pretty { + format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" + } + for _, token := range tokens { + begin, end := int(token.begin), int(token.end) + error += fmt.Sprintf(format, + rul3s[token.pegRule], + translations[begin].line, translations[begin].symbol, + translations[end].line, translations[end].symbol, + strconv.Quote(string(e.p.buffer[begin:end]))) + } + + return error +} + +func (p *QueryParser) PrintSyntaxTree() { + if p.Pretty { + p.tokens32.PrettyPrintSyntaxTree(p.Buffer) + } else { + p.tokens32.PrintSyntaxTree(p.Buffer) + } +} + +func (p *QueryParser) Init() { + var ( + max token32 + position, tokenIndex uint32 + buffer []rune + ) + p.reset = func() { + max = token32{} + position, tokenIndex = 0, 0 + + p.buffer = []rune(p.Buffer) + if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != 
endSymbol { + p.buffer = append(p.buffer, endSymbol) + } + buffer = p.buffer + } + p.reset() + + _rules := p.rules + tree := tokens32{tree: make([]token32, math.MaxInt16)} + p.parse = func(rule ...int) error { + r := 1 + if len(rule) > 0 { + r = rule[0] + } + matches := p.rules[r]() + p.tokens32 = tree + if matches { + p.Trim(tokenIndex) + return nil + } + return &parseError{p, max} + } + + add := func(rule pegRule, begin uint32) { + tree.Add(rule, begin, position, tokenIndex) + tokenIndex++ + if begin != position && position > max.end { + max = token32{rule, begin, position} + } + } + + matchDot := func() bool { + if buffer[position] != endSymbol { + position++ + return true + } + return false + } + + /*matchChar := func(c byte) bool { + if buffer[position] == c { + position++ + return true + } + return false + }*/ + + /*matchRange := func(lower byte, upper byte) bool { + if c := buffer[position]; c >= lower && c <= upper { + position++ + return true + } + return false + }*/ + + _rules = [...]func() bool{ + nil, + /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ + func() bool { + position0, tokenIndex0 := position, tokenIndex + { + position1 := position + if buffer[position] != rune('"') { + goto l0 + } + position++ + if !_rules[rulecondition]() { + goto l0 + } + l2: + { + position3, tokenIndex3 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l3 + } + position++ + l4: + { + position5, tokenIndex5 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l5 + } + position++ + goto l4 + l5: + position, tokenIndex = position5, tokenIndex5 + } + { + position6 := position + { + position7, tokenIndex7 := position, tokenIndex + if buffer[position] != rune('a') { + goto l8 + } + position++ + goto l7 + l8: + position, tokenIndex = position7, tokenIndex7 + if buffer[position] != rune('A') { + goto l3 + } + position++ + } + l7: + { + position9, tokenIndex9 := position, tokenIndex + if buffer[position] != rune('n') { + goto l10 + 
} + position++ + goto l9 + l10: + position, tokenIndex = position9, tokenIndex9 + if buffer[position] != rune('N') { + goto l3 + } + position++ + } + l9: + { + position11, tokenIndex11 := position, tokenIndex + if buffer[position] != rune('d') { + goto l12 + } + position++ + goto l11 + l12: + position, tokenIndex = position11, tokenIndex11 + if buffer[position] != rune('D') { + goto l3 + } + position++ + } + l11: + add(ruleand, position6) + } + if buffer[position] != rune(' ') { + goto l3 + } + position++ + l13: + { + position14, tokenIndex14 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l14 + } + position++ + goto l13 + l14: + position, tokenIndex = position14, tokenIndex14 + } + if !_rules[rulecondition]() { + goto l3 + } + goto l2 + l3: + position, tokenIndex = position3, tokenIndex3 + } + if buffer[position] != rune('"') { + goto l0 + } + position++ + { + position15, tokenIndex15 := position, tokenIndex + if !matchDot() { + goto l15 + } + goto l0 + l15: + position, tokenIndex = position15, tokenIndex15 + } + add(rulee, position1) + } + return true + l0: + position, tokenIndex = position0, tokenIndex0 + return false + }, + /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* (number / time / date / value))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ + func() bool { + position16, tokenIndex16 := position, tokenIndex + { + position17 := position + { + position18 := position + { + position19 := position + { + position22, 
tokenIndex22 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l22 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l22 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l22 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l22 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l22 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l22 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l22 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l22 + } + position++ + break + case '\n': + if buffer[position] != rune('\n') { + goto l22 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l22 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l22 + } + position++ + break + } + } + + goto l16 + l22: + position, tokenIndex = position22, tokenIndex22 + } + if !matchDot() { + goto l16 + } + l20: + { + position21, tokenIndex21 := position, tokenIndex + { + position24, tokenIndex24 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l24 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l24 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l24 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l24 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l24 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l24 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l24 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l24 + } + position++ + break + case '\n': + if buffer[position] != 
rune('\n') { + goto l24 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l24 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l24 + } + position++ + break + } + } + + goto l21 + l24: + position, tokenIndex = position24, tokenIndex24 + } + if !matchDot() { + goto l21 + } + goto l20 + l21: + position, tokenIndex = position21, tokenIndex21 + } + add(rulePegText, position19) + } + add(ruletag, position18) + } + l26: + { + position27, tokenIndex27 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l27 + } + position++ + goto l26 + l27: + position, tokenIndex = position27, tokenIndex27 + } + { + position28, tokenIndex28 := position, tokenIndex + { + position30 := position + if buffer[position] != rune('<') { + goto l29 + } + position++ + if buffer[position] != rune('=') { + goto l29 + } + position++ + add(rulele, position30) + } + l31: + { + position32, tokenIndex32 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l32 + } + position++ + goto l31 + l32: + position, tokenIndex = position32, tokenIndex32 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l29 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l29 + } + break + default: + if !_rules[rulenumber]() { + goto l29 + } + break + } + } + + goto l28 + l29: + position, tokenIndex = position28, tokenIndex28 + { + position35 := position + if buffer[position] != rune('>') { + goto l34 + } + position++ + if buffer[position] != rune('=') { + goto l34 + } + position++ + add(rulege, position35) + } + l36: + { + position37, tokenIndex37 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l37 + } + position++ + goto l36 + l37: + position, tokenIndex = position37, tokenIndex37 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l34 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l34 + } + break + default: + if 
!_rules[rulenumber]() { + goto l34 + } + break + } + } + + goto l28 + l34: + position, tokenIndex = position28, tokenIndex28 + { + switch buffer[position] { + case '=': + { + position40 := position + if buffer[position] != rune('=') { + goto l16 + } + position++ + add(ruleequal, position40) + } + l41: + { + position42, tokenIndex42 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l42 + } + position++ + goto l41 + l42: + position, tokenIndex = position42, tokenIndex42 + } + { + position43, tokenIndex43 := position, tokenIndex + if !_rules[rulenumber]() { + goto l44 + } + goto l43 + l44: + position, tokenIndex = position43, tokenIndex43 + if !_rules[ruletime]() { + goto l45 + } + goto l43 + l45: + position, tokenIndex = position43, tokenIndex43 + if !_rules[ruledate]() { + goto l46 + } + goto l43 + l46: + position, tokenIndex = position43, tokenIndex43 + if !_rules[rulevalue]() { + goto l16 + } + } + l43: + break + case '>': + { + position47 := position + if buffer[position] != rune('>') { + goto l16 + } + position++ + add(ruleg, position47) + } + l48: + { + position49, tokenIndex49 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l49 + } + position++ + goto l48 + l49: + position, tokenIndex = position49, tokenIndex49 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + case '<': + { + position51 := position + if buffer[position] != rune('<') { + goto l16 + } + position++ + add(rulel, position51) + } + l52: + { + position53, tokenIndex53 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l53 + } + position++ + goto l52 + l53: + position, tokenIndex = position53, tokenIndex53 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() 
{ + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + default: + { + position55 := position + { + position56, tokenIndex56 := position, tokenIndex + if buffer[position] != rune('c') { + goto l57 + } + position++ + goto l56 + l57: + position, tokenIndex = position56, tokenIndex56 + if buffer[position] != rune('C') { + goto l16 + } + position++ + } + l56: + { + position58, tokenIndex58 := position, tokenIndex + if buffer[position] != rune('o') { + goto l59 + } + position++ + goto l58 + l59: + position, tokenIndex = position58, tokenIndex58 + if buffer[position] != rune('O') { + goto l16 + } + position++ + } + l58: + { + position60, tokenIndex60 := position, tokenIndex + if buffer[position] != rune('n') { + goto l61 + } + position++ + goto l60 + l61: + position, tokenIndex = position60, tokenIndex60 + if buffer[position] != rune('N') { + goto l16 + } + position++ + } + l60: + { + position62, tokenIndex62 := position, tokenIndex + if buffer[position] != rune('t') { + goto l63 + } + position++ + goto l62 + l63: + position, tokenIndex = position62, tokenIndex62 + if buffer[position] != rune('T') { + goto l16 + } + position++ + } + l62: + { + position64, tokenIndex64 := position, tokenIndex + if buffer[position] != rune('a') { + goto l65 + } + position++ + goto l64 + l65: + position, tokenIndex = position64, tokenIndex64 + if buffer[position] != rune('A') { + goto l16 + } + position++ + } + l64: + { + position66, tokenIndex66 := position, tokenIndex + if buffer[position] != rune('i') { + goto l67 + } + position++ + goto l66 + l67: + position, tokenIndex = position66, tokenIndex66 + if buffer[position] != rune('I') { + goto l16 + } + position++ + } + l66: + { + position68, tokenIndex68 := position, tokenIndex + if buffer[position] != rune('n') { + goto l69 + } + position++ + goto l68 + l69: + position, tokenIndex = position68, tokenIndex68 + if buffer[position] != rune('N') { + goto l16 + } + position++ + } + l68: + { + 
position70, tokenIndex70 := position, tokenIndex + if buffer[position] != rune('s') { + goto l71 + } + position++ + goto l70 + l71: + position, tokenIndex = position70, tokenIndex70 + if buffer[position] != rune('S') { + goto l16 + } + position++ + } + l70: + add(rulecontains, position55) + } + l72: + { + position73, tokenIndex73 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l73 + } + position++ + goto l72 + l73: + position, tokenIndex = position73, tokenIndex73 + } + if !_rules[rulevalue]() { + goto l16 + } + break + } + } + + } + l28: + add(rulecondition, position17) + } + return true + l16: + position, tokenIndex = position16, tokenIndex16 + return false + }, + /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + nil, + /* 3 value <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + func() bool { + position75, tokenIndex75 := position, tokenIndex + { + position76 := position + { + position77 := position + { + position80, tokenIndex80 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l80 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l80 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l80 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l80 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l80 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l80 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l80 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l80 + } + position++ + break + 
case '\n': + if buffer[position] != rune('\n') { + goto l80 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l80 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l80 + } + position++ + break + } + } + + goto l75 + l80: + position, tokenIndex = position80, tokenIndex80 + } + if !matchDot() { + goto l75 + } + l78: + { + position79, tokenIndex79 := position, tokenIndex + { + position82, tokenIndex82 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l82 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l82 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l82 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l82 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l82 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l82 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l82 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l82 + } + position++ + break + case '\n': + if buffer[position] != rune('\n') { + goto l82 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l82 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l82 + } + position++ + break + } + } + + goto l79 + l82: + position, tokenIndex = position82, tokenIndex82 + } + if !matchDot() { + goto l79 + } + goto l78 + l79: + position, tokenIndex = position79, tokenIndex79 + } + add(rulePegText, position77) + } + add(rulevalue, position76) + } + return true + l75: + position, tokenIndex = position75, tokenIndex75 + return false + }, + /* 4 number <- <<('0' / ([1-9] digit* ('.' 
digit*)?))>> */ + func() bool { + position84, tokenIndex84 := position, tokenIndex + { + position85 := position + { + position86 := position + { + position87, tokenIndex87 := position, tokenIndex + if buffer[position] != rune('0') { + goto l88 + } + position++ + goto l87 + l88: + position, tokenIndex = position87, tokenIndex87 + if c := buffer[position]; c < rune('1') || c > rune('9') { + goto l84 + } + position++ + l89: + { + position90, tokenIndex90 := position, tokenIndex + if !_rules[ruledigit]() { + goto l90 + } + goto l89 + l90: + position, tokenIndex = position90, tokenIndex90 + } + { + position91, tokenIndex91 := position, tokenIndex + if buffer[position] != rune('.') { + goto l91 + } + position++ + l93: + { + position94, tokenIndex94 := position, tokenIndex + if !_rules[ruledigit]() { + goto l94 + } + goto l93 + l94: + position, tokenIndex = position94, tokenIndex94 + } + goto l92 + l91: + position, tokenIndex = position91, tokenIndex91 + } + l92: + } + l87: + add(rulePegText, position86) + } + add(rulenumber, position85) + } + return true + l84: + position, tokenIndex = position84, tokenIndex84 + return false + }, + /* 5 digit <- <[0-9]> */ + func() bool { + position95, tokenIndex95 := position, tokenIndex + { + position96 := position + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l95 + } + position++ + add(ruledigit, position96) + } + return true + l95: + position, tokenIndex = position95, tokenIndex95 + return false + }, + /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ + func() bool { + position97, tokenIndex97 := position, tokenIndex + { + position98 := position + { + position99, tokenIndex99 := position, tokenIndex + if buffer[position] != rune('t') { + goto l100 + } + position++ + goto l99 + l100: + position, tokenIndex = position99, tokenIndex99 + if buffer[position] != rune('T') { 
+ goto l97 + } + position++ + } + l99: + { + position101, tokenIndex101 := position, tokenIndex + if buffer[position] != rune('i') { + goto l102 + } + position++ + goto l101 + l102: + position, tokenIndex = position101, tokenIndex101 + if buffer[position] != rune('I') { + goto l97 + } + position++ + } + l101: + { + position103, tokenIndex103 := position, tokenIndex + if buffer[position] != rune('m') { + goto l104 + } + position++ + goto l103 + l104: + position, tokenIndex = position103, tokenIndex103 + if buffer[position] != rune('M') { + goto l97 + } + position++ + } + l103: + { + position105, tokenIndex105 := position, tokenIndex + if buffer[position] != rune('e') { + goto l106 + } + position++ + goto l105 + l106: + position, tokenIndex = position105, tokenIndex105 + if buffer[position] != rune('E') { + goto l97 + } + position++ + } + l105: + if buffer[position] != rune(' ') { + goto l97 + } + position++ + { + position107 := position + if !_rules[ruleyear]() { + goto l97 + } + if buffer[position] != rune('-') { + goto l97 + } + position++ + if !_rules[rulemonth]() { + goto l97 + } + if buffer[position] != rune('-') { + goto l97 + } + position++ + if !_rules[ruleday]() { + goto l97 + } + if buffer[position] != rune('T') { + goto l97 + } + position++ + if !_rules[ruledigit]() { + goto l97 + } + if !_rules[ruledigit]() { + goto l97 + } + if buffer[position] != rune(':') { + goto l97 + } + position++ + if !_rules[ruledigit]() { + goto l97 + } + if !_rules[ruledigit]() { + goto l97 + } + if buffer[position] != rune(':') { + goto l97 + } + position++ + if !_rules[ruledigit]() { + goto l97 + } + if !_rules[ruledigit]() { + goto l97 + } + { + position108, tokenIndex108 := position, tokenIndex + { + position110, tokenIndex110 := position, tokenIndex + if buffer[position] != rune('-') { + goto l111 + } + position++ + goto l110 + l111: + position, tokenIndex = position110, tokenIndex110 + if buffer[position] != rune('+') { + goto l109 + } + position++ + } + l110: + if 
!_rules[ruledigit]() { + goto l109 + } + if !_rules[ruledigit]() { + goto l109 + } + if buffer[position] != rune(':') { + goto l109 + } + position++ + if !_rules[ruledigit]() { + goto l109 + } + if !_rules[ruledigit]() { + goto l109 + } + goto l108 + l109: + position, tokenIndex = position108, tokenIndex108 + if buffer[position] != rune('Z') { + goto l97 + } + position++ + } + l108: + add(rulePegText, position107) + } + add(ruletime, position98) + } + return true + l97: + position, tokenIndex = position97, tokenIndex97 + return false + }, + /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ + func() bool { + position112, tokenIndex112 := position, tokenIndex + { + position113 := position + { + position114, tokenIndex114 := position, tokenIndex + if buffer[position] != rune('d') { + goto l115 + } + position++ + goto l114 + l115: + position, tokenIndex = position114, tokenIndex114 + if buffer[position] != rune('D') { + goto l112 + } + position++ + } + l114: + { + position116, tokenIndex116 := position, tokenIndex + if buffer[position] != rune('a') { + goto l117 + } + position++ + goto l116 + l117: + position, tokenIndex = position116, tokenIndex116 + if buffer[position] != rune('A') { + goto l112 + } + position++ + } + l116: + { + position118, tokenIndex118 := position, tokenIndex + if buffer[position] != rune('t') { + goto l119 + } + position++ + goto l118 + l119: + position, tokenIndex = position118, tokenIndex118 + if buffer[position] != rune('T') { + goto l112 + } + position++ + } + l118: + { + position120, tokenIndex120 := position, tokenIndex + if buffer[position] != rune('e') { + goto l121 + } + position++ + goto l120 + l121: + position, tokenIndex = position120, tokenIndex120 + if buffer[position] != rune('E') { + goto l112 + } + position++ + } + l120: + if buffer[position] != rune(' ') { + goto l112 + } + position++ + { + position122 := position + if !_rules[ruleyear]() { + goto l112 + } + if buffer[position] 
!= rune('-') { + goto l112 + } + position++ + if !_rules[rulemonth]() { + goto l112 + } + if buffer[position] != rune('-') { + goto l112 + } + position++ + if !_rules[ruleday]() { + goto l112 + } + add(rulePegText, position122) + } + add(ruledate, position113) + } + return true + l112: + position, tokenIndex = position112, tokenIndex112 + return false + }, + /* 8 year <- <(('1' / '2') digit digit digit)> */ + func() bool { + position123, tokenIndex123 := position, tokenIndex + { + position124 := position + { + position125, tokenIndex125 := position, tokenIndex + if buffer[position] != rune('1') { + goto l126 + } + position++ + goto l125 + l126: + position, tokenIndex = position125, tokenIndex125 + if buffer[position] != rune('2') { + goto l123 + } + position++ + } + l125: + if !_rules[ruledigit]() { + goto l123 + } + if !_rules[ruledigit]() { + goto l123 + } + if !_rules[ruledigit]() { + goto l123 + } + add(ruleyear, position124) + } + return true + l123: + position, tokenIndex = position123, tokenIndex123 + return false + }, + /* 9 month <- <(('0' / '1') digit)> */ + func() bool { + position127, tokenIndex127 := position, tokenIndex + { + position128 := position + { + position129, tokenIndex129 := position, tokenIndex + if buffer[position] != rune('0') { + goto l130 + } + position++ + goto l129 + l130: + position, tokenIndex = position129, tokenIndex129 + if buffer[position] != rune('1') { + goto l127 + } + position++ + } + l129: + if !_rules[ruledigit]() { + goto l127 + } + add(rulemonth, position128) + } + return true + l127: + position, tokenIndex = position127, tokenIndex127 + return false + }, + /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ + func() bool { + position131, tokenIndex131 := position, tokenIndex + { + position132 := position + { + switch buffer[position] { + case '3': + if buffer[position] != rune('3') { + goto l131 + } + position++ + break + case '2': + if buffer[position] != rune('2') { + goto l131 + } + 
position++ + break + case '1': + if buffer[position] != rune('1') { + goto l131 + } + position++ + break + default: + if buffer[position] != rune('0') { + goto l131 + } + position++ + break + } + } + + if !_rules[ruledigit]() { + goto l131 + } + add(ruleday, position132) + } + return true + l131: + position, tokenIndex = position131, tokenIndex131 + return false + }, + /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ + nil, + /* 12 equal <- <'='> */ + nil, + /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ + nil, + /* 14 le <- <('<' '=')> */ + nil, + /* 15 ge <- <('>' '=')> */ + nil, + /* 16 l <- <'<'> */ + nil, + /* 17 g <- <'>'> */ + nil, + nil, + } + p.rules = _rules +} diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go new file mode 100644 index 000000000..75d02ee49 --- /dev/null +++ b/pubsub/query/query_test.go @@ -0,0 +1,64 @@ +package query_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tmlibs/pubsub/query" +) + +func TestMatches(t *testing.T) { + const shortForm = "2006-Jan-02" + txDate, err := time.Parse(shortForm, "2017-Jan-01") + require.NoError(t, err) + txTime, err := time.Parse(time.RFC3339, "2018-05-03T14:45:00Z") + require.NoError(t, err) + + testCases := []struct { + s string + tags map[string]interface{} + err bool + matches bool + }{ + {"tm.events.type=NewBlock", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true}, + + {"tx.gas > 7", map[string]interface{}{"tx.gas": 8}, false, true}, + {"tx.gas > 7 AND tx.gas < 9", map[string]interface{}{"tx.gas": 8}, false, true}, + {"body.weight >= 3.5", map[string]interface{}{"body.weight": 3.5}, false, true}, + {"account.balance < 1000.0", map[string]interface{}{"account.balance": 900}, false, true}, + {"apples.kg <= 4", map[string]interface{}{"apples.kg": 4.0}, false, true}, + {"body.weight >= 4.5", 
map[string]interface{}{"body.weight": float32(4.5)}, false, true}, + {"oranges.kg < 4 AND watermellons.kg > 10", map[string]interface{}{"oranges.kg": 3, "watermellons.kg": 12}, false, true}, + {"peaches.kg < 4", map[string]interface{}{"peaches.kg": 5}, false, false}, + + {"tx.date > DATE 2017-01-01", map[string]interface{}{"tx.date": time.Now()}, false, true}, + {"tx.date = DATE 2017-01-01", map[string]interface{}{"tx.date": txDate}, false, true}, + {"tx.date = DATE 2018-01-01", map[string]interface{}{"tx.date": txDate}, false, false}, + + {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": time.Now()}, false, true}, + {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": txTime}, false, false}, + + {"abci.owner.name CONTAINS Igor", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true}, + {"abci.owner.name CONTAINS Igor", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false}, + } + + for _, tc := range testCases { + query, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + + if tc.matches { + assert.True(t, query.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags) + } else { + assert.False(t, query.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags) + } + } +} + +func TestMustParse(t *testing.T) { + assert.Panics(t, func() { query.MustParse("=") }) + assert.NotPanics(t, func() { query.MustParse("tm.events.type=NewBlock") }) +} From 8062ade7876d1d82b7385c09a3cdb6aa0b7ba490 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 12 Jul 2017 13:10:36 +0300 Subject: [PATCH 184/515] remove all clients (including closing all channels) on shutdown --- pubsub/pubsub.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 264848161..ae642a4fb 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -216,7 +216,9 @@ loop: state.removeAll(cmd.clientID) } case shutdown: - state.reset() + for clientID, 
_ := range state.clients { + state.removeAll(clientID) + } break loop case sub: state.add(cmd.clientID, cmd.query, cmd.ch) @@ -286,11 +288,6 @@ func (state *state) removeAll(clientID string) { delete(state.clients, clientID) } -func (state *state) reset() { - state.queries = make(map[Query]map[string]chan<- interface{}) - state.clients = make(map[string]map[Query]struct{}) -} - func (state *state) send(msg interface{}, tags map[string]interface{}, slowClientStrategy overflowStrategy, logger log.Logger) { for q, clientToChannelMap := range state.queries { // NOTE we can use LRU cache to speed up common cases like query = " From e4f3f9d9bf327083eba26afa2b85ff09189856c3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 12 Jul 2017 22:52:13 +0300 Subject: [PATCH 185/515] remove comment about LRU cache (see comments below) I've tried https://github.com/hashicorp/golang-lru/tree/master/simplelru today and here are the results: with LRU cache: ``` Benchmark10Clients-2 50000 29021 ns/op 3976 B/op 105 allocs/op Benchmark100Clients-2 3000 363432 ns/op 36382 B/op 1005 allocs/op Benchmark1000Clients-2 500 2473752 ns/op 360500 B/op 10009 allocs/op Benchmark10ClientsUsingTheSameQuery-2 300000 4059 ns/op 773 B/op 15 allocs/op Benchmark100ClientsUsingTheSameQuery-2 500000 4360 ns/op 773 B/op 15 allocs/op Benchmark1000ClientsUsingTheSameQuery-2 300000 4204 ns/op 773 B/op 15 allocs/op ``` without LRU cache: ``` Benchmark10Clients-2 200000 5267 ns/op 616 B/op 25 allocs/op Benchmark100Clients-2 30000 42134 ns/op 2776 B/op 205 allocs/op Benchmark1000Clients-2 3000 552648 ns/op 24376 B/op 2005 allocs/op Benchmark10ClientsOneQuery-2 1000000 2127 ns/op 462 B/op 9 allocs/op Benchmark100ClientsOneQuery-2 500000 2353 ns/op 462 B/op 9 allocs/op Benchmark1000ClientsOneQuery-2 500000 2339 ns/op 462 B/op 9 allocs/op ``` > How were you using the lru cache exactly? 
I was adding a KV pair each time there is a match plus checking if `lru.Contains(key)` before running the actual check (`q.Matches(tags)`). ``` key = fmt.Sprintf("%s/%v", query + tags) ``` --- pubsub/pubsub.go | 3 --- pubsub/pubsub_test.go | 32 +++++++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index ae642a4fb..007f93f32 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -290,11 +290,8 @@ func (state *state) removeAll(clientID string) { func (state *state) send(msg interface{}, tags map[string]interface{}, slowClientStrategy overflowStrategy, logger log.Logger) { for q, clientToChannelMap := range state.queries { - // NOTE we can use LRU cache to speed up common cases like query = " - // tm.events.type=NewBlock" and tags = {"tm.events.type": "NewBlock"} if q.Matches(tags) { for clientID, ch := range clientToChannelMap { - logger.Info("Sending message to client", "msg", msg, "client", clientID) switch slowClientStrategy { case drop: select { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 570f76a82..3112ab5d3 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -193,13 +193,21 @@ func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } +func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } +func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } +func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } + func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer(pubsub.BufferCapacity(b.N)) + s := pubsub.NewServer() s.Start() defer s.Stop() for i := 0; i < n; i++ { ch := make(chan interface{}) + go func() { + for range ch { + } + }() s.Subscribe(clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = Ivan 
AND abci.Invoices.Number = %d", i)), ch) } @@ -210,6 +218,28 @@ func benchmarkNClients(n int, b *testing.B) { } } +func benchmarkNClientsOneQuery(n int, b *testing.B) { + s := pubsub.NewServer() + s.Start() + defer s.Stop() + + q := query.MustParse("abci.Account.Owner = Ivan AND abci.Invoices.Number = 1") + for i := 0; i < n; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + s.Subscribe(clientID, q, ch) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags("Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}) + } +} + /////////////////////////////////////////////////////////////////////////////// /// HELPERS /////////////////////////////////////////////////////////////////////////////// From 4aa024d843b081977304c0184e8c56c05d22c32f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 12 Jul 2017 23:10:36 +0300 Subject: [PATCH 186/515] add more info to error messages --- pubsub/pubsub.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 007f93f32..aec60bcc0 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -26,6 +26,7 @@ package pubsub import ( "errors" + "fmt" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" @@ -174,7 +175,7 @@ func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) e select { case s.cmds <- pubCmd: default: - s.Logger.Error("Server overflowed, dropping message...", "msg", msg) + s.Logger.Error("Server overflowed, dropping message...", "msg", msg, "tags", fmt.Sprintf("%v", tags)) return ErrorOverflow } case wait: @@ -297,7 +298,7 @@ func (state *state) send(msg interface{}, tags map[string]interface{}, slowClien select { case ch <- msg: default: - logger.Error("Client is busy, skipping...", "clientID", clientID) + logger.Error("Wanted to send a message, but the client is busy", "msg", msg, "tags", fmt.Sprintf("%v", tags), 
"clientID", clientID) } case wait: ch <- msg From 13207a5927e21d96afb9a9520f5a1a7b42b323bb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 14 Jul 2017 12:32:01 +0300 Subject: [PATCH 187/515] remove overflow options --- pubsub/pubsub.go | 91 ++++--------------------------------------- pubsub/pubsub_test.go | 53 ------------------------- 2 files changed, 8 insertions(+), 136 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index aec60bcc0..2b1a569c7 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -10,26 +10,13 @@ // match, this message will be pushed to all clients, subscribed to that query. // See query subpackage for our implementation. // -// Overflow strategies (incoming publish requests): -// -// 1) drop - drops publish requests when there are too many of them -// 2) wait - blocks until the server is ready to accept more publish requests (default) -// // Subscribe/Unsubscribe calls are always blocking. -// -// Overflow strategies (outgoing messages): -// -// 1) skip - do not send a message if the client is busy or slow (default) -// 2) wait - wait until the client is ready to accept new messages -// package pubsub import ( "errors" - "fmt" cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" ) type operation int @@ -41,13 +28,6 @@ const ( shutdown ) -type overflowStrategy int - -const ( - drop overflowStrategy = iota - wait -) - var ( ErrorOverflow = errors.New("Server overflowed") ) @@ -72,20 +52,16 @@ type Server struct { cmn.BaseService cmds chan cmd - - overflowStrategy overflowStrategy - slowClientStrategy overflowStrategy } // Option sets a parameter for the server. type Option func(*Server) // NewServer returns a new server. See the commentary on the Option functions -// for a detailed description of how to configure buffering and overflow -// behavior. If no options are provided, the resulting server's queue is -// unbuffered and it blocks when overflowed. 
+// for a detailed description of how to configure buffering. If no options are +// provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { - s := &Server{overflowStrategy: wait, slowClientStrategy: drop} + s := &Server{} s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) for _, option := range options { @@ -111,38 +87,6 @@ func BufferCapacity(cap int) Option { } } -// OverflowStrategyDrop will tell the server to drop messages when it can't -// process more messages. -func OverflowStrategyDrop() Option { - return func(s *Server) { - s.overflowStrategy = drop - } -} - -// OverflowStrategyWait will tell the server to block and wait for some time -// for server to process other messages. Default strategy. -func OverflowStrategyWait() func(*Server) { - return func(s *Server) { - s.overflowStrategy = wait - } -} - -// WaitSlowClients will tell the server to block and wait until subscriber -// reads a messages even if it is fast enough to process them. -func WaitSlowClients() func(*Server) { - return func(s *Server) { - s.slowClientStrategy = wait - } -} - -// SkipSlowClients will tell the server to skip subscriber if it is busy -// processing previous message(s). Default strategy. -func SkipSlowClients() func(*Server) { - return func(s *Server) { - s.slowClientStrategy = drop - } -} - // Subscribe returns a channel on which messages matching the given query can // be received. If the subscription already exists old channel will be closed // and new one returned. @@ -170,17 +114,7 @@ func (s *Server) Publish(msg interface{}) error { // will be sent to a client. 
func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) error { pubCmd := cmd{op: pub, msg: msg, tags: tags} - switch s.overflowStrategy { - case drop: - select { - case s.cmds <- pubCmd: - default: - s.Logger.Error("Server overflowed, dropping message...", "msg", msg, "tags", fmt.Sprintf("%v", tags)) - return ErrorOverflow - } - case wait: - s.cmds <- pubCmd - } + s.cmds <- pubCmd return nil } @@ -224,7 +158,7 @@ loop: case sub: state.add(cmd.clientID, cmd.query, cmd.ch) case pub: - state.send(cmd.msg, cmd.tags, s.slowClientStrategy, s.Logger) + state.send(cmd.msg, cmd.tags) } } } @@ -289,20 +223,11 @@ func (state *state) removeAll(clientID string) { delete(state.clients, clientID) } -func (state *state) send(msg interface{}, tags map[string]interface{}, slowClientStrategy overflowStrategy, logger log.Logger) { +func (state *state) send(msg interface{}, tags map[string]interface{}) { for q, clientToChannelMap := range state.queries { if q.Matches(tags) { - for clientID, ch := range clientToChannelMap { - switch slowClientStrategy { - case drop: - select { - case ch <- msg: - default: - logger.Error("Wanted to send a message, but the client is busy", "msg", msg, "tags", fmt.Sprintf("%v", tags), "clientID", clientID) - } - case wait: - ch <- msg - } + for _, ch := range clientToChannelMap { + ch <- msg } } } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 3112ab5d3..7cc4e5998 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -126,30 +126,6 @@ func TestUnsubscribeAll(t *testing.T) { assert.False(t, ok) } -func TestOverflowStrategyDrop(t *testing.T) { - s := pubsub.NewServer(pubsub.OverflowStrategyDrop()) - s.SetLogger(log.TestingLogger()) - - err := s.Publish("Veda") - if assert.Error(t, err) { - assert.Equal(t, pubsub.ErrorOverflow, err) - } -} - -func TestOverflowStrategyWait(t *testing.T) { - s := pubsub.NewServer(pubsub.OverflowStrategyWait()) - s.SetLogger(log.TestingLogger()) - - go func() { - time.Sleep(1 
* time.Second) - s.Start() - defer s.Stop() - }() - - err := s.Publish("Veda") - assert.NoError(t, err) -} - func TestBufferCapacity(t *testing.T) { s := pubsub.NewServer(pubsub.BufferCapacity(2)) s.SetLogger(log.TestingLogger()) @@ -160,35 +136,6 @@ func TestBufferCapacity(t *testing.T) { require.NoError(t, err) } -func TestWaitSlowClients(t *testing.T) { - s := pubsub.NewServer(pubsub.WaitSlowClients()) - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ch := make(chan interface{}) - s.Subscribe(clientID, query.Empty{}, ch) - err := s.Publish("Wonderwoman") - require.NoError(t, err) - - time.Sleep(1 * time.Second) - - assertReceive(t, "Wonderwoman", ch) -} - -func TestSkipSlowClients(t *testing.T) { - s := pubsub.NewServer(pubsub.SkipSlowClients()) - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ch := make(chan interface{}) - s.Subscribe(clientID, query.Empty{}, ch) - err := s.Publish("Cyclops") - require.NoError(t, err) - assert.Zero(t, len(ch)) -} - func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } From 0006bfc359e2b50a6a083ea750e2b1368477fcbc Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 14 Jul 2017 13:02:32 +0300 Subject: [PATCH 188/515] return ErrorOverflow on Subscribe if server is overflowed > why we need it? most of our subscribers will be RPC WS subscribers, so if there are too many, nothing wrong with rejecting to subscribe. however, consensus reactor must be the first to subscribe, since its work depends on the pubsub package. 
--- pubsub/example_test.go | 4 +-- pubsub/pubsub.go | 50 +++++++++++++++++++++------------- pubsub/pubsub_test.go | 61 ++++++++++++++++++++++++------------------ 3 files changed, 68 insertions(+), 47 deletions(-) diff --git a/pubsub/example_test.go b/pubsub/example_test.go index d64b96eab..38026ccd6 100644 --- a/pubsub/example_test.go +++ b/pubsub/example_test.go @@ -17,8 +17,8 @@ func TestExample(t *testing.T) { defer s.Stop() ch := make(chan interface{}, 1) - s.Subscribe("example-client", query.MustParse("abci.account.name=John"), ch) - err := s.PublishWithTags("Tombstone", map[string]interface{}{"abci.account.name": "John"}) + err := s.Subscribe("example-client", query.MustParse("abci.account.name=John"), ch) require.NoError(t, err) + s.PublishWithTags("Tombstone", map[string]interface{}{"abci.account.name": "John"}) assertReceive(t, "Tombstone", ch) } diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 2b1a569c7..9ac260c93 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -15,6 +15,7 @@ package pubsub import ( "errors" + "time" cmn "github.com/tendermint/tmlibs/common" ) @@ -28,8 +29,10 @@ const ( shutdown ) +const subscribeTimeout = 10 * time.Millisecond + var ( - ErrorOverflow = errors.New("Server overflowed") + ErrorOverflow = errors.New("server overflowed") ) type cmd struct { @@ -51,7 +54,8 @@ type Query interface { type Server struct { cmn.BaseService - cmds chan cmd + cmds chan cmd + cmdsCap int } // Option sets a parameter for the server. 
@@ -68,9 +72,8 @@ func NewServer(options ...Option) *Server { option(s) } - if s.cmds == nil { // if BufferCapacity was not set, create unbuffered channel - s.cmds = make(chan cmd) - } + // if BufferCapacity option was not set, the channel is unbuffered + s.cmds = make(chan cmd, s.cmdsCap) return s } @@ -82,40 +85,49 @@ func NewServer(options ...Option) *Server { func BufferCapacity(cap int) Option { return func(s *Server) { if cap > 0 { - s.cmds = make(chan cmd, cap) + s.cmdsCap = cap } } } +// Returns capacity of the internal server's queue. +func (s Server) BufferCapacity() int { + return s.cmdsCap +} + // Subscribe returns a channel on which messages matching the given query can // be received. If the subscription already exists old channel will be closed -// and new one returned. -func (s *Server) Subscribe(clientID string, query Query, out chan<- interface{}) { - s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out} +// and new one returned. Error will be returned to the caller if the server is +// overflowed. +func (s *Server) Subscribe(clientID string, query Query, out chan<- interface{}) error { + select { + case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: + return nil + case <-time.After(subscribeTimeout): + return ErrorOverflow + } } -// Unsubscribe unsubscribes the given client from the query. +// Unsubscribe unsubscribes the given client from the query. Blocking. func (s *Server) Unsubscribe(clientID string, query Query) { s.cmds <- cmd{op: unsub, clientID: clientID, query: query} } -// Unsubscribe unsubscribes the given channel. +// Unsubscribe unsubscribes the given channel. Blocking. func (s *Server) UnsubscribeAll(clientID string) { s.cmds <- cmd{op: unsub, clientID: clientID} } -// Publish publishes the given message. -func (s *Server) Publish(msg interface{}) error { - return s.PublishWithTags(msg, make(map[string]interface{})) +// Publish publishes the given message. Blocking. 
+func (s *Server) Publish(msg interface{}) { + s.PublishWithTags(msg, make(map[string]interface{})) } // PublishWithTags publishes the given message with a set of tags. This set of // tags will be matched with client queries. If there is a match, the message -// will be sent to a client. -func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) error { - pubCmd := cmd{op: pub, msg: msg, tags: tags} - s.cmds <- pubCmd - return nil +// will be sent to a client. Blocking. +func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) { + s.cmds <- cmd{op: pub, msg: msg, tags: tags} } // OnStop implements Service.OnStop by shutting down the server. diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 7cc4e5998..fb15b3489 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -25,13 +25,12 @@ func TestSubscribe(t *testing.T) { defer s.Stop() ch := make(chan interface{}, 1) - s.Subscribe(clientID, query.Empty{}, ch) - err := s.Publish("Ka-Zar") + err := s.Subscribe(clientID, query.Empty{}, ch) require.NoError(t, err) + s.Publish("Ka-Zar") assertReceive(t, "Ka-Zar", ch) - err = s.Publish("Quicksilver") - require.NoError(t, err) + s.Publish("Quicksilver") assertReceive(t, "Quicksilver", ch) } @@ -41,22 +40,22 @@ func TestDifferentClients(t *testing.T) { s.Start() defer s.Stop() ch1 := make(chan interface{}, 1) - s.Subscribe("client-1", query.MustParse("tm.events.type=NewBlock"), ch1) - err := s.PublishWithTags("Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) + err := s.Subscribe("client-1", query.MustParse("tm.events.type=NewBlock"), ch1) require.NoError(t, err) + s.PublishWithTags("Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) assertReceive(t, "Iceman", ch1) ch2 := make(chan interface{}, 1) - s.Subscribe("client-2", query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) - err = s.PublishWithTags("Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", 
"abci.account.name": "Igor"}) + err = s.Subscribe("client-2", query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) require.NoError(t, err) + s.PublishWithTags("Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) assertReceive(t, "Ultimo", ch1) assertReceive(t, "Ultimo", ch2) ch3 := make(chan interface{}, 1) - s.Subscribe("client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) - err = s.PublishWithTags("Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) + err = s.Subscribe("client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) require.NoError(t, err) + s.PublishWithTags("Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) assert.Zero(t, len(ch3)) } @@ -69,19 +68,19 @@ func TestClientResubscribes(t *testing.T) { q := query.MustParse("tm.events.type=NewBlock") ch1 := make(chan interface{}, 1) - s.Subscribe(clientID, q, ch1) - err := s.PublishWithTags("Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) + err := s.Subscribe(clientID, q, ch1) require.NoError(t, err) + s.PublishWithTags("Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) assertReceive(t, "Goblin Queen", ch1) ch2 := make(chan interface{}, 1) - s.Subscribe(clientID, q, ch2) + err = s.Subscribe(clientID, q, ch2) + require.NoError(t, err) _, ok := <-ch1 assert.False(t, ok) - err = s.PublishWithTags("Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) - require.NoError(t, err) + s.PublishWithTags("Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) assertReceive(t, "Spider-Man", ch2) } @@ -92,11 +91,11 @@ func TestUnsubscribe(t *testing.T) { defer s.Stop() ch := make(chan interface{}) - s.Subscribe(clientID, query.Empty{}, ch) + err := s.Subscribe(clientID, query.Empty{}, ch) + require.NoError(t, err) 
s.Unsubscribe(clientID, query.Empty{}) - err := s.Publish("Nick Fury") - require.NoError(t, err) + s.Publish("Nick Fury") assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") _, ok := <-ch @@ -110,13 +109,14 @@ func TestUnsubscribeAll(t *testing.T) { defer s.Stop() ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch1) - s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) + err := s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch1) + require.NoError(t, err) + err = s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) + require.NoError(t, err) s.UnsubscribeAll(clientID) - err := s.Publish("Nick Fury") - require.NoError(t, err) + s.Publish("Nick Fury") assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") @@ -130,10 +130,19 @@ func TestBufferCapacity(t *testing.T) { s := pubsub.NewServer(pubsub.BufferCapacity(2)) s.SetLogger(log.TestingLogger()) - err := s.Publish("Nighthawk") - require.NoError(t, err) - err = s.Publish("Sage") - require.NoError(t, err) + s.Publish("Nighthawk") + s.Publish("Sage") +} + +func TestSubscribeReturnsErrorIfServerOverflowed(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + + ch := make(chan interface{}, 1) + err := s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch) + if assert.Error(t, err) { + assert.Equal(t, pubsub.ErrorOverflow, err) + } } func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } From e664f9c68861299060174ba348ad64a6854551d3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 14 Jul 2017 14:49:25 +0300 Subject: [PATCH 189/515] use context to provide timeouts! 
--- pubsub/example_test.go | 7 +++- pubsub/pubsub.go | 53 +++++++++++++++----------- pubsub/pubsub_test.go | 86 ++++++++++++++++++++++++------------------ 3 files changed, 85 insertions(+), 61 deletions(-) diff --git a/pubsub/example_test.go b/pubsub/example_test.go index 38026ccd6..6597c858d 100644 --- a/pubsub/example_test.go +++ b/pubsub/example_test.go @@ -1,6 +1,7 @@ package pubsub_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -16,9 +17,11 @@ func TestExample(t *testing.T) { s.Start() defer s.Stop() + ctx := context.Background() ch := make(chan interface{}, 1) - err := s.Subscribe("example-client", query.MustParse("abci.account.name=John"), ch) + err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name=John"), ch) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Tombstone", map[string]interface{}{"abci.account.name": "John"}) require.NoError(t, err) - s.PublishWithTags("Tombstone", map[string]interface{}{"abci.account.name": "John"}) assertReceive(t, "Tombstone", ch) } diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 9ac260c93..34df86a45 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -14,8 +14,7 @@ package pubsub import ( - "errors" - "time" + "context" cmn "github.com/tendermint/tmlibs/common" ) @@ -29,12 +28,6 @@ const ( shutdown ) -const subscribeTimeout = 10 * time.Millisecond - -var ( - ErrorOverflow = errors.New("server overflowed") -) - type cmd struct { op operation query Query @@ -97,37 +90,53 @@ func (s Server) BufferCapacity() int { // Subscribe returns a channel on which messages matching the given query can // be received. If the subscription already exists old channel will be closed -// and new one returned. Error will be returned to the caller if the server is -// overflowed. -func (s *Server) Subscribe(clientID string, query Query, out chan<- interface{}) error { +// and new one returned. Error will be returned to the caller if the context is +// cancelled. 
+func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { select { case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: return nil - case <-time.After(subscribeTimeout): - return ErrorOverflow + case <-ctx.Done(): + return ctx.Err() } } -// Unsubscribe unsubscribes the given client from the query. Blocking. -func (s *Server) Unsubscribe(clientID string, query Query) { - s.cmds <- cmd{op: unsub, clientID: clientID, query: query} +// Unsubscribe unsubscribes the given client from the query. Error will be +// returned to the caller if the context is cancelled. +func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { + select { + case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}: + return nil + case <-ctx.Done(): + return ctx.Err() + } } // Unsubscribe unsubscribes the given channel. Blocking. -func (s *Server) UnsubscribeAll(clientID string) { - s.cmds <- cmd{op: unsub, clientID: clientID} +func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + select { + case s.cmds <- cmd{op: unsub, clientID: clientID}: + return nil + case <-ctx.Done(): + return ctx.Err() + } } // Publish publishes the given message. Blocking. -func (s *Server) Publish(msg interface{}) { - s.PublishWithTags(msg, make(map[string]interface{})) +func (s *Server) Publish(ctx context.Context, msg interface{}) error { + return s.PublishWithTags(ctx, msg, make(map[string]interface{})) } // PublishWithTags publishes the given message with a set of tags. This set of // tags will be matched with client queries. If there is a match, the message // will be sent to a client. Blocking. 
-func (s *Server) PublishWithTags(msg interface{}, tags map[string]interface{}) { - s.cmds <- cmd{op: pub, msg: msg, tags: tags} +func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]interface{}) error { + select { + case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: + return nil + case <-ctx.Done(): + return ctx.Err() + } } // OnStop implements Service.OnStop by shutting down the server. diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index fb15b3489..9c9841440 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -1,6 +1,7 @@ package pubsub_test import ( + "context" "fmt" "runtime/debug" "testing" @@ -24,13 +25,16 @@ func TestSubscribe(t *testing.T) { s.Start() defer s.Stop() + ctx := context.Background() ch := make(chan interface{}, 1) - err := s.Subscribe(clientID, query.Empty{}, ch) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Publish(ctx, "Ka-Zar") require.NoError(t, err) - s.Publish("Ka-Zar") assertReceive(t, "Ka-Zar", ch) - s.Publish("Quicksilver") + err = s.Publish(ctx, "Quicksilver") + require.NoError(t, err) assertReceive(t, "Quicksilver", ch) } @@ -39,23 +43,28 @@ func TestDifferentClients(t *testing.T) { s.SetLogger(log.TestingLogger()) s.Start() defer s.Stop() + + ctx := context.Background() ch1 := make(chan interface{}, 1) - err := s.Subscribe("client-1", query.MustParse("tm.events.type=NewBlock"), ch1) + err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type=NewBlock"), ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) require.NoError(t, err) - s.PublishWithTags("Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) assertReceive(t, "Iceman", ch1) ch2 := make(chan interface{}, 1) - err = s.Subscribe("client-2", query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) + err = s.Subscribe(ctx, "client-2", 
query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) require.NoError(t, err) - s.PublishWithTags("Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) assertReceive(t, "Ultimo", ch1) assertReceive(t, "Ultimo", ch2) ch3 := make(chan interface{}, 1) - err = s.Subscribe("client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) + err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) require.NoError(t, err) - s.PublishWithTags("Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) assert.Zero(t, len(ch3)) } @@ -65,22 +74,25 @@ func TestClientResubscribes(t *testing.T) { s.Start() defer s.Stop() + ctx := context.Background() q := query.MustParse("tm.events.type=NewBlock") ch1 := make(chan interface{}, 1) - err := s.Subscribe(clientID, q, ch1) + err := s.Subscribe(ctx, clientID, q, ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) require.NoError(t, err) - s.PublishWithTags("Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) assertReceive(t, "Goblin Queen", ch1) ch2 := make(chan interface{}, 1) - err = s.Subscribe(clientID, q, ch2) + err = s.Subscribe(ctx, clientID, q, ch2) require.NoError(t, err) _, ok := <-ch1 assert.False(t, ok) - s.PublishWithTags("Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) + err = s.PublishWithTags(ctx, "Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) + require.NoError(t, err) assertReceive(t, 
"Spider-Man", ch2) } @@ -90,12 +102,15 @@ func TestUnsubscribe(t *testing.T) { s.Start() defer s.Stop() + ctx := context.Background() ch := make(chan interface{}) - err := s.Subscribe(clientID, query.Empty{}, ch) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) - s.Unsubscribe(clientID, query.Empty{}) - s.Publish("Nick Fury") + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") _, ok := <-ch @@ -108,15 +123,18 @@ func TestUnsubscribeAll(t *testing.T) { s.Start() defer s.Stop() + ctx := context.Background() ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - err := s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch1) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type=NewBlock"), ch1) require.NoError(t, err) - err = s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) + err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) require.NoError(t, err) - s.UnsubscribeAll(clientID) + err = s.UnsubscribeAll(ctx, clientID) + require.NoError(t, err) - s.Publish("Nick Fury") + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") @@ -130,19 +148,11 @@ func TestBufferCapacity(t *testing.T) { s := pubsub.NewServer(pubsub.BufferCapacity(2)) s.SetLogger(log.TestingLogger()) - s.Publish("Nighthawk") - s.Publish("Sage") -} - -func TestSubscribeReturnsErrorIfServerOverflowed(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - ch := make(chan interface{}, 1) - err := s.Subscribe(clientID, query.MustParse("tm.events.type=NewBlock"), ch) - if assert.Error(t, err) { - assert.Equal(t, pubsub.ErrorOverflow, err) 
- } + ctx := context.Background() + err := s.Publish(ctx, "Nighthawk") + require.NoError(t, err) + err = s.Publish(ctx, "Sage") + require.NoError(t, err) } func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } @@ -158,19 +168,20 @@ func benchmarkNClients(n int, b *testing.B) { s.Start() defer s.Stop() + ctx := context.Background() for i := 0; i < n; i++ { ch := make(chan interface{}) go func() { for range ch { } }() - s.Subscribe(clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = Ivan AND abci.Invoices.Number = %d", i)), ch) + s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = Ivan AND abci.Invoices.Number = %d", i)), ch) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags("Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}) + s.PublishWithTags(ctx, "Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}) } } @@ -179,6 +190,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { s.Start() defer s.Stop() + ctx := context.Background() q := query.MustParse("abci.Account.Owner = Ivan AND abci.Invoices.Number = 1") for i := 0; i < n; i++ { ch := make(chan interface{}) @@ -186,13 +198,13 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { for range ch { } }() - s.Subscribe(clientID, q, ch) + s.Subscribe(ctx, clientID, q, ch) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags("Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}) + s.PublishWithTags(ctx, "Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}) } } From 17d6091ef42305e0a32a119e815f961924b64de2 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 15 Jul 2017 13:33:47 +0300 Subject: [PATCH 190/515] updates as per Bucky's comments --- pubsub/pubsub.go | 38 ++++++++++++++++++-------------------- pubsub/pubsub_test.go | 7 +++++++ 2 files changed, 25 
insertions(+), 20 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 34df86a45..f5df418a8 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -9,8 +9,6 @@ // When some message is published, we match it with all queries. If there is a // match, this message will be pushed to all clients, subscribed to that query. // See query subpackage for our implementation. -// -// Subscribe/Unsubscribe calls are always blocking. package pubsub import ( @@ -42,7 +40,7 @@ type Query interface { Matches(tags map[string]interface{}) bool } -// Server allows clients to subscribe/unsubscribe for messages, pubsling +// Server allows clients to subscribe/unsubscribe for messages, publishing // messages with or without tags, and manages internal state. type Server struct { cmn.BaseService @@ -83,15 +81,15 @@ func BufferCapacity(cap int) Option { } } -// Returns capacity of the internal server's queue. +// BufferCapacity returns capacity of the internal server's queue. func (s Server) BufferCapacity() int { return s.cmdsCap } -// Subscribe returns a channel on which messages matching the given query can -// be received. If the subscription already exists old channel will be closed -// and new one returned. Error will be returned to the caller if the context is -// cancelled. +// Subscribe creates a subscription for the given client. It accepts a channel +// on which messages matching the given query can be received. If the +// subscription already exists, the old channel will be closed. An error will +// be returned to the caller if the context is canceled. func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { select { case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: @@ -101,8 +99,8 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou } } -// Unsubscribe unsubscribes the given client from the query. 
Error will be -// returned to the caller if the context is cancelled. +// Unsubscribe removes the subscription on the given query. An error will be +// returned to the caller if the context is canceled. func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}: @@ -112,7 +110,8 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) } } -// Unsubscribe unsubscribes the given channel. Blocking. +// Unsubscribe removes all client subscriptions. An error will be returned to +// the caller if the context is canceled. func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID}: @@ -122,14 +121,15 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { } } -// Publish publishes the given message. Blocking. +// Publish publishes the given message. An error will be returned to the caller +// if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { return s.PublishWithTags(ctx, msg, make(map[string]interface{})) } -// PublishWithTags publishes the given message with a set of tags. This set of -// tags will be matched with client queries. If there is a match, the message -// will be sent to a client. Blocking. +// PublishWithTags publishes the given message with the set of tags. The set is +// matched with clients queries. If there is a match, the message is sent to +// the client. func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]interface{}) error { select { case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: @@ -152,7 +152,7 @@ type state struct { clients map[string]map[Query]struct{} } -// OnStart implements Service.OnStart by creating a main loop. +// OnStart implements Service.OnStart by starting the server. 
func (s *Server) OnStart() error { go s.loop(state{ queries: make(map[Query]map[string]chan<- interface{}), @@ -194,6 +194,8 @@ func (state *state) add(clientID string, q Query, ch chan<- interface{}) { close(oldCh) } } + + // create subscription state.queries[q][clientID] = ch // add client if needed @@ -201,10 +203,6 @@ func (state *state) add(clientID string, q Query, ch chan<- interface{}) { state.clients[clientID] = make(map[Query]struct{}) } state.clients[clientID][q] = struct{}{} - - // create subscription - clientToChannelMap := state.queries[q] - clientToChannelMap[clientID] = ch } func (state *state) remove(clientID string, q Query) { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 9c9841440..9d003cff0 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -153,6 +153,13 @@ func TestBufferCapacity(t *testing.T) { require.NoError(t, err) err = s.Publish(ctx, "Sage") require.NoError(t, err) + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + err = s.Publish(ctx, "Ironclad") + if assert.Error(t, err) { + assert.Equal(t, context.DeadlineExceeded, err) + } } func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } From 992c54253f14b1d76766403bb2ca25ede71d5022 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 18 Jul 2017 11:47:30 +0300 Subject: [PATCH 191/515] fixes from gometalinter review --- pubsub/pubsub.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index f5df418a8..52b8361f8 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -110,8 +110,8 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) } } -// Unsubscribe removes all client subscriptions. An error will be returned to -// the caller if the context is canceled. +// UnsubscribeAll removes all client subscriptions. An error will be returned +// to the caller if the context is canceled. 
func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID}: @@ -172,7 +172,7 @@ loop: state.removeAll(cmd.clientID) } case shutdown: - for clientID, _ := range state.clients { + for clientID := range state.clients { state.removeAll(clientID) } break loop @@ -232,7 +232,7 @@ func (state *state) removeAll(clientID string) { return } - for q, _ := range queryMap { + for q := range queryMap { ch := state.queries[q][clientID] close(ch) From 3c6c1b7d334f54b70eb44c39a69e95a4b10ad1dc Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 19 Jul 2017 15:02:04 -0400 Subject: [PATCH 192/515] common: ProtocolAndAddress --- common/net.go | 18 +++++++++++++++--- common/net_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 common/net_test.go diff --git a/common/net.go b/common/net.go index 2f9c9c8c2..bdbe38f79 100644 --- a/common/net.go +++ b/common/net.go @@ -5,10 +5,22 @@ import ( "strings" ) -// protoAddr: e.g. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +// Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, +// eg. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" func Connect(protoAddr string) (net.Conn, error) { - parts := strings.SplitN(protoAddr, "://", 2) - proto, address := parts[0], parts[1] + proto, address := ProtocolAndAddress(protoAddr) conn, err := net.Dial(proto, address) return conn, err } + +// ProtocolAndAddress splits an address into the protocol and address components. +// For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". +// If the address has no protocol prefix, the default is "tcp". 
+func ProtocolAndAddress(listenAddr string) (string, string) { + protocol, address := "tcp", listenAddr + parts := strings.SplitN(address, "://", 2) + if len(parts) == 2 { + protocol, address = parts[0], parts[1] + } + return protocol, address +} diff --git a/common/net_test.go b/common/net_test.go new file mode 100644 index 000000000..38d2ae82d --- /dev/null +++ b/common/net_test.go @@ -0,0 +1,38 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProtocolAndAddress(t *testing.T) { + + cases := []struct { + fullAddr string + proto string + addr string + }{ + { + "tcp://mydomain:80", + "tcp", + "mydomain:80", + }, + { + "mydomain:80", + "tcp", + "mydomain:80", + }, + { + "unix://mydomain:80", + "unix", + "mydomain:80", + }, + } + + for _, c := range cases { + proto, addr := ProtocolAndAddress(c.fullAddr) + assert.Equal(t, proto, c.proto) + assert.Equal(t, addr, c.addr) + } +} From 77f6febb034e36424791a92848385bd420930a9b Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 20 Jul 2017 11:46:22 +0300 Subject: [PATCH 193/515] rename TestClientResubscribes to TestClientSubscribesTwice test UnsubscribeAll properly test BufferCapacity getter --- pubsub/pubsub_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 9d003cff0..85b4b1e4b 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -68,7 +68,7 @@ func TestDifferentClients(t *testing.T) { assert.Zero(t, len(ch3)) } -func TestClientResubscribes(t *testing.T) { +func TestClientSubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) s.Start() @@ -125,9 +125,9 @@ func TestUnsubscribeAll(t *testing.T) { ctx := context.Background() ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type=NewBlock"), ch1) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch1) 
require.NoError(t, err) - err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type=NewBlockHeader"), ch2) + err = s.Subscribe(ctx, clientID, query.Empty{}, ch2) require.NoError(t, err) err = s.UnsubscribeAll(ctx, clientID) @@ -148,6 +148,8 @@ func TestBufferCapacity(t *testing.T) { s := pubsub.NewServer(pubsub.BufferCapacity(2)) s.SetLogger(log.TestingLogger()) + assert.Equal(t, 2, s.BufferCapacity()) + ctx := context.Background() err := s.Publish(ctx, "Nighthawk") require.NoError(t, err) From a6a06f820fa45aba2bac5f6648d0c8172d1a353a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 21 Jul 2017 13:09:58 +0300 Subject: [PATCH 194/515] [pubsub/query] quote values using single quotes This fixes the problem with base-16 encoded values which may start with digits: 015AB.... In such cases, the parser recognizes them as numbers but fails to parse because of the follow-up characters (AB). ``` failed to parse tm.events.type=Tx AND hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63: parse error near digit (line 1 symbol 31 - line 1 symbol 32): "6" ``` So, from now on we should quote any values. This seems to be the way Postgresql has chosen. 
--- pubsub/example_test.go | 2 +- pubsub/pubsub_test.go | 12 +- pubsub/query/parser_test.go | 33 +- pubsub/query/query.go | 5 +- pubsub/query/query.peg | 4 +- pubsub/query/query.peg.go | 772 +++++++++++++++--------------------- pubsub/query/query_test.go | 8 +- 7 files changed, 365 insertions(+), 471 deletions(-) diff --git a/pubsub/example_test.go b/pubsub/example_test.go index 6597c858d..3eda7d32d 100644 --- a/pubsub/example_test.go +++ b/pubsub/example_test.go @@ -19,7 +19,7 @@ func TestExample(t *testing.T) { ctx := context.Background() ch := make(chan interface{}, 1) - err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name=John"), ch) + err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch) require.NoError(t, err) err = s.PublishWithTags(ctx, "Tombstone", map[string]interface{}{"abci.account.name": "John"}) require.NoError(t, err) diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 85b4b1e4b..7bf7b41f7 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -46,14 +46,14 @@ func TestDifferentClients(t *testing.T) { ctx := context.Background() ch1 := make(chan interface{}, 1) - err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type=NewBlock"), ch1) + err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) require.NoError(t, err) err = s.PublishWithTags(ctx, "Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) require.NoError(t, err) assertReceive(t, "Iceman", ch1) ch2 := make(chan interface{}, 1) - err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type=NewBlock AND abci.account.name=Igor"), ch2) + err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) require.NoError(t, err) err = s.PublishWithTags(ctx, "Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) require.NoError(t, err) @@ -61,7 +61,7 @@ func 
TestDifferentClients(t *testing.T) { assertReceive(t, "Ultimo", ch2) ch3 := make(chan interface{}, 1) - err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type=NewRoundStep AND abci.account.name=Igor AND abci.invoice.number = 10"), ch3) + err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) require.NoError(t, err) err = s.PublishWithTags(ctx, "Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) require.NoError(t, err) @@ -75,7 +75,7 @@ func TestClientSubscribesTwice(t *testing.T) { defer s.Stop() ctx := context.Background() - q := query.MustParse("tm.events.type=NewBlock") + q := query.MustParse("tm.events.type='NewBlock'") ch1 := make(chan interface{}, 1) err := s.Subscribe(ctx, clientID, q, ch1) @@ -184,7 +184,7 @@ func benchmarkNClients(n int, b *testing.B) { for range ch { } }() - s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = Ivan AND abci.Invoices.Number = %d", i)), ch) + s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch) } b.ReportAllocs() @@ -200,7 +200,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { defer s.Stop() ctx := context.Background() - q := query.MustParse("abci.Account.Owner = Ivan AND abci.Invoices.Number = 1") + q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") for i := 0; i < n; i++ { ch := make(chan interface{}) go func() { diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go index 194966664..d96ac0821 100644 --- a/pubsub/query/parser_test.go +++ b/pubsub/query/parser_test.go @@ -13,30 +13,37 @@ func TestParser(t *testing.T) { query string valid bool }{ - {"tm.events.type=NewBlock", true}, - {"tm.events.type = NewBlock", true}, - {"tm.events.type=TIME", true}, - {"tm.events.type=DATE", true}, + {"tm.events.type='NewBlock'", true}, + {"tm.events.type = 
'NewBlock'", true}, + {"tm.events.name = ''", true}, + {"tm.events.type='TIME'", true}, + {"tm.events.type='DATE'", true}, + {"tm.events.type='='", true}, + {"tm.events.type='TIME", false}, + {"tm.events.type=TIME'", false}, {"tm.events.type==", false}, + {"tm.events.type=NewBlock", false}, {">==", false}, - {"tm.events.type NewBlock =", false}, - {"tm.events.type>NewBlock", false}, + {"tm.events.type 'NewBlock' =", false}, + {"tm.events.type>'NewBlock'", false}, {"", false}, {"=", false}, - {"=NewBlock", false}, + {"='NewBlock'", false}, {"tm.events.type=", false}, {"tm.events.typeNewBlock", false}, + {"tm.events.type'NewBlock'", false}, + {"'NewBlock'", false}, {"NewBlock", false}, {"", false}, - {"tm.events.type=NewBlock AND abci.account.name=Igor", true}, - {"tm.events.type=NewBlock AND", false}, - {"tm.events.type=NewBlock AN", false}, - {"tm.events.type=NewBlock AN tm.events.type=NewBlockHeader", false}, - {"AND tm.events.type=NewBlock ", false}, + {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, + {"tm.events.type='NewBlock' AND", false}, + {"tm.events.type='NewBlock' AN", false}, + {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, + {"AND tm.events.type='NewBlock' ", false}, - {"abci.account.name CONTAINS Igor", true}, + {"abci.account.name CONTAINS 'Igor'", true}, {"tx.date > DATE 2013-05-03", true}, {"tx.date < DATE 2013-05-03", true}, diff --git a/pubsub/query/query.go b/pubsub/query/query.go index f084a3f98..fdfb87d7a 100644 --- a/pubsub/query/query.go +++ b/pubsub/query/query.go @@ -94,9 +94,12 @@ func (q *Query) Matches(tags map[string]interface{}) bool { case rulecontains: op = opContains case rulevalue: + // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + // see if the triplet (tag, operator, operand) matches any tag // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - if !match(tag, op, reflect.ValueOf(buffer[begin:end]), tags) { + if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { return false } case rulenumber: diff --git a/pubsub/query/query.peg b/pubsub/query/query.peg index 9654289c4..739892e4f 100644 --- a/pubsub/query/query.peg +++ b/pubsub/query/query.peg @@ -13,8 +13,8 @@ condition <- tag ' '* (le ' '* (number / time / date) / contains ' '* value ) -tag <- < (![ \t\n\r\\()"=><] .)+ > -value <- < (![ \t\n\r\\()"=><] .)+ > +tag <- < (![ \t\n\r\\()"'=><] .)+ > +value <- < '\'' (!["'] .)* '\''> number <- < ('0' / [1-9] digit* ('.' digit*)?) > digit <- [0-9] diff --git a/pubsub/query/query.peg.go b/pubsub/query/query.peg.go index 5cd0a9e32..37ce75cd9 100644 --- a/pubsub/query/query.peg.go +++ b/pubsub/query/query.peg.go @@ -442,7 +442,7 @@ func (p *QueryParser) Init() { position, tokenIndex = position0, tokenIndex0 return false }, - /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* (number / time / date / value))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ + /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | 
'1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ func() bool { position16, tokenIndex16 := position, tokenIndex { @@ -473,6 +473,12 @@ func (p *QueryParser) Init() { } position++ break + case '\'': + if buffer[position] != rune('\'') { + goto l22 + } + position++ + break case '"': if buffer[position] != rune('"') { goto l22 @@ -556,6 +562,12 @@ func (p *QueryParser) Init() { } position++ break + case '\'': + if buffer[position] != rune('\'') { + goto l24 + } + position++ + break case '"': if buffer[position] != rune('"') { goto l24 @@ -750,50 +762,50 @@ func (p *QueryParser) Init() { position, tokenIndex = position42, tokenIndex42 } { - position43, tokenIndex43 := position, tokenIndex - if !_rules[rulenumber]() { - goto l44 - } - goto l43 - l44: - position, tokenIndex = position43, tokenIndex43 - if !_rules[ruletime]() { - goto l45 - } - goto l43 - l45: - position, tokenIndex = position43, tokenIndex43 - if !_rules[ruledate]() { - goto l46 - } - goto l43 - l46: - position, tokenIndex = position43, tokenIndex43 - if !_rules[rulevalue]() { - goto l16 + switch buffer[position] { + case '\'': + if !_rules[rulevalue]() { + goto l16 + } + break + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break } } - l43: + break case '>': { - position47 := position + position44 := position if buffer[position] != rune('>') { goto l16 } position++ - add(ruleg, 
position47) + add(ruleg, position44) } - l48: + l45: { - position49, tokenIndex49 := position, tokenIndex + position46, tokenIndex46 := position, tokenIndex if buffer[position] != rune(' ') { - goto l49 + goto l46 } position++ - goto l48 - l49: - position, tokenIndex = position49, tokenIndex49 + goto l45 + l46: + position, tokenIndex = position46, tokenIndex46 } { switch buffer[position] { @@ -818,23 +830,23 @@ func (p *QueryParser) Init() { break case '<': { - position51 := position + position48 := position if buffer[position] != rune('<') { goto l16 } position++ - add(rulel, position51) + add(rulel, position48) } - l52: + l49: { - position53, tokenIndex53 := position, tokenIndex + position50, tokenIndex50 := position, tokenIndex if buffer[position] != rune(' ') { - goto l53 + goto l50 } position++ - goto l52 - l53: - position, tokenIndex = position53, tokenIndex53 + goto l49 + l50: + position, tokenIndex = position50, tokenIndex50 } { switch buffer[position] { @@ -859,139 +871,139 @@ func (p *QueryParser) Init() { break default: { - position55 := position + position52 := position { - position56, tokenIndex56 := position, tokenIndex + position53, tokenIndex53 := position, tokenIndex if buffer[position] != rune('c') { - goto l57 + goto l54 } position++ - goto l56 - l57: - position, tokenIndex = position56, tokenIndex56 + goto l53 + l54: + position, tokenIndex = position53, tokenIndex53 if buffer[position] != rune('C') { goto l16 } position++ } - l56: + l53: { - position58, tokenIndex58 := position, tokenIndex + position55, tokenIndex55 := position, tokenIndex if buffer[position] != rune('o') { - goto l59 + goto l56 } position++ - goto l58 - l59: - position, tokenIndex = position58, tokenIndex58 + goto l55 + l56: + position, tokenIndex = position55, tokenIndex55 if buffer[position] != rune('O') { goto l16 } position++ } - l58: + l55: { - position60, tokenIndex60 := position, tokenIndex + position57, tokenIndex57 := position, tokenIndex if buffer[position] != 
rune('n') { - goto l61 + goto l58 } position++ - goto l60 - l61: - position, tokenIndex = position60, tokenIndex60 + goto l57 + l58: + position, tokenIndex = position57, tokenIndex57 if buffer[position] != rune('N') { goto l16 } position++ } - l60: + l57: { - position62, tokenIndex62 := position, tokenIndex + position59, tokenIndex59 := position, tokenIndex if buffer[position] != rune('t') { - goto l63 + goto l60 } position++ - goto l62 - l63: - position, tokenIndex = position62, tokenIndex62 + goto l59 + l60: + position, tokenIndex = position59, tokenIndex59 if buffer[position] != rune('T') { goto l16 } position++ } - l62: + l59: { - position64, tokenIndex64 := position, tokenIndex + position61, tokenIndex61 := position, tokenIndex if buffer[position] != rune('a') { - goto l65 + goto l62 } position++ - goto l64 - l65: - position, tokenIndex = position64, tokenIndex64 + goto l61 + l62: + position, tokenIndex = position61, tokenIndex61 if buffer[position] != rune('A') { goto l16 } position++ } - l64: + l61: { - position66, tokenIndex66 := position, tokenIndex + position63, tokenIndex63 := position, tokenIndex if buffer[position] != rune('i') { - goto l67 + goto l64 } position++ - goto l66 - l67: - position, tokenIndex = position66, tokenIndex66 + goto l63 + l64: + position, tokenIndex = position63, tokenIndex63 if buffer[position] != rune('I') { goto l16 } position++ } - l66: + l63: { - position68, tokenIndex68 := position, tokenIndex + position65, tokenIndex65 := position, tokenIndex if buffer[position] != rune('n') { - goto l69 + goto l66 } position++ - goto l68 - l69: - position, tokenIndex = position68, tokenIndex68 + goto l65 + l66: + position, tokenIndex = position65, tokenIndex65 if buffer[position] != rune('N') { goto l16 } position++ } - l68: + l65: { - position70, tokenIndex70 := position, tokenIndex + position67, tokenIndex67 := position, tokenIndex if buffer[position] != rune('s') { - goto l71 + goto l68 } position++ - goto l70 - l71: - position, 
tokenIndex = position70, tokenIndex70 + goto l67 + l68: + position, tokenIndex = position67, tokenIndex67 if buffer[position] != rune('S') { goto l16 } position++ } - l70: - add(rulecontains, position55) + l67: + add(rulecontains, position52) } - l72: + l69: { - position73, tokenIndex73 := position, tokenIndex + position70, tokenIndex70 := position, tokenIndex if buffer[position] != rune(' ') { - goto l73 + goto l70 } position++ - goto l72 - l73: - position, tokenIndex = position73, tokenIndex73 + goto l69 + l70: + position, tokenIndex = position70, tokenIndex70 } if !_rules[rulevalue]() { goto l16 @@ -1009,629 +1021,501 @@ func (p *QueryParser) Init() { position, tokenIndex = position16, tokenIndex16 return false }, - /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ nil, - /* 3 value <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ func() bool { - position75, tokenIndex75 := position, tokenIndex + position72, tokenIndex72 := position, tokenIndex { - position76 := position + position73 := position { - position77 := position + position74 := position + if buffer[position] != rune('\'') { + goto l72 + } + position++ + l75: { - position80, tokenIndex80 := position, tokenIndex + position76, tokenIndex76 := position, tokenIndex { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l80 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l80 - } - position++ - 
break - case '=': - if buffer[position] != rune('=') { - goto l80 - } - position++ - break - case '"': + position77, tokenIndex77 := position, tokenIndex + { + position78, tokenIndex78 := position, tokenIndex if buffer[position] != rune('"') { - goto l80 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l80 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l80 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l80 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l80 + goto l79 } position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l80 + goto l78 + l79: + position, tokenIndex = position78, tokenIndex78 + if buffer[position] != rune('\'') { + goto l77 } position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l80 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l80 - } - position++ - break } - } - - goto l75 - l80: - position, tokenIndex = position80, tokenIndex80 - } - if !matchDot() { - goto l75 - } - l78: - { - position79, tokenIndex79 := position, tokenIndex - { - position82, tokenIndex82 := position, tokenIndex - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l82 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l82 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l82 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l82 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l82 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l82 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l82 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l82 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { 
- goto l82 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l82 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l82 - } - position++ - break - } - } - - goto l79 - l82: - position, tokenIndex = position82, tokenIndex82 + l78: + goto l76 + l77: + position, tokenIndex = position77, tokenIndex77 } if !matchDot() { - goto l79 + goto l76 } - goto l78 - l79: - position, tokenIndex = position79, tokenIndex79 + goto l75 + l76: + position, tokenIndex = position76, tokenIndex76 } - add(rulePegText, position77) + if buffer[position] != rune('\'') { + goto l72 + } + position++ + add(rulePegText, position74) } - add(rulevalue, position76) + add(rulevalue, position73) } return true - l75: - position, tokenIndex = position75, tokenIndex75 + l72: + position, tokenIndex = position72, tokenIndex72 return false }, /* 4 number <- <<('0' / ([1-9] digit* ('.' digit*)?))>> */ func() bool { - position84, tokenIndex84 := position, tokenIndex + position80, tokenIndex80 := position, tokenIndex { - position85 := position + position81 := position { - position86 := position + position82 := position { - position87, tokenIndex87 := position, tokenIndex + position83, tokenIndex83 := position, tokenIndex if buffer[position] != rune('0') { - goto l88 + goto l84 } position++ - goto l87 - l88: - position, tokenIndex = position87, tokenIndex87 + goto l83 + l84: + position, tokenIndex = position83, tokenIndex83 if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l84 + goto l80 } position++ - l89: + l85: { - position90, tokenIndex90 := position, tokenIndex + position86, tokenIndex86 := position, tokenIndex if !_rules[ruledigit]() { - goto l90 + goto l86 } - goto l89 - l90: - position, tokenIndex = position90, tokenIndex90 + goto l85 + l86: + position, tokenIndex = position86, tokenIndex86 } { - position91, tokenIndex91 := position, tokenIndex + position87, tokenIndex87 := position, tokenIndex if buffer[position] != rune('.') 
{ - goto l91 + goto l87 } position++ - l93: + l89: { - position94, tokenIndex94 := position, tokenIndex + position90, tokenIndex90 := position, tokenIndex if !_rules[ruledigit]() { - goto l94 + goto l90 } - goto l93 - l94: - position, tokenIndex = position94, tokenIndex94 + goto l89 + l90: + position, tokenIndex = position90, tokenIndex90 } - goto l92 - l91: - position, tokenIndex = position91, tokenIndex91 + goto l88 + l87: + position, tokenIndex = position87, tokenIndex87 } - l92: + l88: } - l87: - add(rulePegText, position86) + l83: + add(rulePegText, position82) } - add(rulenumber, position85) + add(rulenumber, position81) } return true - l84: - position, tokenIndex = position84, tokenIndex84 + l80: + position, tokenIndex = position80, tokenIndex80 return false }, /* 5 digit <- <[0-9]> */ func() bool { - position95, tokenIndex95 := position, tokenIndex + position91, tokenIndex91 := position, tokenIndex { - position96 := position + position92 := position if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l95 + goto l91 } position++ - add(ruledigit, position96) + add(ruledigit, position92) } return true - l95: - position, tokenIndex = position95, tokenIndex95 + l91: + position, tokenIndex = position91, tokenIndex91 return false }, /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ func() bool { - position97, tokenIndex97 := position, tokenIndex + position93, tokenIndex93 := position, tokenIndex { - position98 := position + position94 := position { - position99, tokenIndex99 := position, tokenIndex + position95, tokenIndex95 := position, tokenIndex if buffer[position] != rune('t') { - goto l100 + goto l96 } position++ - goto l99 - l100: - position, tokenIndex = position99, tokenIndex99 + goto l95 + l96: + position, tokenIndex = position95, tokenIndex95 if buffer[position] != rune('T') { - goto l97 + goto 
l93 } position++ } - l99: + l95: { - position101, tokenIndex101 := position, tokenIndex + position97, tokenIndex97 := position, tokenIndex if buffer[position] != rune('i') { - goto l102 + goto l98 } position++ - goto l101 - l102: - position, tokenIndex = position101, tokenIndex101 + goto l97 + l98: + position, tokenIndex = position97, tokenIndex97 if buffer[position] != rune('I') { - goto l97 + goto l93 } position++ } - l101: + l97: { - position103, tokenIndex103 := position, tokenIndex + position99, tokenIndex99 := position, tokenIndex if buffer[position] != rune('m') { - goto l104 + goto l100 } position++ - goto l103 - l104: - position, tokenIndex = position103, tokenIndex103 + goto l99 + l100: + position, tokenIndex = position99, tokenIndex99 if buffer[position] != rune('M') { - goto l97 + goto l93 } position++ } - l103: + l99: { - position105, tokenIndex105 := position, tokenIndex + position101, tokenIndex101 := position, tokenIndex if buffer[position] != rune('e') { - goto l106 + goto l102 } position++ - goto l105 - l106: - position, tokenIndex = position105, tokenIndex105 + goto l101 + l102: + position, tokenIndex = position101, tokenIndex101 if buffer[position] != rune('E') { - goto l97 + goto l93 } position++ } - l105: + l101: if buffer[position] != rune(' ') { - goto l97 + goto l93 } position++ { - position107 := position + position103 := position if !_rules[ruleyear]() { - goto l97 + goto l93 } if buffer[position] != rune('-') { - goto l97 + goto l93 } position++ if !_rules[rulemonth]() { - goto l97 + goto l93 } if buffer[position] != rune('-') { - goto l97 + goto l93 } position++ if !_rules[ruleday]() { - goto l97 + goto l93 } if buffer[position] != rune('T') { - goto l97 + goto l93 } position++ if !_rules[ruledigit]() { - goto l97 + goto l93 } if !_rules[ruledigit]() { - goto l97 + goto l93 } if buffer[position] != rune(':') { - goto l97 + goto l93 } position++ if !_rules[ruledigit]() { - goto l97 + goto l93 } if !_rules[ruledigit]() { - goto l97 + goto 
l93 } if buffer[position] != rune(':') { - goto l97 + goto l93 } position++ if !_rules[ruledigit]() { - goto l97 + goto l93 } if !_rules[ruledigit]() { - goto l97 + goto l93 } { - position108, tokenIndex108 := position, tokenIndex + position104, tokenIndex104 := position, tokenIndex { - position110, tokenIndex110 := position, tokenIndex + position106, tokenIndex106 := position, tokenIndex if buffer[position] != rune('-') { - goto l111 + goto l107 } position++ - goto l110 - l111: - position, tokenIndex = position110, tokenIndex110 + goto l106 + l107: + position, tokenIndex = position106, tokenIndex106 if buffer[position] != rune('+') { - goto l109 + goto l105 } position++ } - l110: + l106: if !_rules[ruledigit]() { - goto l109 + goto l105 } if !_rules[ruledigit]() { - goto l109 + goto l105 } if buffer[position] != rune(':') { - goto l109 + goto l105 } position++ if !_rules[ruledigit]() { - goto l109 + goto l105 } if !_rules[ruledigit]() { - goto l109 + goto l105 } - goto l108 - l109: - position, tokenIndex = position108, tokenIndex108 + goto l104 + l105: + position, tokenIndex = position104, tokenIndex104 if buffer[position] != rune('Z') { - goto l97 + goto l93 } position++ } - l108: - add(rulePegText, position107) + l104: + add(rulePegText, position103) } - add(ruletime, position98) + add(ruletime, position94) } return true - l97: - position, tokenIndex = position97, tokenIndex97 + l93: + position, tokenIndex = position93, tokenIndex93 return false }, /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ func() bool { - position112, tokenIndex112 := position, tokenIndex + position108, tokenIndex108 := position, tokenIndex { - position113 := position + position109 := position { - position114, tokenIndex114 := position, tokenIndex + position110, tokenIndex110 := position, tokenIndex if buffer[position] != rune('d') { - goto l115 + goto l111 } position++ - goto l114 - l115: - position, tokenIndex = position114, 
tokenIndex114 + goto l110 + l111: + position, tokenIndex = position110, tokenIndex110 if buffer[position] != rune('D') { - goto l112 + goto l108 } position++ } - l114: + l110: { - position116, tokenIndex116 := position, tokenIndex + position112, tokenIndex112 := position, tokenIndex if buffer[position] != rune('a') { - goto l117 + goto l113 } position++ - goto l116 - l117: - position, tokenIndex = position116, tokenIndex116 + goto l112 + l113: + position, tokenIndex = position112, tokenIndex112 if buffer[position] != rune('A') { - goto l112 + goto l108 } position++ } - l116: + l112: { - position118, tokenIndex118 := position, tokenIndex + position114, tokenIndex114 := position, tokenIndex if buffer[position] != rune('t') { - goto l119 + goto l115 } position++ - goto l118 - l119: - position, tokenIndex = position118, tokenIndex118 + goto l114 + l115: + position, tokenIndex = position114, tokenIndex114 if buffer[position] != rune('T') { - goto l112 + goto l108 } position++ } - l118: + l114: { - position120, tokenIndex120 := position, tokenIndex + position116, tokenIndex116 := position, tokenIndex if buffer[position] != rune('e') { - goto l121 + goto l117 } position++ - goto l120 - l121: - position, tokenIndex = position120, tokenIndex120 + goto l116 + l117: + position, tokenIndex = position116, tokenIndex116 if buffer[position] != rune('E') { - goto l112 + goto l108 } position++ } - l120: + l116: if buffer[position] != rune(' ') { - goto l112 + goto l108 } position++ { - position122 := position + position118 := position if !_rules[ruleyear]() { - goto l112 + goto l108 } if buffer[position] != rune('-') { - goto l112 + goto l108 } position++ if !_rules[rulemonth]() { - goto l112 + goto l108 } if buffer[position] != rune('-') { - goto l112 + goto l108 } position++ if !_rules[ruleday]() { - goto l112 + goto l108 } - add(rulePegText, position122) + add(rulePegText, position118) } - add(ruledate, position113) + add(ruledate, position109) } return true - l112: - position, 
tokenIndex = position112, tokenIndex112 + l108: + position, tokenIndex = position108, tokenIndex108 return false }, /* 8 year <- <(('1' / '2') digit digit digit)> */ func() bool { - position123, tokenIndex123 := position, tokenIndex + position119, tokenIndex119 := position, tokenIndex { - position124 := position + position120 := position { - position125, tokenIndex125 := position, tokenIndex + position121, tokenIndex121 := position, tokenIndex if buffer[position] != rune('1') { - goto l126 + goto l122 } position++ - goto l125 - l126: - position, tokenIndex = position125, tokenIndex125 + goto l121 + l122: + position, tokenIndex = position121, tokenIndex121 if buffer[position] != rune('2') { - goto l123 + goto l119 } position++ } - l125: + l121: if !_rules[ruledigit]() { - goto l123 + goto l119 } if !_rules[ruledigit]() { - goto l123 + goto l119 } if !_rules[ruledigit]() { - goto l123 + goto l119 } - add(ruleyear, position124) + add(ruleyear, position120) } return true - l123: - position, tokenIndex = position123, tokenIndex123 + l119: + position, tokenIndex = position119, tokenIndex119 return false }, /* 9 month <- <(('0' / '1') digit)> */ func() bool { - position127, tokenIndex127 := position, tokenIndex + position123, tokenIndex123 := position, tokenIndex { - position128 := position + position124 := position { - position129, tokenIndex129 := position, tokenIndex + position125, tokenIndex125 := position, tokenIndex if buffer[position] != rune('0') { - goto l130 + goto l126 } position++ - goto l129 - l130: - position, tokenIndex = position129, tokenIndex129 + goto l125 + l126: + position, tokenIndex = position125, tokenIndex125 if buffer[position] != rune('1') { - goto l127 + goto l123 } position++ } - l129: + l125: if !_rules[ruledigit]() { - goto l127 + goto l123 } - add(rulemonth, position128) + add(rulemonth, position124) } return true - l127: - position, tokenIndex = position127, tokenIndex127 + l123: + position, tokenIndex = position123, tokenIndex123 return 
false }, /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ func() bool { - position131, tokenIndex131 := position, tokenIndex + position127, tokenIndex127 := position, tokenIndex { - position132 := position + position128 := position { switch buffer[position] { case '3': if buffer[position] != rune('3') { - goto l131 + goto l127 } position++ break case '2': if buffer[position] != rune('2') { - goto l131 + goto l127 } position++ break case '1': if buffer[position] != rune('1') { - goto l131 + goto l127 } position++ break default: if buffer[position] != rune('0') { - goto l131 + goto l127 } position++ break @@ -1639,13 +1523,13 @@ func (p *QueryParser) Init() { } if !_rules[ruledigit]() { - goto l131 + goto l127 } - add(ruleday, position132) + add(ruleday, position128) } return true - l131: - position, tokenIndex = position131, tokenIndex131 + l127: + position, tokenIndex = position127, tokenIndex127 return false }, /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go index 75d02ee49..431ae1fef 100644 --- a/pubsub/query/query_test.go +++ b/pubsub/query/query_test.go @@ -22,7 +22,7 @@ func TestMatches(t *testing.T) { err bool matches bool }{ - {"tm.events.type=NewBlock", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true}, + {"tm.events.type='NewBlock'", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true}, {"tx.gas > 7", map[string]interface{}{"tx.gas": 8}, false, true}, {"tx.gas > 7 AND tx.gas < 9", map[string]interface{}{"tx.gas": 8}, false, true}, @@ -40,8 +40,8 @@ func TestMatches(t *testing.T) { {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": time.Now()}, false, true}, {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": txTime}, false, false}, - {"abci.owner.name CONTAINS Igor", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true}, - {"abci.owner.name CONTAINS 
Igor", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false}, + {"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true}, + {"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false}, } for _, tc := range testCases { @@ -60,5 +60,5 @@ func TestMatches(t *testing.T) { func TestMustParse(t *testing.T) { assert.Panics(t, func() { query.MustParse("=") }) - assert.NotPanics(t, func() { query.MustParse("tm.events.type=NewBlock") }) + assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) } From 8a51210efca6ecd881d3336c8d7b4f6ef0ff30e2 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 28 Jul 2017 11:22:48 -0400 Subject: [PATCH 195/515] [common] use temp intead of {filePath}.new The problem with {filePath}.new is that it is not safe for concurrent use! Calling this function with the same params results in the following error: ``` panic: Panicked on a Crisis: rename /root/.tendermint_test/consensus_replay_test/priv_validator.json.new /root/.tendermint_test/consensus_replay_test/priv_validator.json: no such file or directory goroutine 47860 [running]: github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.PanicCrisis(0xcba800, 0xc42152d640) /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/errors.go:33 +0x10f github.com/tendermint/tendermint/types.(*PrivValidator).save(0xc42235f2c0) /go/src/github.com/tendermint/tendermint/types/priv_validator.go:165 +0x159 github.com/tendermint/tendermint/types.(*PrivValidator).signBytesHRS(0xc42235f2c0, 0x6, 0x0, 0xc424e88f03, 0xc429908580, 0xca, 0x155, 0x80, 0xc424e88f00, 0x7f4ecafc88d0, ...) 
/go/src/github.com/tendermint/tendermint/types/priv_validator.go:249 +0x2bb github.com/tendermint/tendermint/types.(*PrivValidator).SignVote(0xc42235f2c0, 0xc4228c7460, 0xf, 0xc424e88f00, 0x0, 0x0) /go/src/github.com/tendermint/tendermint/types/priv_validator.go:186 +0x1a2 github.com/tendermint/tendermint/consensus.(*ConsensusState).signVote(0xc424efd520, 0xc400000002, 0xc422d5e3c0, 0x14, 0x20, 0x1, 0xc4247b6560, 0x14, 0x20, 0x0, ...) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:1556 +0x35e github.com/tendermint/tendermint/consensus.(*ConsensusState).signAddVote(0xc424efd520, 0x2, 0xc422d5e3c0, 0x14, 0x20, 0x1, 0xc4247b6560, 0x14, 0x20, 0xc42001b300) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:1568 +0x200 github.com/tendermint/tendermint/consensus.(*ConsensusState).enterPrecommit(0xc424efd520, 0x6, 0x0) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:1082 +0x13a4 github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc424efd520, 0xc424e88780, 0x0, 0x0, 0x39, 0x1dc, 0x28c) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:1477 +0x1be5 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc424efd520, 0xc424e88780, 0x0, 0x0, 0xd7fb00, 0xc42152ce00) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:1382 +0x93 github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc424efd520, 0xcb58e0, 0xc42547feb8, 0x0, 0x0, 0x6, 0x0, 0x4, 0xed10ca07e, 0x3077bfea, ...) 
github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:660 +0x9fc github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc424efd520, 0x0) github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:615 +0x5f5 created by github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart github.com/tendermint/tendermint/consensus/_test/_obj_test/state.go:332 +0x4a7 exit status 2 FAIL github.com/tendermint/tendermint/consensus 76.644s make: *** [test_integrations] Error 1 ``` See https://github.com/tendermint/tendermint/pull/568 --- common/os.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/common/os.go b/common/os.go index 9dc81c579..ae2ed087d 100644 --- a/common/os.go +++ b/common/os.go @@ -93,9 +93,8 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { } } -// Writes to newBytes to filePath. -// Guaranteed not to lose *both* oldBytes and newBytes, -// (assuming that the OS is perfect) +// WriteFileAtomic writes newBytes to temp and atomically moves to filePath +// when everything else succeeds. func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { // If a file already exists there, copy to filePath+".bak" (overwrite anything) if _, err := os.Stat(filePath); !os.IsNotExist(err) { @@ -108,13 +107,27 @@ func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { return fmt.Errorf("Could not write file %v. %v", filePath+".bak", err) } } - // Write newBytes to filePath.new - err := ioutil.WriteFile(filePath+".new", newBytes, mode) + f, err := ioutil.TempFile("", "") if err != nil { - return fmt.Errorf("Could not write file %v. 
%v", filePath+".new", err) + return err + } + _, err = f.Write(newBytes) + if err == nil { + err = f.Sync() + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + if permErr := os.Chmod(f.Name(), mode); err == nil { + err = permErr + } + if err == nil { + err = os.Rename(f.Name(), filePath) + } + // any err should result in full cleanup + if err != nil { + os.Remove(f.Name()) } - // Move filePath.new to filePath - err = os.Rename(filePath+".new", filePath) return err } From b25aa3b472f67638710954460ef4c77e28dd9e8f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 28 Jul 2017 11:26:04 -0400 Subject: [PATCH 196/515] [common] do not create {filePath}.bak in WriteFileAtomic We use WriteFileAtomic in two places: ``` p2p/addrbook.go 338: err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644) types/priv_validator.go 162: err = WriteFileAtomic(privVal.filePath, jsonBytes, 0600) ``` and we don't need .bak in any of the above. We save priv_validator every 10ms and addrbook every 2 min. --- common/os.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/common/os.go b/common/os.go index ae2ed087d..b1e778977 100644 --- a/common/os.go +++ b/common/os.go @@ -96,17 +96,6 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { // WriteFileAtomic writes newBytes to temp and atomically moves to filePath // when everything else succeeds. func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { - // If a file already exists there, copy to filePath+".bak" (overwrite anything) - if _, err := os.Stat(filePath); !os.IsNotExist(err) { - fileBytes, err := ioutil.ReadFile(filePath) - if err != nil { - return fmt.Errorf("Could not read file %v. %v", filePath, err) - } - err = ioutil.WriteFile(filePath+".bak", fileBytes, mode) - if err != nil { - return fmt.Errorf("Could not write file %v. 
%v", filePath+".bak", err) - } - } f, err := ioutil.TempFile("", "") if err != nil { return err From d1ca2c6f838fe7428b7da6f62ebf293d4d91b44b Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 28 Jul 2017 11:31:50 -0400 Subject: [PATCH 197/515] [common] add a test for WriteFileAtomic --- common/os_test.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 common/os_test.go diff --git a/common/os_test.go b/common/os_test.go new file mode 100644 index 000000000..05359e36e --- /dev/null +++ b/common/os_test.go @@ -0,0 +1,29 @@ +package common + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + "time" +) + +func TestWriteFileAtomic(t *testing.T) { + data := []byte("Becatron") + fname := fmt.Sprintf("/tmp/write-file-atomic-test-%v.txt", time.Now().UnixNano()) + err := WriteFileAtomic(fname, data, 0664) + if err != nil { + t.Fatal(err) + } + rData, err := ioutil.ReadFile(fname) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, rData) { + t.Fatalf("data mismatch: %v != %v", data, rData) + } + if err := os.Remove(fname); err != nil { + t.Fatal(err) + } +} From d67a62171542ef848f97eecf898b5ecf64fe83b5 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Mon, 31 Jul 2017 15:11:15 -0600 Subject: [PATCH 198/515] http: http-utils added after extraction Found common http utils that were being multiply duplicated across many libraries and since am moving things in basecoin/unstable to add for more functionality, it's better to put them in one place. Utilities and tests added: - [X] FparseJSON - [X] FparseAndValidateJSON - [X] ParseRequestJSON - [X] ParseAndValidateRequestJSON - [X] WriteCode - [X] WriteError - [X] WriteSuccess - [X] ErrorResponse During review from @ethanfrey, made updates: * Removed tt.want since it was a distraction/artifact that made the reviewer think the tests weren't testing for both failed and passed results. 
* Added ErrorWithCode as WithCode is a common options pattern in Go that could cause confusion: ErrorWithCode(error, int) ErrorResponse * Using json.NewDecoder(io.Reader) error instead of ioutil.ReadAll(io.Reader) to slurp all the bytes. * Added more test scenarios to achieve 100% coverage of http.go --- common/http.go | 153 +++++++++++++++++++++++++++ common/http_test.go | 250 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 403 insertions(+) create mode 100644 common/http.go create mode 100644 common/http_test.go diff --git a/common/http.go b/common/http.go new file mode 100644 index 000000000..56b5b6c63 --- /dev/null +++ b/common/http.go @@ -0,0 +1,153 @@ +package common + +import ( + "encoding/json" + "io" + "net/http" + + "gopkg.in/go-playground/validator.v9" + + "github.com/pkg/errors" +) + +type ErrorResponse struct { + Success bool `json:"success,omitempty"` + + // Err is the error message if Success is false + Err string `json:"error,omitempty"` + + // Code is set if Success is false + Code int `json:"code,omitempty"` +} + +// ErrorWithCode makes an ErrorResponse with the +// provided err's Error() content, and status code. +// It panics if err is nil. +func ErrorWithCode(err error, code int) *ErrorResponse { + return &ErrorResponse{ + Err: err.Error(), + Code: code, + } +} + +// Ensure that ErrorResponse implements error +var _ error = (*ErrorResponse)(nil) + +func (er *ErrorResponse) Error() string { + return er.Err +} + +// Ensure that ErrorResponse implements httpCoder +var _ httpCoder = (*ErrorResponse)(nil) + +func (er *ErrorResponse) HTTPCode() int { + return er.Code +} + +var errNilBody = errors.Errorf("expecting a non-nil body") + +// FparseJSON unmarshals into save, the body of the provided reader. +// Since it uses json.Unmarshal, save must be of a pointer type +// or compatible with json.Unmarshal. 
+func FparseJSON(r io.Reader, save interface{}) error { + if r == nil { + return errors.Wrap(errNilBody, "Reader") + } + + dec := json.NewDecoder(r) + if err := dec.Decode(save); err != nil { + return errors.Wrap(err, "Decode/Unmarshal") + } + return nil +} + +// ParseRequestJSON unmarshals into save, the body of the +// request. It closes the body of the request after parsing. +// Since it uses json.Unmarshal, save must be of a pointer type +// or compatible with json.Unmarshal. +func ParseRequestJSON(r *http.Request, save interface{}) error { + if r == nil || r.Body == nil { + return errNilBody + } + defer r.Body.Close() + + return FparseJSON(r.Body, save) +} + +// ParseRequestAndValidateJSON unmarshals into save, the body of the +// request and invokes a validator on the saved content. To ensure +// validation, make sure to set tags "validate" on your struct as +// per https://godoc.org/gopkg.in/go-playground/validator.v9. +// It closes the body of the request after parsing. +// Since it uses json.Unmarshal, save must be of a pointer type +// or compatible with json.Unmarshal. +func ParseRequestAndValidateJSON(r *http.Request, save interface{}) error { + if r == nil || r.Body == nil { + return errNilBody + } + defer r.Body.Close() + + return FparseAndValidateJSON(r.Body, save) +} + +// FparseAndValidateJSON like FparseJSON unmarshals into save, +// the body of the provided reader. However, it invokes the validator +// to check the set validators on your struct fields as per +// per https://godoc.org/gopkg.in/go-playground/validator.v9. +// Since it uses json.Unmarshal, save must be of a pointer type +// or compatible with json.Unmarshal. 
+func FparseAndValidateJSON(r io.Reader, save interface{}) error { + if err := FparseJSON(r, save); err != nil { + return err + } + return validate(save) +} + +var theValidator = validator.New() + +func validate(obj interface{}) error { + return errors.Wrap(theValidator.Struct(obj), "Validate") +} + +// WriteSuccess JSON marshals the content provided, to an HTTP +// response, setting the provided status code and setting header +// "Content-Type" to "application/json". +func WriteSuccess(w http.ResponseWriter, data interface{}) { + WriteCode(w, data, 200) +} + +// WriteCode JSON marshals content, to an HTTP response, +// setting the provided status code, and setting header +// "Content-Type" to "application/json". If JSON marshalling fails +// with an error, WriteCode instead writes out the error invoking +// WriteError. +func WriteCode(w http.ResponseWriter, out interface{}, code int) { + blob, err := json.MarshalIndent(out, "", " ") + if err != nil { + WriteError(w, err) + } else { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + w.Write(blob) + } +} + +type httpCoder interface { + HTTPCode() int +} + +// WriteError is a convenience function to write out an +// error to an http.ResponseWriter, to send out an error +// that's structured as JSON i.e the form +// {"error": sss, "code": ddd} +// If err implements the interface HTTPCode() int, +// it will use that status code otherwise, it will +// set code to be http.StatusBadRequest +func WriteError(w http.ResponseWriter, err error) { + code := http.StatusBadRequest + if httpC, ok := err.(httpCoder); ok { + code = httpC.HTTPCode() + } + + WriteCode(w, ErrorWithCode(err, code), code) +} diff --git a/common/http_test.go b/common/http_test.go new file mode 100644 index 000000000..b207684b8 --- /dev/null +++ b/common/http_test.go @@ -0,0 +1,250 @@ +package common_test + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + 
"strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tmlibs/common" +) + +func TestWriteSuccess(t *testing.T) { + w := httptest.NewRecorder() + common.WriteSuccess(w, "foo") + assert.Equal(t, w.Code, 200, "should get a 200") +} + +var blankErrResponse = new(common.ErrorResponse) + +func TestWriteError(t *testing.T) { + tests := [...]struct { + msg string + code int + }{ + 0: { + msg: "this is a message", + code: 419, + }, + } + + for i, tt := range tests { + w := httptest.NewRecorder() + msg := tt.msg + + // First check without a defined code, should send back a 400 + common.WriteError(w, errors.New(msg)) + assert.Equal(t, w.Code, http.StatusBadRequest, "#%d: should get a 400", i) + blob, err := ioutil.ReadAll(w.Body) + if err != nil { + assert.Failf(t, "expecting a successful ioutil.ReadAll", "#%d", i) + continue + } + + recv := new(common.ErrorResponse) + if err := json.Unmarshal(blob, recv); err != nil { + assert.Failf(t, "expecting a successful json.Unmarshal", "#%d", i) + continue + } + + assert.Equal(t, reflect.DeepEqual(recv, blankErrResponse), false, "expecting a non-blank error response") + + // Now test with an error that's .HTTPCode() int conforming + + // Reset w + w = httptest.NewRecorder() + + common.WriteError(w, common.ErrorWithCode(errors.New("foo"), tt.code)) + assert.Equal(t, w.Code, tt.code, "case #%d", i) + } +} + +type marshalFailer struct{} + +var errFooFailed = errors.New("foo failed here") + +func (mf *marshalFailer) MarshalJSON() ([]byte, error) { + return nil, errFooFailed +} + +func TestWriteCode(t *testing.T) { + codes := [...]int{ + 0: http.StatusOK, + 1: http.StatusBadRequest, + 2: http.StatusUnauthorized, + 3: http.StatusInternalServerError, + } + + for i, code := range codes { + w := httptest.NewRecorder() + common.WriteCode(w, "foo", code) + assert.Equal(t, w.Code, code, "#%d", i) + + // Then for the failed JSON marshaling + w = 
httptest.NewRecorder() + common.WriteCode(w, &marshalFailer{}, code) + wantCode := http.StatusBadRequest + assert.Equal(t, w.Code, wantCode, "#%d", i) + assert.True(t, strings.Contains(string(w.Body.Bytes()), errFooFailed.Error()), + "#%d: expected %q in the error message", i, errFooFailed) + } +} + +type saver struct { + Foo int `json:"foo" validate:"min=10"` + Bar string `json:"bar"` +} + +type rcloser struct { + closeOnce sync.Once + body *bytes.Buffer + closeChan chan bool +} + +var errAlreadyClosed = errors.New("already closed") + +func (rc *rcloser) Close() error { + var err = errAlreadyClosed + rc.closeOnce.Do(func() { + err = nil + rc.closeChan <- true + close(rc.closeChan) + }) + return err +} + +func (rc *rcloser) Read(b []byte) (int, error) { + return rc.body.Read(b) +} + +var _ io.ReadCloser = (*rcloser)(nil) + +func makeReq(strBody string) (*http.Request, <-chan bool) { + closeChan := make(chan bool, 1) + buf := new(bytes.Buffer) + buf.Write([]byte(strBody)) + req := &http.Request{ + Header: make(http.Header), + Body: &rcloser{body: buf, closeChan: closeChan}, + } + return req, closeChan +} + +func TestParseRequestJSON(t *testing.T) { + tests := [...]struct { + body string + wantErr bool + useNil bool + }{ + 0: {wantErr: true, body: ``}, + 1: {body: `{}`}, + 2: {body: `{"foo": 2}`}, // Not that the validate tags don't matter here since we are just parsing + 3: {body: `{"foo": "abcd"}`, wantErr: true}, + 4: {useNil: true, wantErr: true}, + } + + for i, tt := range tests { + req, closeChan := makeReq(tt.body) + if tt.useNil { + req.Body = nil + } + sav := new(saver) + err := common.ParseRequestJSON(req, sav) + if tt.wantErr { + assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) + continue + } + assert.Equal(t, err, nil, "#%d: want nil error", i) + wasClosed := <-closeChan + assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) + } +} + +func TestFparseJSON(t *testing.T) { + r1 := strings.NewReader(`{"foo": 1}`) + sav := new(saver) 
+ require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") + r2 := strings.NewReader(`{"bar": "blockchain"}`) + require.Equal(t, common.FparseJSON(r2, sav), nil, "expecting successful parsing") + require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 1, Bar: "blockchain"}), true, "should have parsed both") + + // Now with a nil body + require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") +} + +func TestFparseAndValidateJSON(t *testing.T) { + r1 := strings.NewReader(`{"foo": 1}`) + sav := new(saver) + require.NotEqual(t, common.FparseAndValidateJSON(r1, sav), nil, "expecting validation to fail") + r1 = strings.NewReader(`{"foo": 100}`) + require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") + r2 := strings.NewReader(`{"bar": "blockchain"}`) + require.Equal(t, common.FparseAndValidateJSON(r2, sav), nil, "expecting successful parsing") + require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 100, Bar: "blockchain"}), true, "should have parsed both") + + // Now with a nil body + require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") +} + +var blankSaver = new(saver) + +func TestParseAndValidateRequestJSON(t *testing.T) { + tests := [...]struct { + body string + wantErr bool + useNil bool + }{ + 0: {wantErr: true, body: ``}, + 1: {body: `{}`, wantErr: true}, // Here it should fail since Foo doesn't meet the minimum value + 2: {body: `{"foo": 2}`, wantErr: true}, // Here validation should fail + 3: {body: `{"foo": "abcd"}`, wantErr: true}, + 4: {useNil: true, wantErr: true}, + 5: {body: `{"foo": 100}`}, // Must succeed + } + + for i, tt := range tests { + req, closeChan := makeReq(tt.body) + if tt.useNil { + req.Body = nil + } + sav := new(saver) + err := common.ParseRequestAndValidateJSON(req, sav) + if tt.wantErr { + assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) + continue + } + + assert.Equal(t, err, nil, "#%d: want nil error", i) + 
assert.False(t, reflect.DeepEqual(blankSaver, sav), "#%d: expecting a set saver", i) + + wasClosed := <-closeChan + assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) + } +} + +func TestErrorWithCode(t *testing.T) { + tests := [...]struct { + code int + err error + }{ + 0: {code: 500, err: errors.New("funky")}, + 1: {code: 406, err: errors.New("purist")}, + } + + for i, tt := range tests { + errRes := common.ErrorWithCode(tt.err, tt.code) + assert.Equal(t, errRes.Error(), tt.err.Error(), "#%d: expecting the error values to be equal", i) + assert.Equal(t, errRes.Code, tt.code, "expecting the same status code", i) + assert.Equal(t, errRes.HTTPCode(), tt.code, "expecting the same status code", i) + } +} From b4a51871b91cdae3b1e1b742fcd92ef021de7b73 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 4 Aug 2017 02:03:46 -0600 Subject: [PATCH 199/515] common/IsDirEmpty: do not mask non-existance errors Currently IsDirEmpty returns true, err if it encounters any error after trying to os.Open the directory. I noticed this while studying the code and recalled a bug from an earlier project in which doing the exact same thing on code without permissions would trip out and falsely report that the directory was empty. Given demo.go in https://play.golang.org/p/vhTPU2RiCJ * Demo: ```shell $ mkdir -p sample-demo/1 && touch sample-demo/2 $ echo "1st round" && go run demo.go sample-demo $ sudo chown root sample-demo && sudo chmod 0700 sample-demo $ echo "2nd round" && go run demo.go sample-demo ``` That then prints out ```shell 1st round original:: empty: false err: updated:: empty: false err: 2nd round original:: empty: true err: open data/: permission denied updated:: empty: false err: open data/: permission denied ``` where in "2nd round", the original code falsely reports that the directory is empty but that's a permission error. 
I could write a code test for it, but that test requires me to change users and switch to root as a Go user so no point in complicating our tests, but otherwise it is a 1-to-1 translation between shell and Go. --- common/os.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/common/os.go b/common/os.go index 9dc81c579..c19322baa 100644 --- a/common/os.go +++ b/common/os.go @@ -48,7 +48,12 @@ func EnsureDir(dir string, mode os.FileMode) error { func IsDirEmpty(name string) (bool, error) { f, err := os.Open(name) if err != nil { - return true, err //folder is non-existent + if os.IsNotExist(err) { + return true, err + } + // Otherwise perhaps a permission + // error or some other error. + return false, err } defer f.Close() From fa990f08036b3f6bbb0d184c657b805e802d915c Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 10 Aug 2017 19:46:59 -0400 Subject: [PATCH 200/515] add test case for hex --- pubsub/query/parser_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go index d96ac0821..165ddda7b 100644 --- a/pubsub/query/parser_test.go +++ b/pubsub/query/parser_test.go @@ -75,6 +75,9 @@ func TestParser(t *testing.T) { {"account.balance >= -300", false}, {"account.balance >>= 400", false}, {"account.balance=33.22.1", false}, + + {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, + {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, } for _, c := range cases { From 956966e6587aa6b8dd3a375d35c3744291c38d60 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 11 Aug 2017 16:36:26 -0400 Subject: [PATCH 201/515] add missing validator package to glide.yaml --- common/http_test.go | 4 ++-- glide.lock | 16 ++++++++++++---- glide.yaml | 1 + 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/common/http_test.go b/common/http_test.go index b207684b8..73761fb1e 100644 --- a/common/http_test.go +++ b/common/http_test.go @@ -47,13 +47,13 @@ func TestWriteError(t 
*testing.T) { assert.Equal(t, w.Code, http.StatusBadRequest, "#%d: should get a 400", i) blob, err := ioutil.ReadAll(w.Body) if err != nil { - assert.Failf(t, "expecting a successful ioutil.ReadAll", "#%d", i) + assert.Fail(t, "expecting a successful ioutil.ReadAll", "#%d", i) continue } recv := new(common.ErrorResponse) if err := json.Unmarshal(blob, recv); err != nil { - assert.Failf(t, "expecting a successful json.Unmarshal", "#%d", i) + assert.Fail(t, "expecting a successful json.Unmarshal", "#%d", i) continue } diff --git a/glide.lock b/glide.lock index b30f538a3..b0b3ff3c7 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 69359a39dbb6957c9f09167520317ad72d4bfa75f37a614b347e2510768c8a42 -updated: 2017-05-05T17:46:34.975369143Z +hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 +updated: 2017-08-11T20:28:34.550901198Z imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 @@ -11,6 +11,12 @@ imports: - log/term - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 +- name: github.com/go-playground/locales + version: 1e5f1161c6416a5ff48840eb8724a394e48cc534 + subpackages: + - currency +- name: github.com/go-playground/universal-translator + version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack version: 7a2f19628aabfe68f0766b59e74d6315f8347d22 - name: github.com/golang/snappy @@ -97,11 +103,13 @@ imports: subpackages: - transform - unicode/norm +- name: gopkg.in/go-playground/validator.v9 + version: d529ee1b0f30352444f507cc6cdac96bfd12decc - name: gopkg.in/yaml.v2 version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b testImports: - name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 subpackages: - spew - name: github.com/pmezard/go-difflib @@ -109,7 +117,7 @@ testImports: subpackages: - difflib - name: github.com/stretchr/testify - version: 
4d4bfba8f1d1027c4fdbe371823030df51419987 + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index d8bdd5872..22825a273 100644 --- a/glide.yaml +++ b/glide.yaml @@ -23,6 +23,7 @@ import: - package: golang.org/x/crypto subpackages: - ripemd160 +- package: gopkg.in/go-playground/validator.v9 testImport: - package: github.com/stretchr/testify version: ^1.1.4 From 8f1dea89f5a7b2c2d883a0310c5ef94d8980bcdb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 25 Aug 2017 16:35:37 -0400 Subject: [PATCH 202/515] db: fix memdb iterator --- db/mem_db.go | 2 +- db/mem_db_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 db/mem_db_test.go diff --git a/db/mem_db.go b/db/mem_db.go index 286624294..db40227e8 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -82,7 +82,7 @@ func newMemDBIterator() *memDBIterator { } func (it *memDBIterator) Next() bool { - if it.last >= len(it.keys) { + if it.last >= len(it.keys)-1 { return false } it.last++ diff --git a/db/mem_db_test.go b/db/mem_db_test.go new file mode 100644 index 000000000..a76e10dc8 --- /dev/null +++ b/db/mem_db_test.go @@ -0,0 +1,28 @@ +package db + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMemDbIterator(t *testing.T) { + db := NewMemDB() + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + iter := db.Iterator() + i := 0 + for iter.Next() { + assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") + i += 1 + } + assert.Equal(t, i, len(db.db), "iterator didnt cover whole db") +} From bdfd978b688ef12c0b8a9f1eef5c515298b67654 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 25 Aug 2017 16:58:37 -0400 Subject: [PATCH 203/515] update changelog --- CHANGELOG.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fad695ff..ea7410d70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,17 +1,24 @@ # Changelog -## Develop-Branch changes (unreleased) +## 0.3.0 (August 25, 2017) BREAKING CHANGES: -- [run] NewBaseService takes the new logger +- [common] NewBaseService takes the new logger - [cli] RunCaptureWithArgs now captures stderr and stdout - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) FEATURES: +- [common] various common HTTP functionality - [common] Date range parsing from string (ex. "2015-12-31:2017-12-31") +- [pubsub] New package for publish-subscribe with more advanced filtering + +BUG FIXES: + +- [common] fix atomicity of WriteFileAtomic by calling fsync +- [db] fix memDb iteration index out of range ## 0.2.2 (June 16, 2017) From bfec1ff1cd7fda9f5b2d8b570e3bec163e5f9149 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 25 Aug 2017 16:58:59 -0400 Subject: [PATCH 204/515] bump version to 0.3.0 --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 42af8ff7a..ee59a7cad 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.2.2" +const Version = "0.3.0" From 3a36776d4a99aeedaef571df0c99e0daa65f7fa8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Sep 2017 18:45:20 -0700 Subject: [PATCH 205/515] Reorder file for grokability --- log/filter.go | 76 ++++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/log/filter.go b/log/filter.go index 451088838..f6198b2bf 100644 --- a/log/filter.go +++ b/log/filter.go @@ -2,6 +2,26 @@ package log import "fmt" +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) + +type filter struct 
{ + next Logger + allowed level // XOR'd levels for default case + allowedKeyvals map[keyval]level // When key-value match, use this level + errNotAllowed error +} + +type keyval struct { + key interface{} + value interface{} +} + // NewFilter wraps next and implements filtering. See the commentary on the // Option functions for a detailed description of how to configure levels. If // no options are provided, all leveled log events created with Debug, Info or @@ -17,35 +37,6 @@ func NewFilter(next Logger, options ...Option) Logger { return l } -// AllowLevel returns an option for the given level or error if no option exist -// for such level. -func AllowLevel(lvl string) (Option, error) { - switch lvl { - case "debug": - return AllowDebug(), nil - case "info": - return AllowInfo(), nil - case "error": - return AllowError(), nil - case "none": - return AllowNone(), nil - default: - return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) - } -} - -type filter struct { - next Logger - allowed level - allowedKeyvals map[keyval]level - errNotAllowed error -} - -type keyval struct { - key interface{} - value interface{} -} - func (l *filter) Info(msg string, keyvals ...interface{}) error { levelAllowed := l.allowed&levelInfo != 0 if !levelAllowed { @@ -96,9 +87,28 @@ func (l *filter) With(keyvals ...interface{}) Logger { return &filter{next: l.next.With(keyvals...), allowed: l.allowed, errNotAllowed: l.errNotAllowed, allowedKeyvals: l.allowedKeyvals} } +//-------------------------------------------------------------------------------- + // Option sets a parameter for the filter. type Option func(*filter) +// AllowLevel returns an option for the given level or error if no option exist +// for such level. 
+func AllowLevel(lvl string) (Option, error) { + switch lvl { + case "debug": + return AllowDebug(), nil + case "info": + return AllowInfo(), nil + case "error": + return AllowError(), nil + case "none": + return AllowNone(), nil + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) + } +} + // AllowAll is an alias for AllowDebug. func AllowAll() Option { return AllowDebug() @@ -155,11 +165,3 @@ func AllowErrorWith(key interface{}, value interface{}) Option { func AllowNoneWith(key interface{}, value interface{}) Option { return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } } - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelError -) From 9a2438e0dcd35318f839d23debf539193ff50a44 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 20 Sep 2017 02:48:04 -0400 Subject: [PATCH 206/515] common: Fingerprint comment --- common/byteslice.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/common/byteslice.go b/common/byteslice.go index be828f065..ceaf06bd3 100644 --- a/common/byteslice.go +++ b/common/byteslice.go @@ -4,6 +4,9 @@ import ( "bytes" ) +// Fingerprint returns the first 6 bytes of a byte slice. +// If the slice is less than 6 bytes, the fingerprint +// contains trailing zeroes. func Fingerprint(slice []byte) []byte { fingerprint := make([]byte, 6) copy(fingerprint, slice) From 65a07b80a33196e063601cb97b51e45dcbd7d66c Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 18 Sep 2017 18:01:14 -0700 Subject: [PATCH 207/515] change logger interface to not return errors (Refs #50) See https://github.com/go-kit/kit/issues/164 for discussion of why kitlog returns an error. ``` Package log is designed to be used for more than simple application info/warning/error logging; it's suitable for log-structured data in an e.g. Lambda architecture, where each invocation is important. 
I agree with you that if we were doing only application logging the error would be more noise than signal. But the scope of the package is larger than that. ``` Since we are doing only application logging and we're not checking errors, it is safe to get rid them. --- log/filter.go | 31 +++++++++++-------------------- log/filter_test.go | 18 ------------------ log/logger.go | 6 +++--- log/nop_logger.go | 14 +++----------- log/nop_logger_test.go | 18 ------------------ log/tm_logger.go | 12 ++++++------ log/tm_logger_test.go | 11 ----------- log/tracing_logger.go | 12 ++++++------ 8 files changed, 29 insertions(+), 93 deletions(-) delete mode 100644 log/nop_logger_test.go diff --git a/log/filter.go b/log/filter.go index f6198b2bf..768c09b85 100644 --- a/log/filter.go +++ b/log/filter.go @@ -14,7 +14,6 @@ type filter struct { next Logger allowed level // XOR'd levels for default case allowedKeyvals map[keyval]level // When key-value match, use this level - errNotAllowed error } type keyval struct { @@ -37,28 +36,28 @@ func NewFilter(next Logger, options ...Option) Logger { return l } -func (l *filter) Info(msg string, keyvals ...interface{}) error { +func (l *filter) Info(msg string, keyvals ...interface{}) { levelAllowed := l.allowed&levelInfo != 0 if !levelAllowed { - return l.errNotAllowed + return } - return l.next.Info(msg, keyvals...) + l.next.Info(msg, keyvals...) } -func (l *filter) Debug(msg string, keyvals ...interface{}) error { +func (l *filter) Debug(msg string, keyvals ...interface{}) { levelAllowed := l.allowed&levelDebug != 0 if !levelAllowed { - return l.errNotAllowed + return } - return l.next.Debug(msg, keyvals...) + l.next.Debug(msg, keyvals...) } -func (l *filter) Error(msg string, keyvals ...interface{}) error { +func (l *filter) Error(msg string, keyvals ...interface{}) { levelAllowed := l.allowed&levelError != 0 if !levelAllowed { - return l.errNotAllowed + return } - return l.next.Error(msg, keyvals...) + l.next.Error(msg, keyvals...) 
} // With implements Logger by constructing a new filter with a keyvals appended @@ -80,11 +79,11 @@ func (l *filter) With(keyvals ...interface{}) Logger { for i := len(keyvals) - 2; i >= 0; i -= 2 { for kv, allowed := range l.allowedKeyvals { if keyvals[i] == kv.key && keyvals[i+1] == kv.value { - return &filter{next: l.next.With(keyvals...), allowed: allowed, errNotAllowed: l.errNotAllowed, allowedKeyvals: l.allowedKeyvals} + return &filter{next: l.next.With(keyvals...), allowed: allowed, allowedKeyvals: l.allowedKeyvals} } } } - return &filter{next: l.next.With(keyvals...), allowed: l.allowed, errNotAllowed: l.errNotAllowed, allowedKeyvals: l.allowedKeyvals} + return &filter{next: l.next.With(keyvals...), allowed: l.allowed, allowedKeyvals: l.allowedKeyvals} } //-------------------------------------------------------------------------------- @@ -138,14 +137,6 @@ func allowed(allowed level) Option { return func(l *filter) { l.allowed = allowed } } -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return func(l *filter) { l.errNotAllowed = err } -} - // AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. 
func AllowDebugWith(key interface{}, value interface{}) Option { return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } diff --git a/log/filter_test.go b/log/filter_test.go index 4665db3df..fafafacb0 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -2,7 +2,6 @@ package log_test import ( "bytes" - "errors" "strings" "testing" @@ -71,23 +70,6 @@ func TestVariousLevels(t *testing.T) { } } -func TestErrNotAllowed(t *testing.T) { - myError := errors.New("squelched!") - opts := []log.Option{ - log.AllowError(), - log.ErrNotAllowed(myError), - } - logger := log.NewFilter(log.NewNopLogger(), opts...) - - if want, have := myError, logger.Info("foo", "bar", "baz"); want != have { - t.Errorf("want %#+v, have %#+v", want, have) - } - - if want, have := error(nil), logger.Error("foo", "bar", "baz"); want != have { - t.Errorf("want %#+v, have %#+v", want, have) - } -} - func TestLevelContext(t *testing.T) { var buf bytes.Buffer diff --git a/log/logger.go b/log/logger.go index be273f484..ddb187bc7 100644 --- a/log/logger.go +++ b/log/logger.go @@ -8,9 +8,9 @@ import ( // Logger is what any Tendermint library should take. type Logger interface { - Debug(msg string, keyvals ...interface{}) error - Info(msg string, keyvals ...interface{}) error - Error(msg string, keyvals ...interface{}) error + Debug(msg string, keyvals ...interface{}) + Info(msg string, keyvals ...interface{}) + Error(msg string, keyvals ...interface{}) With(keyvals ...interface{}) Logger } diff --git a/log/nop_logger.go b/log/nop_logger.go index 306a8405f..12d75abe6 100644 --- a/log/nop_logger.go +++ b/log/nop_logger.go @@ -8,17 +8,9 @@ var _ Logger = (*nopLogger)(nil) // NewNopLogger returns a logger that doesn't do anything. 
func NewNopLogger() Logger { return &nopLogger{} } -func (nopLogger) Info(string, ...interface{}) error { - return nil -} - -func (nopLogger) Debug(string, ...interface{}) error { - return nil -} - -func (nopLogger) Error(string, ...interface{}) error { - return nil -} +func (nopLogger) Info(string, ...interface{}) {} +func (nopLogger) Debug(string, ...interface{}) {} +func (nopLogger) Error(string, ...interface{}) {} func (l *nopLogger) With(...interface{}) Logger { return l diff --git a/log/nop_logger_test.go b/log/nop_logger_test.go deleted file mode 100644 index d2009fdf0..000000000 --- a/log/nop_logger_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package log_test - -import ( - "testing" - - "github.com/tendermint/tmlibs/log" -) - -func TestNopLogger(t *testing.T) { - t.Parallel() - logger := log.NewNopLogger() - if err := logger.Info("Hello", "abc", 123); err != nil { - t.Error(err) - } - if err := logger.With("def", "ghi").Debug(""); err != nil { - t.Error(err) - } -} diff --git a/log/tm_logger.go b/log/tm_logger.go index a903dbe8d..dc6932dd8 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -50,21 +50,21 @@ func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) te } // Info logs a message at level Info. -func (l *tmLogger) Info(msg string, keyvals ...interface{}) error { +func (l *tmLogger) Info(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Info(l.srcLogger) - return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) + kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } // Debug logs a message at level Debug. -func (l *tmLogger) Debug(msg string, keyvals ...interface{}) error { +func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Debug(l.srcLogger) - return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) + kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } // Error logs a message at level Error. 
-func (l *tmLogger) Error(msg string, keyvals ...interface{}) error { +func (l *tmLogger) Error(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Error(l.srcLogger) - return kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) + kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) } // With returns a new contextual logger with keyvals prepended to those passed diff --git a/log/tm_logger_test.go b/log/tm_logger_test.go index 15c940ce8..8cd2f8274 100644 --- a/log/tm_logger_test.go +++ b/log/tm_logger_test.go @@ -7,17 +7,6 @@ import ( "github.com/tendermint/tmlibs/log" ) -func TestTMLogger(t *testing.T) { - t.Parallel() - logger := log.NewTMLogger(ioutil.Discard) - if err := logger.Info("Hello", "abc", 123); err != nil { - t.Error(err) - } - if err := logger.With("def", "ghi").Debug(""); err != nil { - t.Error(err) - } -} - func BenchmarkTMLoggerSimple(b *testing.B) { benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) } diff --git a/log/tracing_logger.go b/log/tracing_logger.go index 794bdaeb8..d2a6ff44e 100644 --- a/log/tracing_logger.go +++ b/log/tracing_logger.go @@ -28,16 +28,16 @@ type tracingLogger struct { next Logger } -func (l *tracingLogger) Info(msg string, keyvals ...interface{}) error { - return l.next.Info(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { + l.next.Info(msg, formatErrors(keyvals)...) } -func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) error { - return l.next.Debug(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { + l.next.Debug(msg, formatErrors(keyvals)...) } -func (l *tracingLogger) Error(msg string, keyvals ...interface{}) error { - return l.next.Error(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { + l.next.Error(msg, formatErrors(keyvals)...) 
} func (l *tracingLogger) With(keyvals ...interface{}) Logger { From 246082368a06552c217803f12aa04c41eed64c0d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 18 Sep 2017 18:06:10 -0700 Subject: [PATCH 208/515] add changelog entry [ci skip] --- CHANGELOG.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea7410d70..02278c1ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,19 @@ # Changelog -## 0.3.0 (August 25, 2017) +## 0.3.0 (September 21, 2017_ BREAKING CHANGES: +- [log] logger functions no longer returns an error - [common] NewBaseService takes the new logger - [cli] RunCaptureWithArgs now captures stderr and stdout - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) - - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) + - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) FEATURES: - [common] various common HTTP functionality -- [common] Date range parsing from string (ex. "2015-12-31:2017-12-31") +- [common] Date range parsing from string (ex. "2015-12-31:2017-12-31") - [pubsub] New package for publish-subscribe with more advanced filtering BUG FIXES: @@ -30,7 +31,7 @@ FEATURES: IMPROVEMENTS: - [cli] Error handling for tests -- [cli] Support dashes in ENV variables +- [cli] Support dashes in ENV variables BUG FIXES: From d71d1394ec48f89fab50bf8e592e3de6200b5b94 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 21 Sep 2017 16:11:28 -0700 Subject: [PATCH 209/515] call fsync after flush (Refs #573) short: flushing the bufio buffer is not enough to ensure data consistency. long: Saving an entry to the WAL calls writeLine to append data to the autofile group backing the WAL, then calls group.Flush() to flush that data to persistent storage. 
group.Flush() in turn proxies to headBuf.flush(), flushing the active bufio.BufferedWriter. However, BufferedWriter wraps a Writer, not another BufferedWriter, and the way it flushes is by calling io.Writer.Write() to clear the BufferedWriter's buffer. The io.Writer we're wrapping here is AutoFile, whose Write method calls os.File.Write(), performing an unbuffered write to the operating system, where, I assume, it sits in the OS buffers awaiting sync. This means that Wal.Save does not, in fact, ensure the saved operation is synced to disk before returning. --- autofile/group.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/autofile/group.go b/autofile/group.go index 39f274e04..ce3e30009 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -153,7 +153,11 @@ func (g *Group) WriteLine(line string) error { func (g *Group) Flush() error { g.mtx.Lock() defer g.mtx.Unlock() - return g.headBuf.Flush() + err := g.headBuf.Flush() + if err == nil { + err = g.Head.Sync() + } + return err } func (g *Group) processTicks() { From bffe6744ec277d60f707ab442e25513617842f8e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 22 Sep 2017 09:38:58 -0400 Subject: [PATCH 210/515] changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02278c1ed..bf39e5441 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 0.3.0 (September 21, 2017_ +## 0.3.0 (September 22, 2017) BREAKING CHANGES: @@ -14,12 +14,14 @@ FEATURES: - [common] various common HTTP functionality - [common] Date range parsing from string (ex. 
"2015-12-31:2017-12-31") +- [common] ProtocolAndAddress function - [pubsub] New package for publish-subscribe with more advanced filtering BUG FIXES: - [common] fix atomicity of WriteFileAtomic by calling fsync - [db] fix memDb iteration index out of range +- [autofile] fix Flush by calling fsync ## 0.2.2 (June 16, 2017) From d6e03d2368e9d1fa11ed19f9a4d54e4c578d727c Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Sep 2017 11:42:29 -0400 Subject: [PATCH 211/515] linting: add to Makefile & do some fixes --- Makefile | 43 ++++++++++++++++++++++++++++++++++++- autofile/autofile_test.go | 4 +++- autofile/group.go | 4 +--- autofile/group_test.go | 3 +-- autofile/sighup_watcher.go | 2 +- cli/flags/log_level_test.go | 2 -- clist/clist_test.go | 6 ++++-- common/cmap.go | 4 ++-- common/errors.go | 2 +- common/http_test.go | 2 +- common/os.go | 10 +++------ common/string.go | 5 +---- events/events_test.go | 15 ++++++------- flowrate/io_test.go | 5 +---- log/filter_test.go | 6 ++---- log/tmfmt_logger.go | 7 +++--- log/tracing_logger_test.go | 3 +-- merkle/simple_tree.go | 3 ++- process/util.go | 4 ++-- pubsub/query/query.peg.go | 3 +++ 20 files changed, 82 insertions(+), 51 deletions(-) diff --git a/Makefile b/Makefile index 8e43dd11a..ba164ec1d 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,15 @@ .PHONY: all test get_vendor_deps ensure_tools GOTOOLS = \ - github.com/Masterminds/glide + github.com/Masterminds/glide \ + github.com/alecthomas/gometalinter + REPO:=github.com/tendermint/tmlibs all: test +NOVENDOR = go list github.com/tendermint/tmlibs/... | grep -v /vendor/ + test: go test `glide novendor` @@ -16,3 +20,40 @@ get_vendor_deps: ensure_tools ensure_tools: go get $(GOTOOLS) + +metalinter: ensure_tools + @gometalinter --install + gometalinter --vendor --deadline=600s --enable-all --disable=lll ./... 
+ +metalinter_test: ensure_tools + @gometalinter --install + gometalinter --vendor --deadline=600s --disable-all \ + --enable=deadcode \ + --enable=gas \ + --enable=goconst \ + --enable=goimports \ + --enable=gosimple \ + --enable=gotype \ + --enable=ineffassign \ + --enable=megacheck \ + --enable=misspell \ + --enable=staticcheck \ + --enable=safesql \ + --enable=structcheck \ + --enable=unconvert \ + --enable=unused \ + --enable=varcheck \ + --enable=vetshadow \ + --enable=interfacer \ + --enable=unparam \ + --enable=vet \ + ./... + + #--enable=aligncheck \ + #--enable=dupl \ + #--enable=errcheck \ + #--enable=gocyclo \ + #--enable=golint \ <== comments on anything exported + #--enable=interfacer \ + #--enable=unparam \ + #--enable=vet \ diff --git a/autofile/autofile_test.go b/autofile/autofile_test.go index 8f8017e1b..c7aa93beb 100644 --- a/autofile/autofile_test.go +++ b/autofile/autofile_test.go @@ -1,12 +1,14 @@ +// nolint: goimports package autofile import ( - . "github.com/tendermint/tmlibs/common" "os" "sync/atomic" "syscall" "testing" "time" + + . 
"github.com/tendermint/tmlibs/common" ) func TestSIGHUP(t *testing.T) { diff --git a/autofile/group.go b/autofile/group.go index ce3e30009..689b1cb92 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -107,7 +107,6 @@ func (g *Group) OnStart() error { func (g *Group) OnStop() { g.BaseService.OnStop() g.ticker.Stop() - return } func (g *Group) SetHeadSizeLimit(limit int64) { @@ -568,8 +567,7 @@ func (gr *GroupReader) ReadLine() (string, error) { bytesRead, err := gr.curReader.ReadBytes('\n') if err == io.EOF { // Open the next file - err := gr.openFile(gr.curIndex + 1) - if err != nil { + if err := gr.openFile(gr.curIndex + 1); err != nil { return "", err } if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { diff --git a/autofile/group_test.go b/autofile/group_test.go index 92e259701..91c6a0bbd 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -100,8 +100,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { - err := g.WriteLine(RandStr(999)) - if err != nil { + if err := g.WriteLine(RandStr(999)); err != nil { t.Fatal("Error appending to head", err) } } diff --git a/autofile/sighup_watcher.go b/autofile/sighup_watcher.go index facc238d5..56fbd4d86 100644 --- a/autofile/sighup_watcher.go +++ b/autofile/sighup_watcher.go @@ -22,7 +22,7 @@ func initSighupWatcher() { signal.Notify(c, syscall.SIGHUP) go func() { - for _ = range c { + for range c { sighupWatchers.closeAll() atomic.AddInt32(&sighupCounter, 1) } diff --git a/cli/flags/log_level_test.go b/cli/flags/log_level_test.go index 458a9e24d..faf9b19db 100644 --- a/cli/flags/log_level_test.go +++ b/cli/flags/log_level_test.go @@ -49,8 +49,6 @@ func TestParseLogLevel(t *testing.T) { t.Fatal(err) } - logger = logger - buf.Reset() logger.With("module", "wire").Debug("Kingpin") diff --git a/clist/clist_test.go b/clist/clist_test.go index ab5cf4b26..2063cf465 100644 --- a/clist/clist_test.go +++ b/clist/clist_test.go @@ -55,6 
+55,7 @@ func TestSmall(t *testing.T) { This test is quite hacky because it relies on SetFinalizer which isn't guaranteed to run at all. */ +// nolint: megacheck func _TestGCFifo(t *testing.T) { const numElements = 1000000 @@ -102,6 +103,7 @@ func _TestGCFifo(t *testing.T) { This test is quite hacky because it relies on SetFinalizer which isn't guaranteed to run at all. */ +// nolint: megacheck func _TestGCRandom(t *testing.T) { const numElements = 1000000 @@ -132,7 +134,7 @@ func _TestGCRandom(t *testing.T) { for _, i := range rand.Perm(numElements) { el := els[i] l.Remove(el) - el = el.Next() + _ = el.Next() } runtime.GC() @@ -153,7 +155,7 @@ func TestScanRightDeleteRandom(t *testing.T) { l := New() stop := make(chan struct{}) - els := make([]*CElement, numElements, numElements) + els := make([]*CElement, numElements) for i := 0; i < numElements; i++ { el := l.PushBack(i) els[i] = el diff --git a/common/cmap.go b/common/cmap.go index 5de6fa2fa..e2a140dd0 100644 --- a/common/cmap.go +++ b/common/cmap.go @@ -10,7 +10,7 @@ type CMap struct { func NewCMap() *CMap { return &CMap{ - m: make(map[string]interface{}, 0), + m: make(map[string]interface{}), } } @@ -48,7 +48,7 @@ func (cm *CMap) Size() int { func (cm *CMap) Clear() { cm.l.Lock() defer cm.l.Unlock() - cm.m = make(map[string]interface{}, 0) + cm.m = make(map[string]interface{}) } func (cm *CMap) Values() []interface{} { diff --git a/common/errors.go b/common/errors.go index 3a1b09542..039342a67 100644 --- a/common/errors.go +++ b/common/errors.go @@ -21,7 +21,7 @@ func (se StackError) Error() string { // panic wrappers // A panic resulting from a sanity check means there is a programmer error -// and some gaurantee is not satisfied. +// and some guarantee is not satisfied. 
func PanicSanity(v interface{}) { panic(Fmt("Panicked on a Sanity Check: %v", v)) } diff --git a/common/http_test.go b/common/http_test.go index 73761fb1e..4272f6062 100644 --- a/common/http_test.go +++ b/common/http_test.go @@ -95,7 +95,7 @@ func TestWriteCode(t *testing.T) { common.WriteCode(w, &marshalFailer{}, code) wantCode := http.StatusBadRequest assert.Equal(t, w.Code, wantCode, "#%d", i) - assert.True(t, strings.Contains(string(w.Body.Bytes()), errFooFailed.Error()), + assert.True(t, strings.Contains(w.Body.String(), errFooFailed.Error()), "#%d: expected %q in the error message", i, errFooFailed) } } diff --git a/common/os.go b/common/os.go index 9c2bda508..8b7143f5a 100644 --- a/common/os.go +++ b/common/os.go @@ -8,6 +8,7 @@ import ( "os" "os/signal" "strings" + "syscall" ) var ( @@ -17,7 +18,7 @@ var ( func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) - signal.Notify(c, os.Kill) + signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { fmt.Printf("captured %v, exiting...\n", sig) @@ -83,12 +84,7 @@ func MustReadFile(filePath string) []byte { } func WriteFile(filePath string, contents []byte, mode os.FileMode) error { - err := ioutil.WriteFile(filePath, contents, mode) - if err != nil { - return err - } - // fmt.Printf("File written to %v.\n", filePath) - return nil + return ioutil.WriteFile(filePath, contents, mode) } func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { diff --git a/common/string.go b/common/string.go index 2818f5ed5..1ab91f15a 100644 --- a/common/string.go +++ b/common/string.go @@ -31,10 +31,7 @@ func LeftPadString(s string, totalLength int) string { func IsHex(s string) bool { if len(s) > 2 && s[:2] == "0x" { _, err := hex.DecodeString(s[2:]) - if err != nil { - return false - } - return true + return err == nil } return false } diff --git a/events/events_test.go b/events/events_test.go index c1b48b16f..dee50e5bd 100644 --- a/events/events_test.go +++ 
b/events/events_test.go @@ -14,7 +14,7 @@ import ( func TestAddListenerForEventFireOnce(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } messages := make(chan EventData) @@ -34,7 +34,7 @@ func TestAddListenerForEventFireOnce(t *testing.T) { func TestAddListenerForEventFireMany(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum := make(chan uint64) @@ -63,7 +63,7 @@ func TestAddListenerForEventFireMany(t *testing.T) { func TestAddListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum := make(chan uint64) @@ -108,7 +108,7 @@ func TestAddListenerForDifferentEvents(t *testing.T) { func TestAddDifferentListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) @@ -168,7 +168,7 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { func TestAddAndRemoveListener(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) @@ -213,7 +213,7 @@ func TestAddAndRemoveListener(t *testing.T) { func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } count := 10 @@ -266,7 +266,7 @@ func 
TestRemoveListener(t *testing.T) { func TestRemoveListenersAsync(t *testing.T) { evsw := NewEventSwitch() started, err := evsw.Start() - if started == false || err != nil { + if !started || err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) @@ -377,5 +377,4 @@ func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, } doneChan <- sentSum close(doneChan) - return } diff --git a/flowrate/io_test.go b/flowrate/io_test.go index 6d4934a8a..db40337c9 100644 --- a/flowrate/io_test.go +++ b/flowrate/io_test.go @@ -171,10 +171,7 @@ func statusesAreEqual(s1 *Status, s2 *Status) bool { } func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - if d2-d1 <= maxDeviation { - return true - } - return false + return d2-d1 <= maxDeviation } func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { diff --git a/log/filter_test.go b/log/filter_test.go index fafafacb0..8d8b3b27c 100644 --- a/log/filter_test.go +++ b/log/filter_test.go @@ -73,8 +73,7 @@ func TestVariousLevels(t *testing.T) { func TestLevelContext(t *testing.T) { var buf bytes.Buffer - var logger log.Logger - logger = log.NewTMJSONLogger(&buf) + logger := log.NewTMJSONLogger(&buf) logger = log.NewFilter(logger, log.AllowError()) logger = logger.With("context", "value") @@ -93,8 +92,7 @@ func TestLevelContext(t *testing.T) { func TestVariousAllowWith(t *testing.T) { var buf bytes.Buffer - var logger log.Logger - logger = log.NewTMJSONLogger(&buf) + logger := log.NewTMJSONLogger(&buf) logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) logger1.With("context", "value").Info("foo", "bar", "baz") diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index 14028d756..2b464a6b0 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -49,9 +49,10 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { enc.Reset() defer tmfmtEncoderPool.Put(enc) + const unknown = "unknown" 
lvl := "none" - msg := "unknown" - module := "unknown" + msg := unknown + module := unknown // indexes of keys to skip while encoding later excludeIndexes := make([]int, 0) @@ -90,7 +91,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { // Stopping ... - message enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) - if module != "unknown" { + if module != unknown { enc.buf.WriteString("module=" + module + " ") } diff --git a/log/tracing_logger_test.go b/log/tracing_logger_test.go index 584b34bef..6b0838ca8 100644 --- a/log/tracing_logger_test.go +++ b/log/tracing_logger_test.go @@ -14,8 +14,7 @@ import ( func TestTracingLogger(t *testing.T) { var buf bytes.Buffer - var logger log.Logger - logger = log.NewTMJSONLogger(&buf) + logger := log.NewTMJSONLogger(&buf) logger1 := log.NewTracingLogger(logger) err1 := errors.New("Courage is grace under pressure.") diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index b5520f723..b373743fc 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -22,6 +22,7 @@ For larger datasets, use IAVLTree. */ +// nolint: goimports package merkle import ( @@ -31,8 +32,8 @@ import ( "golang.org/x/crypto/ripemd160" - . "github.com/tendermint/tmlibs/common" "github.com/tendermint/go-wire" + . 
"github.com/tendermint/tmlibs/common" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { diff --git a/process/util.go b/process/util.go index b3e0aef11..24cf35280 100644 --- a/process/util.go +++ b/process/util.go @@ -15,8 +15,8 @@ func Run(dir string, command string, args []string) (string, bool, error) { <-proc.WaitCh if proc.ExitState.Success() { - return string(outFile.Bytes()), true, nil + return outFile.String(), true, nil } else { - return string(outFile.Bytes()), false, nil + return outFile.String(), false, nil } } diff --git a/pubsub/query/query.peg.go b/pubsub/query/query.peg.go index 37ce75cd9..8c3e83efc 100644 --- a/pubsub/query/query.peg.go +++ b/pubsub/query/query.peg.go @@ -1,3 +1,6 @@ +// nolint: megacheck +// nolint: varcheck +// nolint: deadcode package query import ( From 3c57c24921f8197343cf6e592de31b5fd31509cc Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Sep 2017 12:14:27 -0400 Subject: [PATCH 212/515] linting: next round of fixes --- Makefile | 1 - autofile/group.go | 4 ++-- autofile/group_test.go | 8 +++----- common/service.go | 2 +- db/c_level_db_test.go | 2 +- db/go_level_db_test.go | 2 +- pubsub/query/query.peg.go | 4 +--- 7 files changed, 9 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index ba164ec1d..6b2c7463a 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,6 @@ metalinter_test: ensure_tools --enable=varcheck \ --enable=vetshadow \ --enable=interfacer \ - --enable=unparam \ --enable=vet \ ./... 
diff --git a/autofile/group.go b/autofile/group.go index 689b1cb92..eedb67b50 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -567,8 +567,8 @@ func (gr *GroupReader) ReadLine() (string, error) { bytesRead, err := gr.curReader.ReadBytes('\n') if err == io.EOF { // Open the next file - if err := gr.openFile(gr.curIndex + 1); err != nil { - return "", err + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return "", err1 } if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil diff --git a/autofile/group_test.go b/autofile/group_test.go index 91c6a0bbd..0cfcef72f 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -77,8 +77,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. - err := g.WriteLine(RandStr(999)) - if err != nil { + if err := g.WriteLine(RandStr(999)); err != nil { t.Fatal("Error appending to head", err) } g.Flush() @@ -88,8 +87,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. - err = g.WriteLine(RandStr(999)) - if err != nil { + if err := g.WriteLine(RandStr(999)); err != nil { t.Fatal("Error appending to head", err) } g.Flush() @@ -112,7 +110,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) // Write 1000 more bytes. 
- _, err = g.Head.Write([]byte(RandStr(999) + "\n")) + _, err := g.Head.Write([]byte(RandStr(999) + "\n")) if err != nil { t.Fatal("Error appending to head", err) } diff --git a/common/service.go b/common/service.go index 71fc03cb9..5ac386316 100644 --- a/common/service.go +++ b/common/service.go @@ -151,7 +151,7 @@ func (bs *BaseService) Reset() (bool, error) { return false, nil } // never happens - return false, nil + return false, nil // nolint: vet } // Implements Service diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 0ee6d6414..e7336cc5f 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -50,7 +50,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) if val == 0 { if !bytes.Equal(valBytes, nil) { - b.Errorf("Expected %X for %v, got %X", + b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes) break } diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index 0603b2d4f..2cd3192c3 100644 --- a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -49,7 +49,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) if val == 0 { if !bytes.Equal(valBytes, nil) { - b.Errorf("Expected %X for %v, got %X", + b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes) break } diff --git a/pubsub/query/query.peg.go b/pubsub/query/query.peg.go index 8c3e83efc..c86e4a47f 100644 --- a/pubsub/query/query.peg.go +++ b/pubsub/query/query.peg.go @@ -1,6 +1,4 @@ -// nolint: megacheck -// nolint: varcheck -// nolint: deadcode +// nolint package query import ( From 2681f32bddcc061e8ae062734404e73cbe6ea5d0 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Sep 2017 12:35:52 -0400 Subject: [PATCH 213/515] circle: add metalinter to test --- Makefile | 4 +--- circle.yml | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 6b2c7463a..902197e77 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,7 @@ 
metalinter_test: ensure_tools --enable=gosimple \ --enable=gotype \ --enable=ineffassign \ + --enable=interfacer \ --enable=megacheck \ --enable=misspell \ --enable=staticcheck \ @@ -44,7 +45,6 @@ metalinter_test: ensure_tools --enable=unused \ --enable=varcheck \ --enable=vetshadow \ - --enable=interfacer \ --enable=vet \ ./... @@ -53,6 +53,4 @@ metalinter_test: ensure_tools #--enable=errcheck \ #--enable=gocyclo \ #--enable=golint \ <== comments on anything exported - #--enable=interfacer \ #--enable=unparam \ - #--enable=vet \ diff --git a/circle.yml b/circle.yml index 23ac4bd9f..8e3ad168b 100644 --- a/circle.yml +++ b/circle.yml @@ -18,4 +18,4 @@ dependencies: test: override: - "go version" - - "cd $PROJECT_PATH && make get_vendor_deps && make test" + - "cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && make test" From 3d98504c4c2d3d0426855a708baa0b975d6bf406 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 22 Sep 2017 13:20:13 -0400 Subject: [PATCH 214/515] common: WriteFileAtomic use tempfile in current dir --- common/os.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/os.go b/common/os.go index 9c2bda508..71ee88422 100644 --- a/common/os.go +++ b/common/os.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "os/signal" + "path/filepath" "strings" ) @@ -101,7 +102,8 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { // WriteFileAtomic writes newBytes to temp and atomically moves to filePath // when everything else succeeds. 
func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { - f, err := ioutil.TempFile("", "") + dir := filepath.Dir(filePath) + f, err := ioutil.TempFile(dir, "") if err != nil { return err } From 35838b6af8238c793684e1249693c7dfe22793db Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 22 Sep 2017 13:22:02 -0400 Subject: [PATCH 215/515] changeloge, version --- CHANGELOG.md | 6 ++++++ version/version.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf39e5441..4f5bd59ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.3.1 (September 22, 2017) + +BUG FIXES: + +- [common] fix WriteFileAtomic to not use /tmp, which can be on another device + ## 0.3.0 (September 22, 2017) BREAKING CHANGES: diff --git a/version/version.go b/version/version.go index ee59a7cad..6e030624e 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.3.0" +const Version = "0.3.1" From 8be8127351396593af941395f59c42e136f3e698 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Sun, 24 Sep 2017 20:00:42 -0600 Subject: [PATCH 216/515] db: fix MemDB.Close Fixes https://github.com/tendermint/tmlibs/issues/55 MemDB previously mistakenly set the actual DB pointer to nil although that side effect is not visible to the outside world since p is an identifier within the scope of just that function call. However, @melekes and I had a discussion in which we came to the conclusion that Close for an in-memory DB should instead be a noop and not cause any data loss. See the discussion on https://github.com/tendermint/tmlibs/pull/56. 
--- db/mem_db.go | 8 +++++--- db/mem_db_test.go | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/db/mem_db.go b/db/mem_db.go index db40227e8..58e74895b 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -52,9 +52,11 @@ func (db *MemDB) DeleteSync(key []byte) { } func (db *MemDB) Close() { - db.mtx.Lock() - defer db.mtx.Unlock() - db = nil + // Close is a noop since for an in-memory + // database, we don't have a destination + // to flush contents to nor do we want + // any data loss on invoking Close() + // See the discussion in https://github.com/tendermint/tmlibs/pull/56 } func (db *MemDB) Print() { diff --git a/db/mem_db_test.go b/db/mem_db_test.go index a76e10dc8..503e361f1 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMemDbIterator(t *testing.T) { @@ -26,3 +27,22 @@ func TestMemDbIterator(t *testing.T) { } assert.Equal(t, i, len(db.db), "iterator didnt cover whole db") } + +func TestMemDBClose(t *testing.T) { + db := NewMemDB() + copyDB := func(orig map[string][]byte) map[string][]byte { + copy := make(map[string][]byte) + for k, v := range orig { + copy[k] = v + } + return copy + } + k, v := []byte("foo"), []byte("bar") + db.Set(k, v) + require.Equal(t, db.Get(k), v, "expecting a successful get") + copyBefore := copyDB(db.db) + db.Close() + require.Equal(t, db.Get(k), v, "Close is a noop, expecting a successful get") + copyAfter := copyDB(db.db) + require.Equal(t, copyBefore, copyAfter, "Close is a noop and shouldn't modify any internal data") +} From 296d2235455dc62627d9c3feaf31306497fd4aae Mon Sep 17 00:00:00 2001 From: Alexis Sellier Date: Thu, 28 Sep 2017 17:26:24 +0200 Subject: [PATCH 217/515] Add Release & Error methods to Iterator We need this to properly support LevelDB backend, which needs to be released if we don't want to leak memory. 
--- db/db.go | 5 ++++- db/mem_db.go | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index aa8ff48a8..51283ac2a 100644 --- a/db/db.go +++ b/db/db.go @@ -10,10 +10,10 @@ type DB interface { DeleteSync([]byte) Close() NewBatch() Batch + Iterator() Iterator // For debugging Print() - Iterator() Iterator Stats() map[string]string } @@ -28,6 +28,9 @@ type Iterator interface { Key() []byte Value() []byte + + Release() + Error() error } //----------------------------------------------------------------------------- diff --git a/db/mem_db.go b/db/mem_db.go index db40227e8..04d48cac0 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -97,6 +97,16 @@ func (it *memDBIterator) Value() []byte { return it.db.Get(it.Key()) } +func (it *memDBIterator) Release() { + it.db = nil + it.keys = nil + return +} + +func (it *memDBIterator) Error() error { + return nil +} + func (db *MemDB) Iterator() Iterator { it := newMemDBIterator() it.db = db From 219d4e8427b2ddb0e9146ca7319bce2e1f22a6ce Mon Sep 17 00:00:00 2001 From: Alexis Sellier Date: Thu, 28 Sep 2017 17:35:01 +0200 Subject: [PATCH 218/515] Add IteratorPrefix method to Iterator --- db/db.go | 1 + db/go_level_db.go | 5 +++++ db/mem_db.go | 9 ++++++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index 51283ac2a..8156c1e92 100644 --- a/db/db.go +++ b/db/db.go @@ -11,6 +11,7 @@ type DB interface { Close() NewBatch() Batch Iterator() Iterator + IteratorPrefix([]byte) Iterator // For debugging Print() diff --git a/db/go_level_db.go b/db/go_level_db.go index 54ae1149f..31c85ce86 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -7,6 +7,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" . 
"github.com/tendermint/tmlibs/common" ) @@ -119,6 +120,10 @@ func (db *GoLevelDB) Iterator() Iterator { return db.db.NewIterator(nil, nil) } +func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator { + return db.db.NewIterator(util.BytesPrefix(prefix), nil) +} + func (db *GoLevelDB) NewBatch() Batch { batch := new(leveldb.Batch) return &goLevelDBBatch{db, batch} diff --git a/db/mem_db.go b/db/mem_db.go index 04d48cac0..561633291 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "strings" "sync" ) @@ -108,6 +109,10 @@ func (it *memDBIterator) Error() error { } func (db *MemDB) Iterator() Iterator { + return db.IteratorPrefix([]byte{}) +} + +func (db *MemDB) IteratorPrefix(prefix []byte) Iterator { it := newMemDBIterator() it.db = db it.last = -1 @@ -117,7 +122,9 @@ func (db *MemDB) Iterator() Iterator { // unfortunately we need a copy of all of the keys for key, _ := range db.db { - it.keys = append(it.keys, key) + if strings.HasPrefix(key, string(prefix)) { + it.keys = append(it.keys, key) + } } return it } From e9e6ec3a2c22b3139c5787dd3e6c8a61960ec522 Mon Sep 17 00:00:00 2001 From: Alexis Sellier Date: Thu, 28 Sep 2017 17:43:47 +0200 Subject: [PATCH 219/515] Implement Key/Value on Iterator for GoLevelDB This is needed because leveldb reuses the keys on each iteration. In our wrapper, we copy the key/value so that it is safe to store. 
--- db/go_level_db.go | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/db/go_level_db.go b/db/go_level_db.go index 31c85ce86..2d2ba44e3 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -6,6 +6,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/util" @@ -116,12 +117,32 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } +type goLevelDBIterator struct { + iterator.Iterator +} + +func (it *goLevelDBIterator) Key() []byte { + key := it.Key() + k := make([]byte, len(key)) + copy(k, key) + + return k +} + +func (it *goLevelDBIterator) Value() []byte { + val := it.Value() + v := make([]byte, len(val)) + copy(v, val) + + return v +} + func (db *GoLevelDB) Iterator() Iterator { - return db.db.NewIterator(nil, nil) + return &goLevelDBIterator{db.db.NewIterator(nil, nil)} } func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator { - return db.db.NewIterator(util.BytesPrefix(prefix), nil) + return &goLevelDBIterator{db.db.NewIterator(util.BytesPrefix(prefix), nil)} } func (db *GoLevelDB) NewBatch() Batch { From 0948343a6fd4efce07894c312d8e96c5f277c608 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Oct 2017 14:17:16 -0400 Subject: [PATCH 220/515] autofile: ensure file is open in Sync --- autofile/autofile.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/autofile/autofile.go b/autofile/autofile.go index 5d6bc7261..05fb0d677 100644 --- a/autofile/autofile.go +++ b/autofile/autofile.go @@ -103,6 +103,11 @@ func (af *AutoFile) Sync() error { af.mtx.Lock() defer af.mtx.Unlock() + if af.file == nil { + if err := af.openFile(); err != nil { + return err + } + } return af.file.Sync() } From e9c83b30058f60c8bf6810d52ab491712b9cedf7 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Oct 2017 23:26:45 -0400 Subject: 
[PATCH 221/515] version and changelog --- CHANGELOG.md | 8 ++++++++ version/version.go | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f5bd59ca..e36a02d9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.3.2 (October 2, 2017) + +BUG FIXES: + +- [autofile] fix AutoFile.Sync() to open file if it's been closed +- [db] fix MemDb.Close() to not empty the database (ie. its just a noop) + + ## 0.3.1 (September 22, 2017) BUG FIXES: diff --git a/version/version.go b/version/version.go index 6e030624e..77580b5ad 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.3.1" +const Version = "0.3.2" From cf49ba876fe8e7734acccd4c29289b77ea5829a5 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Tue, 3 Oct 2017 12:18:21 -0400 Subject: [PATCH 222/515] linter: couple fixes --- Makefile | 4 ++-- autofile/autofile_test.go | 21 ++++++++------------- common/os.go | 3 +-- common/service.go | 16 +++++++--------- merkle/simple_tree.go | 1 - 5 files changed, 18 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index 902197e77..25773ed36 100644 --- a/Makefile +++ b/Makefile @@ -31,9 +31,7 @@ metalinter_test: ensure_tools --enable=deadcode \ --enable=gas \ --enable=goconst \ - --enable=goimports \ --enable=gosimple \ - --enable=gotype \ --enable=ineffassign \ --enable=interfacer \ --enable=megacheck \ @@ -52,5 +50,7 @@ metalinter_test: ensure_tools #--enable=dupl \ #--enable=errcheck \ #--enable=gocyclo \ + #--enable=goimports \ #--enable=golint \ <== comments on anything exported + #--enable=gotype \ #--enable=unparam \ diff --git a/autofile/autofile_test.go b/autofile/autofile_test.go index c7aa93beb..05152219c 100644 --- a/autofile/autofile_test.go +++ b/autofile/autofile_test.go @@ -1,4 +1,3 @@ -// nolint: goimports package autofile import ( @@ -8,19 +7,18 @@ import ( "testing" "time" - . 
"github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func TestSIGHUP(t *testing.T) { // First, create an AutoFile writing to a tempfile dir - file, name := Tempfile("sighup_test") - err := file.Close() - if err != nil { + file, name := cmn.Tempfile("sighup_test") + if err := file.Close(); err != nil { t.Fatalf("Error creating tempfile: %v", err) } // Here is the actual AutoFile - af, err := OpenAutoFile(name) + af, err := cmn.OpenAutoFile(name) if err != nil { t.Fatalf("Error creating autofile: %v", err) } @@ -36,8 +34,7 @@ func TestSIGHUP(t *testing.T) { } // Move the file over - err = os.Rename(name, name+"_old") - if err != nil { + if err := os.Rename(name, name+"_old"); err != nil { t.Fatalf("Error moving autofile: %v", err) } @@ -59,17 +56,15 @@ func TestSIGHUP(t *testing.T) { if err != nil { t.Fatalf("Error writing to autofile: %v", err) } - err = af.Close() - if err != nil { + if err := af.Close(); err != nil { t.Fatalf("Error closing autofile") } // Both files should exist - if body := MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { t.Errorf("Unexpected body %s", body) } - if body := MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + if body := cmn.MustReadFile(name); string(body) != "Line 3\nLine 4\n" { t.Errorf("Unexpected body %s", body) } - } diff --git a/common/os.go b/common/os.go index 8b7143f5a..19aa479f9 100644 --- a/common/os.go +++ b/common/os.go @@ -8,7 +8,6 @@ import ( "os" "os/signal" "strings" - "syscall" ) var ( @@ -18,7 +17,7 @@ var ( func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) - signal.Notify(c, syscall.SIGTERM) + signal.Notify(c, os.Kill) // nolint: megacheck go func() { for sig := range c { fmt.Printf("captured %v, exiting...\n", sig) diff --git a/common/service.go b/common/service.go index 5ac386316..2d86baafe 100644 --- a/common/service.go +++ 
b/common/service.go @@ -140,18 +140,16 @@ func (bs *BaseService) OnStop() {} // Implements Service func (bs *BaseService) Reset() (bool, error) { - if atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - // whether or not we've started, we can reset - atomic.CompareAndSwapUint32(&bs.started, 1, 0) - - bs.Quit = make(chan struct{}) - return true, bs.impl.OnReset() - } else { + if stopped := atomic.CompareAndSwapUint32(&bs.stopped, 1, 0); !stopped { bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) return false, nil } - // never happens - return false, nil // nolint: vet + + // whether or not we've started, we can reset + atomic.CompareAndSwapUint32(&bs.started, 1, 0) + + bs.Quit = make(chan struct{}) + return true, bs.impl.OnReset() } // Implements Service diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index b373743fc..8106246d6 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -22,7 +22,6 @@ For larger datasets, use IAVLTree. */ -// nolint: goimports package merkle import ( From c8805fd7deb52c9565f1f0c5e465b480dcf13f2c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 4 Oct 2017 00:13:58 -0400 Subject: [PATCH 223/515] metalinter fixes from review --- autofile/autofile_test.go | 5 +++-- common/os.go | 4 ++-- common/service.go | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/autofile/autofile_test.go b/autofile/autofile_test.go index 05152219c..8f453dd07 100644 --- a/autofile/autofile_test.go +++ b/autofile/autofile_test.go @@ -18,7 +18,7 @@ func TestSIGHUP(t *testing.T) { t.Fatalf("Error creating tempfile: %v", err) } // Here is the actual AutoFile - af, err := cmn.OpenAutoFile(name) + af, err := OpenAutoFile(name) if err != nil { t.Fatalf("Error creating autofile: %v", err) } @@ -34,7 +34,8 @@ func TestSIGHUP(t *testing.T) { } // Move the file over - if err := os.Rename(name, name+"_old"); err != nil { + err = os.Rename(name, name+"_old") + if err != nil { t.Fatalf("Error moving 
autofile: %v", err) } diff --git a/common/os.go b/common/os.go index 19aa479f9..625d6ae16 100644 --- a/common/os.go +++ b/common/os.go @@ -8,6 +8,7 @@ import ( "os" "os/signal" "strings" + "syscall" ) var ( @@ -16,8 +17,7 @@ var ( func TrapSignal(cb func()) { c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - signal.Notify(c, os.Kill) // nolint: megacheck + signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { for sig := range c { fmt.Printf("captured %v, exiting...\n", sig) diff --git a/common/service.go b/common/service.go index 2d86baafe..8d4de30a8 100644 --- a/common/service.go +++ b/common/service.go @@ -140,7 +140,7 @@ func (bs *BaseService) OnStop() {} // Implements Service func (bs *BaseService) Reset() (bool, error) { - if stopped := atomic.CompareAndSwapUint32(&bs.stopped, 1, 0); !stopped { + if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) return false, nil } From 35e38e8932e69dcf3038729c611358f9c2ac960d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 11 Oct 2017 12:42:54 +0400 Subject: [PATCH 224/515] call go env GOPATH if env var is not found (Refs #60) --- common/os.go | 16 +++++++++++++++- common/os_test.go | 23 +++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/common/os.go b/common/os.go index e0a009264..8af6cd22e 100644 --- a/common/os.go +++ b/common/os.go @@ -6,6 +6,7 @@ import ( "io" "io/ioutil" "os" + "os/exec" "os/signal" "path/filepath" "strings" @@ -13,9 +14,22 @@ import ( ) var ( - GoPath = os.Getenv("GOPATH") + GoPath = gopath() ) +func gopath() string { + path := os.Getenv("GOPATH") + if len(path) == 0 { + goCmd := exec.Command("go", "env", "GOPATH") + out, err := goCmd.Output() + if err != nil { + panic(fmt.Sprintf("failed to determine gopath: %v", err)) + } + path = string(out) + } + return path +} + func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, 
syscall.SIGTERM) diff --git a/common/os_test.go b/common/os_test.go index 05359e36e..168eb438c 100644 --- a/common/os_test.go +++ b/common/os_test.go @@ -27,3 +27,26 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } } + +func TestGoPath(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + err := os.Setenv("GOPATH", "~/testgopath") + if err != nil { + t.Fatal(err) + } + path = gopath() + if path != "~/testgopath" { + t.Fatalf("gopath should return GOPATH env var if set, got %v", path) + } + os.Unsetenv("GOPATH") + + path = gopath() + if path == "~/testgopath" || path == "" { + t.Fatalf("gopath should return go env GOPATH result if env var does not exist, got %v", path) + } +} From 7166252a521951eb8b6bd26db28b2b90586941a9 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 11 Oct 2017 12:48:05 +0400 Subject: [PATCH 225/515] add codeowners file [ci skip] --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..d2dddf85a --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +* @melekes @ebuchman +*.md @zramsay +*.rst @zramsay From 6b1e3bcee3cf69fbac004cef8f8f3788f12cd58e Mon Sep 17 00:00:00 2001 From: Alexis Sellier Date: Fri, 13 Oct 2017 13:03:43 +0200 Subject: [PATCH 226/515] Add comments about copying --- db/go_level_db.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/db/go_level_db.go b/db/go_level_db.go index 2d2ba44e3..f3ed79e30 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -121,6 +121,7 @@ type goLevelDBIterator struct { iterator.Iterator } +// Key returns a copy of the current key. func (it *goLevelDBIterator) Key() []byte { key := it.Key() k := make([]byte, len(key)) @@ -129,6 +130,7 @@ func (it *goLevelDBIterator) Key() []byte { return k } +// Value returns a copy of the current value. 
func (it *goLevelDBIterator) Value() []byte { val := it.Value() v := make([]byte, len(val)) From 0b22b27bbb08947e3875dcfe22f95b983635eb0a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 16 Oct 2017 22:38:37 +0400 Subject: [PATCH 227/515] avoid infinite recursion by proxying to iterator ``` WARNING: staticcheck, gosimple and unused are all set, using megacheck instead db/go_level_db.go:126:15:warning: infinite recursive call (SA5007) (megacheck) db/go_level_db.go:135:17:warning: infinite recursive call (SA5007) (megacheck) db/mem_db.go:106:2:warning: redundant return statement (S1023) (megacheck) ``` https://circleci.com/gh/tendermint/tmlibs/220 Also remove unnecessary return statement --- db/go_level_db.go | 18 +++++++++++++++--- db/mem_db.go | 1 - 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/db/go_level_db.go b/db/go_level_db.go index f3ed79e30..4abd76112 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -118,12 +118,12 @@ func (db *GoLevelDB) Stats() map[string]string { } type goLevelDBIterator struct { - iterator.Iterator + source iterator.Iterator } // Key returns a copy of the current key. func (it *goLevelDBIterator) Key() []byte { - key := it.Key() + key := it.source.Key() k := make([]byte, len(key)) copy(k, key) @@ -132,13 +132,25 @@ func (it *goLevelDBIterator) Key() []byte { // Value returns a copy of the current value. 
func (it *goLevelDBIterator) Value() []byte { - val := it.Value() + val := it.source.Value() v := make([]byte, len(val)) copy(v, val) return v } +func (it *goLevelDBIterator) Error() error { + return it.source.Error() +} + +func (it *goLevelDBIterator) Next() bool { + return it.source.Next() +} + +func (it *goLevelDBIterator) Release() { + it.source.Release() +} + func (db *GoLevelDB) Iterator() Iterator { return &goLevelDBIterator{db.db.NewIterator(nil, nil)} } diff --git a/db/mem_db.go b/db/mem_db.go index 55d594fa6..077427509 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -103,7 +103,6 @@ func (it *memDBIterator) Value() []byte { func (it *memDBIterator) Release() { it.db = nil it.keys = nil - return } func (it *memDBIterator) Error() error { From 498fb1134a550761f2cc0acef5cf9fbc76fb2562 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 17 Oct 2017 16:26:52 +0400 Subject: [PATCH 228/515] write docs for autofile/group --- autofile/group.go | 73 +++++++++++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 27 deletions(-) diff --git a/autofile/group.go b/autofile/group.go index eedb67b50..b66b5c692 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -18,6 +18,13 @@ import ( . "github.com/tendermint/tmlibs/common" ) +const ( + groupCheckDuration = 5000 * time.Millisecond + defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB + defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB + maxFilesToRemove = 4 // needs to be greater than 1 +) + /* You can open a Group to keep restrictions on an AutoFile, like the maximum size of each chunk, and/or the total amount of bytes @@ -25,33 +32,27 @@ stored in the group. The first file to be written in the Group.Dir is the head file. - Dir/ - - + Dir/ + - Once the Head file reaches the size limit, it will be rotated. - Dir/ - - .000 // First rolled file - - // New head path, starts empty. - // The implicit index is 001. + Dir/ + - .000 // First rolled file + - // New head path, starts empty. 
+ // The implicit index is 001. As more files are written, the index numbers grow... - Dir/ - - .000 // First rolled file - - .001 // Second rolled file - - ... - - // New head path + Dir/ + - .000 // First rolled file + - .001 // Second rolled file + - ... + - // New head path The Group can also be used to binary-search for some line, assuming that marker lines are written occasionally. */ - -const groupCheckDuration = 5000 * time.Millisecond -const defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB -const defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB -const maxFilesToRemove = 4 // needs to be greater than 1 - type Group struct { BaseService @@ -109,37 +110,43 @@ func (g *Group) OnStop() { g.ticker.Stop() } +// SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. func (g *Group) SetHeadSizeLimit(limit int64) { g.mtx.Lock() g.headSizeLimit = limit g.mtx.Unlock() } +// HeadSizeLimit returns the current head size limit. func (g *Group) HeadSizeLimit() int64 { g.mtx.Lock() defer g.mtx.Unlock() return g.headSizeLimit } +// SetTotalSizeLimit allows you to overwrite default total size limit of the +// group - 1GB. func (g *Group) SetTotalSizeLimit(limit int64) { g.mtx.Lock() g.totalSizeLimit = limit g.mtx.Unlock() } +// TotalSizeLimit returns total size limit of the group. func (g *Group) TotalSizeLimit() int64 { g.mtx.Lock() defer g.mtx.Unlock() return g.totalSizeLimit } +// MaxIndex returns index of the last file in the group. func (g *Group) MaxIndex() int { g.mtx.Lock() defer g.mtx.Unlock() return g.maxIndex } -// Auto appends "\n" +// WriteLine writes line into the current head of the group. It also appends "\n". 
// NOTE: Writes are buffered so they don't write synchronously // TODO: Make it halt if space is unavailable func (g *Group) WriteLine(line string) error { @@ -149,6 +156,8 @@ func (g *Group) WriteLine(line string) error { return err } +// Flush writes any buffered data to the underlying file and commits the +// current content of the file to stable storage. func (g *Group) Flush() error { g.mtx.Lock() defer g.mtx.Unlock() @@ -223,6 +232,8 @@ func (g *Group) checkTotalSizeLimit() { } } +// RotateFile causes group to close the current head and assign it some index. +// Note it does not create a new head. func (g *Group) RotateFile() { g.mtx.Lock() defer g.mtx.Unlock() @@ -241,8 +252,8 @@ func (g *Group) RotateFile() { g.maxIndex += 1 } -// NOTE: if error, returns no GroupReader. -// CONTRACT: Caller must close the returned GroupReader +// NewReader returns a new group reader. +// CONTRACT: Caller must close the returned GroupReader. func (g *Group) NewReader(index int) (*GroupReader, error) { r := newGroupReader(g) err := r.SetIndex(index) @@ -423,14 +434,15 @@ GROUP_LOOP: return } +// GroupInfo holds information about the group. type GroupInfo struct { - MinIndex int - MaxIndex int - TotalSize int64 - HeadSize int64 + MinIndex int // index of the first file in the group, including head + MaxIndex int // index of the last file in the group, including head + TotalSize int64 // total size of the group + HeadSize int64 // size of the head } -// Returns info after scanning all files in g.Head's dir +// Returns info after scanning all files in g.Head's dir. func (g *Group) ReadGroupInfo() GroupInfo { g.mtx.Lock() defer g.mtx.Unlock() @@ -505,6 +517,7 @@ func filePathForIndex(headPath string, index int, maxIndex int) string { //-------------------------------------------------------------------------------- +// GroupReader provides an interface for reading from a Group. 
type GroupReader struct { *Group mtx sync.Mutex @@ -524,6 +537,7 @@ func newGroupReader(g *Group) *GroupReader { } } +// Close closes the GroupReader by closing the cursor file. func (gr *GroupReader) Close() error { gr.mtx.Lock() defer gr.mtx.Unlock() @@ -540,7 +554,7 @@ func (gr *GroupReader) Close() error { } } -// Reads a line (without delimiter) +// ReadLine reads a line (without delimiter). // just return io.EOF if no new lines found. func (gr *GroupReader) ReadLine() (string, error) { gr.mtx.Lock() @@ -613,6 +627,9 @@ func (gr *GroupReader) openFile(index int) error { return nil } +// PushLine makes the given line the current one, so the next time somebody +// calls ReadLine, this line will be returned. +// panics if called twice without calling ReadLine. func (gr *GroupReader) PushLine(line string) { gr.mtx.Lock() defer gr.mtx.Unlock() @@ -624,13 +641,15 @@ func (gr *GroupReader) PushLine(line string) { } } -// Cursor's file index. +// CurIndex returns cursor's file index. func (gr *GroupReader) CurIndex() int { gr.mtx.Lock() defer gr.mtx.Unlock() return gr.curIndex } +// SetIndex sets the cursor's file index to index by opening a file at this +// position. func (gr *GroupReader) SetIndex(index int) error { gr.mtx.Lock() defer gr.mtx.Unlock() From 45095e83e790624980240e83628d6971cafa5495 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 17 Oct 2017 16:48:44 +0400 Subject: [PATCH 229/515] add Write method to autofile/Group --- autofile/group.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/autofile/group.go b/autofile/group.go index b66b5c692..947c295f8 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -146,6 +146,17 @@ func (g *Group) MaxIndex() int { return g.maxIndex } +// Write writes the contents of p into the current head of the group. It +// returns the number of bytes written. If nn < len(p), it also returns an +// error explaining why the write is short. 
+// NOTE: Writes are buffered so they don't write synchronously +// TODO: Make it halt if space is unavailable +func (g *Group) Write(p []byte) (nn int, err error) { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headBuf.Write(p) +} + // WriteLine writes line into the current head of the group. It also appends "\n". // NOTE: Writes are buffered so they don't write synchronously // TODO: Make it halt if space is unavailable From aace56018a5f70c09a2ab26b280c943a85aba5d7 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 20 Oct 2017 12:38:45 +0400 Subject: [PATCH 230/515] add Read method to GroupReader --- autofile/group.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/autofile/group.go b/autofile/group.go index 947c295f8..2c6aa6109 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -565,6 +565,42 @@ func (gr *GroupReader) Close() error { } } +// Read implements io.Reader, reading bytes from the current Reader +// incrementing index until enough bytes are read. +func (gr *GroupReader) Read(p []byte) (n int, err error) { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + // Open file if not open yet + if gr.curReader == nil { + if err = gr.openFile(gr.curIndex); err != nil { + return 0, err + } + } + + // Iterate over files until enough bytes are read + lenP := len(p) + for { + nn, err := gr.curReader.Read(p[n:]) + n += nn + if err == io.EOF { + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return n, err1 + } + if n >= lenP { + return n, nil + } else { + continue + } + } else if err != nil { + return n, err + } + } + + return n, err +} + // ReadLine reads a line (without delimiter). // just return io.EOF if no new lines found. 
func (gr *GroupReader) ReadLine() (string, error) { From 35e81018e9bd183be7121b6b900dff3d49e234d5 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 20 Oct 2017 13:09:11 +0400 Subject: [PATCH 231/515] add MinIndex method to Group --- autofile/group.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/autofile/group.go b/autofile/group.go index 2c6aa6109..d5797d087 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -146,6 +146,13 @@ func (g *Group) MaxIndex() int { return g.maxIndex } +// MinIndex returns index of the first file in the group. +func (g *Group) MinIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.minIndex +} + // Write writes the contents of p into the current head of the group. It // returns the number of bytes written. If nn < len(p), it also returns an // error explaining why the write is short. From c75ddd0fa3f669c1b391291a10361ddf8c5170bf Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 23 Oct 2017 13:02:02 +0400 Subject: [PATCH 232/515] return err if empty slice given --- autofile/group.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/autofile/group.go b/autofile/group.go index d5797d087..4b3cd6565 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -575,6 +575,11 @@ func (gr *GroupReader) Close() error { // Read implements io.Reader, reading bytes from the current Reader // incrementing index until enough bytes are read. 
func (gr *GroupReader) Read(p []byte) (n int, err error) { + lenP := len(p) + if lenP == 0 { + return 0, errors.New("given empty slice") + } + gr.mtx.Lock() defer gr.mtx.Unlock() @@ -586,7 +591,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } // Iterate over files until enough bytes are read - lenP := len(p) for { nn, err := gr.curReader.Read(p[n:]) n += nn From 21b2c26fb1b26edf5846792890e01eaa8a472508 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 23 Oct 2017 13:02:14 +0400 Subject: [PATCH 233/515] GroupReader#Read: return io.EOF if file is empty --- autofile/group.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/autofile/group.go b/autofile/group.go index 4b3cd6565..6d70a3dbb 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -606,6 +606,8 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } } else if err != nil { return n, err + } else if nn == 0 { // empty file + return n, err } } From 0eff425bc7e3d6137ea3c59ad7436eafe5ef55d2 Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Mon, 23 Oct 2017 18:51:49 +0100 Subject: [PATCH 234/515] fix zeroed buffer getting flushed to the empty event --- events/event_cache.go | 12 ++++-------- events/event_cache_test.go | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 8 deletions(-) create mode 100644 events/event_cache_test.go diff --git a/events/event_cache.go b/events/event_cache.go index 905f1096a..f508e873d 100644 --- a/events/event_cache.go +++ b/events/event_cache.go @@ -1,9 +1,5 @@ package events -const ( - eventsBufferSize = 1000 -) - // An EventCache buffers events for a Fireable // All events are cached. 
Filtering happens on Flush type EventCache struct { @@ -14,8 +10,7 @@ type EventCache struct { // Create a new EventCache with an EventSwitch as backend func NewEventCache(evsw Fireable) *EventCache { return &EventCache{ - evsw: evsw, - events: make([]eventInfo, eventsBufferSize), + evsw: evsw, } } @@ -27,7 +22,7 @@ type eventInfo struct { // Cache an event to be fired upon finality. func (evc *EventCache) FireEvent(event string, data EventData) { - // append to list + // append to list (go will grow our backing array exponentially) evc.events = append(evc.events, eventInfo{event, data}) } @@ -37,5 +32,6 @@ func (evc *EventCache) Flush() { for _, ei := range evc.events { evc.evsw.FireEvent(ei.event, ei.data) } - evc.events = make([]eventInfo, eventsBufferSize) + // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation + evc.events = nil } diff --git a/events/event_cache_test.go b/events/event_cache_test.go new file mode 100644 index 000000000..ab321da3a --- /dev/null +++ b/events/event_cache_test.go @@ -0,0 +1,35 @@ +package events + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEventCache_Flush(t *testing.T) { + evsw := NewEventSwitch() + evsw.Start() + evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache + require.FailNow(t, "We should never receive a message on this switch since none are fired") + }) + evc := NewEventCache(evsw) + evc.Flush() + // Check after reset + evc.Flush() + fail := true + pass := false + evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + if fail { + require.FailNow(t, "Shouldn't see a message until flushed") + } + pass = true + }) + evc.FireEvent("something", struct{ int }{1}) + evc.FireEvent("something", struct{ int }{2}) + evc.FireEvent("something", struct{ int }{3}) + 
fail = false + evc.Flush() + assert.True(t, pass) +} From 81591e288e87eba7735df53207f74a09ba5f289a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 24 Oct 2017 23:19:53 +0400 Subject: [PATCH 235/515] fix metalinter warnings --- autofile/group.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/autofile/group.go b/autofile/group.go index 6d70a3dbb..bbf77d27e 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -591,8 +591,9 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } // Iterate over files until enough bytes are read + var nn int for { - nn, err := gr.curReader.Read(p[n:]) + nn, err = gr.curReader.Read(p[n:]) n += nn if err == io.EOF { // Open the next file @@ -610,8 +611,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { return n, err } } - - return n, err } // ReadLine reads a line (without delimiter). From 103fee61921ee8bebd055bedd0815ddc71e03d90 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 24 Oct 2017 23:20:17 +0400 Subject: [PATCH 236/515] add tests for autofile group Write, reader#Read --- autofile/group_test.go | 91 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/autofile/group_test.go b/autofile/group_test.go index 0cfcef72f..398ea3ae9 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -1,6 +1,7 @@ package autofile import ( + "bytes" "errors" "io" "io/ioutil" @@ -400,3 +401,93 @@ func TestFindLast4(t *testing.T) { // Cleanup destroyTestGroup(t, g) } + +func TestWrite(t *testing.T) { + g := createTestGroup(t, 0) + + written := []byte("Medusa") + g.Write(written) + g.Flush() + + read := make([]byte, len(written)) + gr, err := g.NewReader(0) + if err != nil { + t.Fatalf("Failed to create reader: %v", err) + } + _, err = gr.Read(read) + if err != nil { + t.Fatalf("Failed to read data: %v", err) + } + + if !bytes.Equal(written, read) { + t.Errorf("%s, %s should be equal", string(written), string(read)) + } + + // Cleanup + 
destroyTestGroup(t, g) +} + +func TestGroupReaderRead(t *testing.T) { + g := createTestGroup(t, 0) + + professor := []byte("Professor Monster") + g.Write(professor) + g.Flush() + g.RotateFile() + frankenstein := []byte("Frankenstein's Monster") + g.Write(frankenstein) + g.Flush() + + totalWrittenLength := len(professor) + len(frankenstein) + read := make([]byte, totalWrittenLength) + gr, err := g.NewReader(0) + if err != nil { + t.Fatalf("Failed to create reader: %v", err) + } + n, err := gr.Read(read) + if err != nil { + t.Fatalf("Failed to read data: %v", err) + } + if n != totalWrittenLength { + t.Errorf("Failed to read enough bytes: wanted %d, but read %d", totalWrittenLength, n) + } + + professorPlusFrankenstein := professor + professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...) + if !bytes.Equal(read, professorPlusFrankenstein) { + t.Errorf("%s, %s should be equal", string(professorPlusFrankenstein), string(read)) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestMinIndex(t *testing.T) { + g := createTestGroup(t, 0) + + if g.MinIndex() != 0 { + t.Error("MinIndex should be zero at the beginning") + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestMaxIndex(t *testing.T) { + g := createTestGroup(t, 0) + + if g.MaxIndex() != 0 { + t.Error("MaxIndex should be zero at the beginning") + } + + g.WriteLine("Line 1") + g.Flush() + g.RotateFile() + + if g.MaxIndex() != 1 { + t.Error("MaxIndex should point to the last file") + } + + // Cleanup + destroyTestGroup(t, g) +} From f99c73502ca32782c0e68258848cc5c365e133b6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 24 Oct 2017 23:30:05 +0400 Subject: [PATCH 237/515] add codecov --- circle.yml | 10 +++++----- test.sh | 12 ++++++++++++ 2 files changed, 17 insertions(+), 5 deletions(-) create mode 100755 test.sh diff --git a/circle.yml b/circle.yml index 8e3ad168b..3dba976be 100644 --- a/circle.yml +++ b/circle.yml @@ -1,11 +1,9 @@ machine: environment: - GOPATH: 
/home/ubuntu/.go_workspace + GOPATH: "${HOME}/.go_workspace" PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - GO15VENDOREXPERIMENT: 1 hosts: - circlehost: 127.0.0.1 localhost: 127.0.0.1 dependencies: @@ -17,5 +15,7 @@ dependencies: test: override: - - "go version" - - "cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && make test" + - cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && bash ./test.sh + post: + - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt + - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..012162b07 --- /dev/null +++ b/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done From bcf15e527d89444bf260fd83699ef70fde2fb6e1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 25 Oct 2017 11:01:52 +0400 Subject: [PATCH 238/515] make GoPath a function otherwise it could try to execute go binary and panic if no go binary found. See https://github.com/tendermint/tendermint/issues/782 --- common/os.go | 13 +++++++++---- common/os_test.go | 27 ++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/common/os.go b/common/os.go index 8af6cd22e..6b591ce59 100644 --- a/common/os.go +++ b/common/os.go @@ -13,11 +13,15 @@ import ( "syscall" ) -var ( - GoPath = gopath() -) +var gopath string + +// GoPath returns GOPATH env variable value. If it is not set, this function +// will try to call `go env GOPATH` subcommand. 
+func GoPath() string { + if gopath != "" { + return gopath + } -func gopath() string { path := os.Getenv("GOPATH") if len(path) == 0 { goCmd := exec.Command("go", "env", "GOPATH") @@ -27,6 +31,7 @@ func gopath() string { } path = string(out) } + gopath = path return path } diff --git a/common/os_test.go b/common/os_test.go index 168eb438c..126723aa6 100644 --- a/common/os_test.go +++ b/common/os_test.go @@ -39,14 +39,31 @@ func TestGoPath(t *testing.T) { if err != nil { t.Fatal(err) } - path = gopath() + path = GoPath() if path != "~/testgopath" { - t.Fatalf("gopath should return GOPATH env var if set, got %v", path) + t.Fatalf("should get GOPATH env var value, got %v", path) } os.Unsetenv("GOPATH") - path = gopath() - if path == "~/testgopath" || path == "" { - t.Fatalf("gopath should return go env GOPATH result if env var does not exist, got %v", path) + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("subsequent calls should return the same value, got %v", path) + } +} + +func TestGoPathWithoutEnvVar(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + os.Unsetenv("GOPATH") + // reset cache + gopath = "" + + path = GoPath() + if path == "" || path == "~/testgopath" { + t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) } } From 42145a82bd953e4a666bcc01a37e2d140b9c1e8e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 26 Oct 2017 20:57:33 -0400 Subject: [PATCH 239/515] version and changelog --- CHANGELOG.md | 15 +++++++++++++++ version/version.go | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e36a02d9a..3a21dd5dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 0.4.0 (October 26, 2017) + +BREAKING: + - [common] GoPath is now a function + - [db] `DB` and `Iterator` interfaces have new methods to better support iteration + +FEATURES: + - [autofile] 
`Read([]byte)` and `Write([]byte)` methods on `Group` to support binary WAL + +IMPROVEMENTS: + - comments and linting + +BUG FIXES: + - [events] fix allocation error prefixing cache with 1000 empty events + ## 0.3.2 (October 2, 2017) BUG FIXES: diff --git a/version/version.go b/version/version.go index 77580b5ad..c1635d202 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.3.2" +const Version = "0.4.0" From 092eb701c7276907cdbed258750e22ce895b6735 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 27 Oct 2017 11:01:40 -0400 Subject: [PATCH 240/515] cmn: Kill --- common/os.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/common/os.go b/common/os.go index 6b591ce59..81f703c7d 100644 --- a/common/os.go +++ b/common/os.go @@ -50,6 +50,12 @@ func TrapSignal(cb func()) { select {} } +// Kill the running process by sending itself SIGTERM +func Kill() error { + pid := os.Getpid() + return syscall.Kill(pid, syscall.SIGTERM) +} + func Exit(s string) { fmt.Printf(s + "\n") os.Exit(1) From b14c99669810fdb9bdd305fe0e09f72c86df815b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 27 Oct 2017 11:52:10 -0400 Subject: [PATCH 241/515] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a21dd5dd..c380fdcd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ BREAKING: FEATURES: - [autofile] `Read([]byte)` and `Write([]byte)` methods on `Group` to support binary WAL + - [common] `Kill()` sends SIGTERM to the current process IMPROVEMENTS: - comments and linting From 668698584d8d8ac977b3343f0c174f6291e89878 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 30 Oct 2017 12:48:51 -0500 Subject: [PATCH 242/515] [autofile] test GroupReader more extensively (Refs #69) --- autofile/group.go | 10 +- autofile/group_test.go | 259 ++++++++++++++++------------------------- 2 files changed, 106 insertions(+), 163 deletions(-) diff --git 
a/autofile/group.go b/autofile/group.go index bbf77d27e..f2d0f2bae 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -596,14 +596,12 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { nn, err = gr.curReader.Read(p[n:]) n += nn if err == io.EOF { - // Open the next file - if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { - return n, err1 - } if n >= lenP { return n, nil - } else { - continue + } else { // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return n, err1 + } } } else if err != nil { return n, err diff --git a/autofile/group_test.go b/autofile/group_test.go index 398ea3ae9..68baba824 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -1,8 +1,8 @@ package autofile import ( - "bytes" "errors" + "fmt" "io" "io/ioutil" "os" @@ -10,51 +10,38 @@ import ( "strings" "testing" - . "github.com/tendermint/tmlibs/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tmlibs/common" ) // NOTE: Returned group has ticker stopped func createTestGroup(t *testing.T, headSizeLimit int64) *Group { - testID := RandStr(12) + testID := cmn.RandStr(12) testDir := "_test_" + testID - err := EnsureDir(testDir, 0700) - if err != nil { - t.Fatal("Error creating dir", err) - } + err := cmn.EnsureDir(testDir, 0700) + require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" g, err := OpenGroup(headPath) - if err != nil { - t.Fatal("Error opening Group", err) - } + require.NoError(t, err, "Error opening Group") g.SetHeadSizeLimit(headSizeLimit) g.stopTicker() - - if g == nil { - t.Fatal("Failed to create Group") - } + require.NotEqual(t, nil, g, "Failed to create Group") return g } func destroyTestGroup(t *testing.T, g *Group) { err := os.RemoveAll(g.Dir) - if err != nil { - t.Fatal("Error removing test Group directory", err) - } + require.NoError(t, err, "Error removing test Group directory") } func assertGroupInfo(t *testing.T, 
gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { - if gInfo.MinIndex != minIndex { - t.Errorf("GroupInfo MinIndex expected %v, got %v", minIndex, gInfo.MinIndex) - } - if gInfo.MaxIndex != maxIndex { - t.Errorf("GroupInfo MaxIndex expected %v, got %v", maxIndex, gInfo.MaxIndex) - } - if gInfo.TotalSize != totalSize { - t.Errorf("GroupInfo TotalSize expected %v, got %v", totalSize, gInfo.TotalSize) - } - if gInfo.HeadSize != headSize { - t.Errorf("GroupInfo HeadSize expected %v, got %v", headSize, gInfo.HeadSize) - } + assert := assert.New(t) + assert.Equal(minIndex, gInfo.MinIndex) + assert.Equal(maxIndex, gInfo.MaxIndex) + assert.Equal(totalSize, gInfo.TotalSize) + assert.Equal(headSize, gInfo.HeadSize) } func TestCheckHeadSizeLimit(t *testing.T) { @@ -65,10 +52,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { - err := g.WriteLine(RandStr(999)) - if err != nil { - t.Fatal("Error appending to head", err) - } + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") } g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) @@ -78,9 +63,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. - if err := g.WriteLine(RandStr(999)); err != nil { - t.Fatal("Error appending to head", err) - } + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") g.Flush() // Calling checkHeadSizeLimit this time rolls it. @@ -88,9 +72,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. - if err := g.WriteLine(RandStr(999)); err != nil { - t.Fatal("Error appending to head", err) - } + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") g.Flush() // Calling checkHeadSizeLimit does nothing. 
@@ -99,9 +82,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { - if err := g.WriteLine(RandStr(999)); err != nil { - t.Fatal("Error appending to head", err) - } + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") } g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) @@ -111,10 +93,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) // Write 1000 more bytes. - _, err := g.Head.Write([]byte(RandStr(999) + "\n")) - if err != nil { - t.Fatal("Error appending to head", err) - } + _, err = g.Head.Write([]byte(cmn.RandStr(999) + "\n")) + require.NoError(t, err, "Error appending to head") g.Flush() assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) @@ -134,16 +114,12 @@ func TestSearch(t *testing.T) { for i := 0; i < 100; i++ { // The random junk at the end ensures that this INFO linen // is equally likely to show up at the end. 
- _, err := g.Head.Write([]byte(Fmt("INFO %v %v\n", i, RandStr(123)))) - if err != nil { - t.Error("Failed to write to head") - } + _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123)))) + require.NoError(t, err, "Failed to write to head") g.checkHeadSizeLimit() for j := 0; j < 10; j++ { - _, err := g.Head.Write([]byte(RandStr(123) + "\n")) - if err != nil { - t.Error("Failed to write to head") - } + _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n")) + require.NoError(t, err1, "Failed to write to head") g.checkHeadSizeLimit() } } @@ -173,17 +149,11 @@ func TestSearch(t *testing.T) { for i := 0; i < 100; i++ { t.Log("Testing for i", i) gr, match, err := g.Search("INFO", makeSearchFunc(i)) - if err != nil { - t.Fatal("Failed to search for line:", err) - } - if !match { - t.Error("Expected Search to return exact match") - } + require.NoError(t, err, "Failed to search for line") + assert.True(t, match, "Expected Search to return exact match") line, err := gr.ReadLine() - if err != nil { - t.Fatal("Failed to read line after search", err) - } - if !strings.HasPrefix(line, Fmt("INFO %v ", i)) { + require.NoError(t, err, "Failed to read line after search") + if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) { t.Fatal("Failed to get correct line") } // Make sure we can continue to read from there. @@ -203,7 +173,7 @@ func TestSearch(t *testing.T) { if !strings.HasPrefix(line, "INFO ") { continue } - if !strings.HasPrefix(line, Fmt("INFO %v ", cur)) { + if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { t.Fatalf("Unexpected INFO #. Expected %v got:\n%v", cur, line) } cur += 1 @@ -215,35 +185,23 @@ func TestSearch(t *testing.T) { // We should get the first available line. 
{ gr, match, err := g.Search("INFO", makeSearchFunc(-999)) - if err != nil { - t.Fatal("Failed to search for line:", err) - } - if match { - t.Error("Expected Search to not return exact match") - } + require.NoError(t, err, "Failed to search for line") + assert.False(t, match, "Expected Search to not return exact match") line, err := gr.ReadLine() - if err != nil { - t.Fatal("Failed to read line after search", err) - } + require.NoError(t, err, "Failed to read line after search") if !strings.HasPrefix(line, "INFO 0 ") { t.Error("Failed to fetch correct line, which is the earliest INFO") } err = gr.Close() - if err != nil { - t.Error("Failed to close GroupReader", err) - } + require.NoError(t, err, "Failed to close GroupReader") } // Now search for something that is too large. // We should get an EOF error. { gr, _, err := g.Search("INFO", makeSearchFunc(999)) - if err != io.EOF { - t.Error("Expected to get an EOF error") - } - if gr != nil { - t.Error("Expected to get nil GroupReader") - } + assert.Equal(t, io.EOF, err) + assert.Nil(t, gr) } // Cleanup @@ -264,18 +222,14 @@ func TestRotateFile(t *testing.T) { // Read g.Head.Path+"000" body1, err := ioutil.ReadFile(g.Head.Path + ".000") - if err != nil { - t.Error("Failed to read first rolled file") - } + assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("Got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path body2, err := ioutil.ReadFile(g.Head.Path) - if err != nil { - t.Error("Failed to read first rolled file") - } + assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("Got unexpected contents: [%v]", string(body2)) } @@ -300,15 +254,9 @@ func TestFindLast1(t *testing.T) { g.Flush() match, found, err := g.FindLast("#") - if err != nil { - t.Error("Unexpected error", err) - } - if !found { - t.Error("Expected found=True") - } - if match != "# b" { - t.Errorf("Unexpected 
match: [%v]", match) - } + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) // Cleanup destroyTestGroup(t, g) @@ -330,15 +278,9 @@ func TestFindLast2(t *testing.T) { g.Flush() match, found, err := g.FindLast("#") - if err != nil { - t.Error("Unexpected error", err) - } - if !found { - t.Error("Expected found=True") - } - if match != "# b" { - t.Errorf("Unexpected match: [%v]", match) - } + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) // Cleanup destroyTestGroup(t, g) @@ -360,15 +302,9 @@ func TestFindLast3(t *testing.T) { g.Flush() match, found, err := g.FindLast("#") - if err != nil { - t.Error("Unexpected error", err) - } - if !found { - t.Error("Expected found=True") - } - if match != "# b" { - t.Errorf("Unexpected match: [%v]", match) - } + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) // Cleanup destroyTestGroup(t, g) @@ -388,15 +324,9 @@ func TestFindLast4(t *testing.T) { g.Flush() match, found, err := g.FindLast("#") - if err != nil { - t.Error("Unexpected error", err) - } - if found { - t.Error("Expected found=False") - } - if match != "" { - t.Errorf("Unexpected match: [%v]", match) - } + assert.NoError(t, err) + assert.False(t, found) + assert.Empty(t, match) // Cleanup destroyTestGroup(t, g) @@ -411,22 +341,18 @@ func TestWrite(t *testing.T) { read := make([]byte, len(written)) gr, err := g.NewReader(0) - if err != nil { - t.Fatalf("Failed to create reader: %v", err) - } - _, err = gr.Read(read) - if err != nil { - t.Fatalf("Failed to read data: %v", err) - } + require.NoError(t, err, "failed to create reader") - if !bytes.Equal(written, read) { - t.Errorf("%s, %s should be equal", string(written), string(read)) - } + _, err = gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, written, read) // Cleanup destroyTestGroup(t, g) } +// test that Read reads the required amount of bytes from all the files in the +// group and returns no 
error if n == size of the given slice. func TestGroupReaderRead(t *testing.T) { g := createTestGroup(t, 0) @@ -441,22 +367,47 @@ func TestGroupReaderRead(t *testing.T) { totalWrittenLength := len(professor) + len(frankenstein) read := make([]byte, totalWrittenLength) gr, err := g.NewReader(0) - if err != nil { - t.Fatalf("Failed to create reader: %v", err) - } - n, err := gr.Read(read) - if err != nil { - t.Fatalf("Failed to read data: %v", err) - } - if n != totalWrittenLength { - t.Errorf("Failed to read enough bytes: wanted %d, but read %d", totalWrittenLength, n) - } + require.NoError(t, err, "failed to create reader") + n, err := gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, totalWrittenLength, n, "not enough bytes read") professorPlusFrankenstein := professor professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...) - if !bytes.Equal(read, professorPlusFrankenstein) { - t.Errorf("%s, %s should be equal", string(professorPlusFrankenstein), string(read)) - } + assert.Equal(t, professorPlusFrankenstein, read) + + // Cleanup + destroyTestGroup(t, g) +} + +// test that Read returns an error if number of bytes read < size of +// the given slice. Subsequent call should return 0, io.EOF. 
+func TestGroupReaderRead2(t *testing.T) { + g := createTestGroup(t, 0) + + professor := []byte("Professor Monster") + g.Write(professor) + g.Flush() + g.RotateFile() + frankenstein := []byte("Frankenstein's Monster") + frankensteinPart := []byte("Frankenstein") + g.Write(frankensteinPart) // note writing only a part + g.Flush() + + totalLength := len(professor) + len(frankenstein) + read := make([]byte, totalLength) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + // 1) n < (size of the given slice), io.EOF + n, err := gr.Read(read) + assert.Equal(t, io.EOF, err) + assert.Equal(t, len(professor)+len(frankensteinPart), n, "Read more/less bytes than it is in the group") + + // 2) 0, io.EOF + n, err = gr.Read([]byte("0")) + assert.Equal(t, io.EOF, err) + assert.Equal(t, 0, n) // Cleanup destroyTestGroup(t, g) @@ -465,9 +416,7 @@ func TestGroupReaderRead(t *testing.T) { func TestMinIndex(t *testing.T) { g := createTestGroup(t, 0) - if g.MinIndex() != 0 { - t.Error("MinIndex should be zero at the beginning") - } + assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning") // Cleanup destroyTestGroup(t, g) @@ -476,17 +425,13 @@ func TestMinIndex(t *testing.T) { func TestMaxIndex(t *testing.T) { g := createTestGroup(t, 0) - if g.MaxIndex() != 0 { - t.Error("MaxIndex should be zero at the beginning") - } + assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning") g.WriteLine("Line 1") g.Flush() g.RotateFile() - if g.MaxIndex() != 1 { - t.Error("MaxIndex should point to the last file") - } + assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file") // Cleanup destroyTestGroup(t, g) From d8dd4970693ed840341fd04be9b07f27912a6864 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 30 Oct 2017 13:01:18 -0500 Subject: [PATCH 243/515] fix metalinter errors --- autofile/group_test.go | 9 ++++----- cli/setup_test.go | 44 +++++++++++++++++------------------------- 2 files changed, 22 
insertions(+), 31 deletions(-) diff --git a/autofile/group_test.go b/autofile/group_test.go index 68baba824..c4f68f057 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -37,11 +37,10 @@ func destroyTestGroup(t *testing.T, g *Group) { } func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { - assert := assert.New(t) - assert.Equal(minIndex, gInfo.MinIndex) - assert.Equal(maxIndex, gInfo.MaxIndex) - assert.Equal(totalSize, gInfo.TotalSize) - assert.Equal(headSize, gInfo.HeadSize) + assert.Equal(t, minIndex, gInfo.MinIndex) + assert.Equal(t, maxIndex, gInfo.MaxIndex) + assert.Equal(t, totalSize, gInfo.TotalSize) + assert.Equal(t, headSize, gInfo.HeadSize) } func TestCheckHeadSizeLimit(t *testing.T) { diff --git a/cli/setup_test.go b/cli/setup_test.go index 4e606ac7a..692da26d3 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -14,8 +14,6 @@ import ( ) func TestSetupEnv(t *testing.T) { - assert, require := assert.New(t), require.New(t) - cases := []struct { args []string env map[string]string @@ -51,22 +49,20 @@ func TestSetupEnv(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(err, i) - assert.Equal(tc.expected, foo, i) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) } } func TestSetupConfig(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // we pre-create two config files we can refer to in the rest of // the test cases. 
cval1, cval2 := "fubble", "wubble" conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) - require.Nil(err) + require.Nil(t, err) // make sure it handles dashed-words in the config, and ignores random info conf2, err := WriteDemoConfig(map[string]string{"boo": cval2, "foo": "bar", "two-words": "WORD"}) - require.Nil(err) + require.Nil(t, err) cases := []struct { args []string @@ -110,9 +106,9 @@ func TestSetupConfig(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(err, i) - assert.Equal(tc.expected, foo, i) - assert.Equal(tc.expectedTwo, two, i) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + assert.Equal(t, tc.expectedTwo, two, i) } } @@ -123,16 +119,14 @@ type DemoConfig struct { } func TestSetupUnmarshal(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "someone", "else" conf1, err := WriteDemoConfig(map[string]string{"name": cval1}) - require.Nil(err) + require.Nil(t, err) // even with some ignored fields, should be no problem conf2, err := WriteDemoConfig(map[string]string{"name": cval2, "foo": "bar"}) - require.Nil(err) + require.Nil(t, err) // unused is not declared on a flag and remains from base base := DemoConfig{ @@ -189,14 +183,12 @@ func TestSetupUnmarshal(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(err, i) - assert.Equal(tc.expected, cfg, i) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, cfg, i) } } func TestSetupTrace(t *testing.T) { - assert, require := assert.New(t), require.New(t) - cases := []struct { args []string env map[string]string @@ -224,16 +216,16 @@ func TestSetupTrace(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) - require.NotNil(err, i) - require.Equal("", stdout, i) - require.NotEqual("", stderr, i) + require.NotNil(t, err, i) + require.Equal(t, "", stdout, i) + require.NotEqual(t, "", stderr, i) msg := strings.Split(stderr, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) - assert.Equal(desired, msg[0], i) - if tc.long && assert.True(len(msg) > 2, i) { + assert.Equal(t, desired, msg[0], i) + if tc.long && assert.True(t, len(msg) > 2, i) { // the next line starts the stack trace... - assert.Contains(msg[1], "TestSetupTrace", i) - assert.Contains(msg[2], "setup_test.go", i) + assert.Contains(t, msg[1], "TestSetupTrace", i) + assert.Contains(t, msg[2], "setup_test.go", i) } } } From 88481fc363af0b6c905868fa4ad70adf587638c5 Mon Sep 17 00:00:00 2001 From: Wolf Date: Sat, 4 Nov 2017 06:06:20 +0100 Subject: [PATCH 244/515] Make iterating over keys possible (#63) * Make iterating over keys possible * add test for cmap - test Keys() and Values() respectively * one cmap per test-case --- common/cmap.go | 11 ++++++++ common/cmap_test.go | 63 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 common/cmap_test.go diff --git a/common/cmap.go b/common/cmap.go index e2a140dd0..c65c27d4c 100644 --- a/common/cmap.go +++ b/common/cmap.go @@ -51,6 +51,17 @@ func (cm *CMap) Clear() { cm.m = make(map[string]interface{}) } +func (cm *CMap) Keys() []string { + cm.l.Lock() + defer cm.l.Unlock() + + keys := []string{} + for k := range cm.m { + keys = append(keys, k) + } + return keys +} + func (cm *CMap) Values() []interface{} { cm.l.Lock() defer cm.l.Unlock() diff --git a/common/cmap_test.go b/common/cmap_test.go new file mode 100644 index 000000000..a04f0a7d2 --- /dev/null +++ b/common/cmap_test.go @@ -0,0 +1,63 @@ +package common + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIterateKeysWithValues(t *testing.T) { + cmap := 
NewCMap() + + for i := 1; i <= 10; i++ { + cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) + } + + // Testing size + assert.Equal(t, cmap.Size(), 10, "overall size should be 10") + assert.Equal(t, len(cmap.Keys()), 10, "should be 10 keys") + assert.Equal(t, len(cmap.Values()), 10, "should be 10 values") + + // Iterating Keys, checking for matching Value + for _, key := range cmap.Keys() { + val := strings.Replace(key, "key", "value", -1) + assert.Equal(t, cmap.Get(key), val) + } + + // Test if all keys are within []Keys() + keys := cmap.Keys() + for i := 1; i <= 10; i++ { + assert.True(t, contains(keys, fmt.Sprintf("key%d", i)), "cmap.Keys() should contain key") + } + + // Delete 1 Key + cmap.Delete("key1") + + assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") + +} + +func TestContains(t *testing.T) { + cmap := NewCMap() + + cmap.Set("key1", "value1") + + // Test for known values + assert.True(t, cmap.Has("key1"), "should contain key1") + assert.Equal(t, cmap.Get("key1"), "value1", "key1.value() should be value1") + + // Test for unknown values + assert.False(t, cmap.Has("key2"), "should not contain key2") + assert.Nil(t, cmap.Get("key2"), "does not contain key2") +} + +func contains(array []string, value string) (bool) { + for _, val := range array { + if val == value { + return true + } + } + return false +} From b658294a13c95e726d0c9d5b1b782ff0fee777aa Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 4 Nov 2017 00:09:16 -0500 Subject: [PATCH 245/515] use assert.Contains in cmap_test --- common/cmap_test.go | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/common/cmap_test.go b/common/cmap_test.go index a04f0a7d2..c665a7f3e 100644 --- a/common/cmap_test.go +++ b/common/cmap_test.go @@ -16,27 +16,26 @@ func TestIterateKeysWithValues(t *testing.T) { } // Testing size - assert.Equal(t, cmap.Size(), 10, "overall size should 
be 10") - assert.Equal(t, len(cmap.Keys()), 10, "should be 10 keys") - assert.Equal(t, len(cmap.Values()), 10, "should be 10 values") + assert.Equal(t, 10, cmap.Size()) + assert.Equal(t, 10, len(cmap.Keys())) + assert.Equal(t, 10, len(cmap.Values())) // Iterating Keys, checking for matching Value for _, key := range cmap.Keys() { val := strings.Replace(key, "key", "value", -1) - assert.Equal(t, cmap.Get(key), val) + assert.Equal(t, val, cmap.Get(key)) } // Test if all keys are within []Keys() keys := cmap.Keys() for i := 1; i <= 10; i++ { - assert.True(t, contains(keys, fmt.Sprintf("key%d", i)), "cmap.Keys() should contain key") + assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") } // Delete 1 Key cmap.Delete("key1") assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") - } func TestContains(t *testing.T) { @@ -45,19 +44,10 @@ func TestContains(t *testing.T) { cmap.Set("key1", "value1") // Test for known values - assert.True(t, cmap.Has("key1"), "should contain key1") - assert.Equal(t, cmap.Get("key1"), "value1", "key1.value() should be value1") + assert.True(t, cmap.Has("key1")) + assert.Equal(t, "value1", cmap.Get("key1")) // Test for unknown values - assert.False(t, cmap.Has("key2"), "should not contain key2") - assert.Nil(t, cmap.Get("key2"), "does not contain key2") -} - -func contains(array []string, value string) (bool) { - for _, val := range array { - if val == value { - return true - } - } - return false + assert.False(t, cmap.Has("key2")) + assert.Nil(t, cmap.Get("key2")) } From 49d75e223eded02597b7a45d877bc7001d4d66f0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 3 Nov 2017 23:51:39 -0500 Subject: [PATCH 246/515] use os.Process#Kill (Fixes #73) --- common/os.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/common/os.go b/common/os.go index 81f703c7d..36fc969fa 100644 --- a/common/os.go +++ b/common/os.go 
@@ -35,6 +35,8 @@ func GoPath() string { return path } +// TrapSignal catches the SIGTERM and executes cb function. After that it exits +// with code 1. func TrapSignal(cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) @@ -50,10 +52,13 @@ func TrapSignal(cb func()) { select {} } -// Kill the running process by sending itself SIGTERM +// Kill the running process by sending itself SIGTERM. func Kill() error { - pid := os.Getpid() - return syscall.Kill(pid, syscall.SIGTERM) + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + return p.Signal(syscall.SIGTERM) } func Exit(s string) { From 4123d54bf69cdd764bbc2b53ae545b4367267645 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 6 Nov 2017 12:18:04 -0500 Subject: [PATCH 247/515] change service#Start to return just error (Refs #45) ``` @melekes yeah, bool is superfluous @ethanfrey If I remember correctly when I was writing test code, if I call Start() on a Service that is already running, it returns (false, nil). Only if I try to legitimately start it, but it fails in startup do I get an error. The distinction is quite important to make it safe for reentrant calls. The other approach would be to have a special error type like ErrAlreadyStarted, then check for that in your code explicitly. Kind of like if I make a db call in gorm, and get an error, I check if it is a RecordNotFound error, or whether there was a real error with the db query. @melekes Ah, I see. Thanks. 
I must say I like ErrAlreadyStarted approach more (not just in Golang) ``` --- common/service.go | 18 ++++++++++++------ events/events_test.go | 28 ++++++++++++++-------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/common/service.go b/common/service.go index 8d4de30a8..3973adab9 100644 --- a/common/service.go +++ b/common/service.go @@ -1,13 +1,19 @@ package common import ( + "errors" "sync/atomic" "github.com/tendermint/tmlibs/log" ) +var ( + ErrAlreadyStarted = errors.New("already started") + ErrAlreadyStopped = errors.New("already stopped") +) + type Service interface { - Start() (bool, error) + Start() error OnStart() error Stop() bool @@ -94,11 +100,11 @@ func (bs *BaseService) SetLogger(l log.Logger) { } // Implements Servce -func (bs *BaseService) Start() (bool, error) { +func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) - return false, nil + return ErrAlreadyStopped } else { bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) } @@ -106,12 +112,12 @@ func (bs *BaseService) Start() (bool, error) { if err != nil { // revert flag atomic.StoreUint32(&bs.started, 0) - return false, err + return err } - return true, err + return nil } else { bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) - return false, nil + return ErrAlreadyStarted } } diff --git a/events/events_test.go b/events/events_test.go index dee50e5bd..87db2a304 100644 --- a/events/events_test.go +++ b/events/events_test.go @@ -13,8 +13,8 @@ import ( // listener to an event, and sends a string "data". 
func TestAddListenerForEventFireOnce(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } messages := make(chan EventData) @@ -33,8 +33,8 @@ func TestAddListenerForEventFireOnce(t *testing.T) { // listener to an event, and sends a thousand integers. func TestAddListenerForEventFireMany(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum := make(chan uint64) @@ -62,8 +62,8 @@ func TestAddListenerForEventFireMany(t *testing.T) { // of the three events. func TestAddListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum := make(chan uint64) @@ -107,8 +107,8 @@ func TestAddListenerForDifferentEvents(t *testing.T) { // for each of the three events. func TestAddDifferentListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) @@ -167,8 +167,8 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { // the listener and fires a thousand integers for the second event. 
func TestAddAndRemoveListener(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) @@ -212,8 +212,8 @@ func TestAddAndRemoveListener(t *testing.T) { // TestRemoveListener does basic tests on adding and removing func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } count := 10 @@ -265,8 +265,8 @@ func TestRemoveListener(t *testing.T) { // `go test -race`, to examine for possible race conditions. func TestRemoveListenersAsync(t *testing.T) { evsw := NewEventSwitch() - started, err := evsw.Start() - if !started || err != nil { + err := evsw.Start() + if err != nil { t.Errorf("Failed to start EventSwitch, error: %v", err) } doneSum1 := make(chan uint64) From e6164d40528b8621b01cacdb82efe72dee01eeb0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 6 Nov 2017 12:47:23 -0500 Subject: [PATCH 248/515] change service#Stop to be similar to Start --- common/service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/common/service.go b/common/service.go index 3973adab9..32f531d1a 100644 --- a/common/service.go +++ b/common/service.go @@ -16,7 +16,7 @@ type Service interface { Start() error OnStart() error - Stop() bool + Stop() error OnStop() Reset() (bool, error) @@ -127,15 +127,15 @@ func (bs *BaseService) Start() error { func (bs *BaseService) OnStart() error { return nil } // Implements Service -func (bs *BaseService) Stop() bool { +func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) bs.impl.OnStop() close(bs.Quit) - return true + return nil } else { bs.Logger.Debug(Fmt("Stopping %v (ignoring: already 
stopped)", bs.name), "impl", bs.impl) - return false + return ErrAlreadyStopped } } From 4b989151ed7008c2412b94ed37bf4c974b33b6f7 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 6 Nov 2017 14:18:42 -0500 Subject: [PATCH 249/515] log logger's errors (Refs #29) --- log/tm_logger.go | 15 ++++++++++++--- log/tm_logger_test.go | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/log/tm_logger.go b/log/tm_logger.go index dc6932dd8..d49e8d22b 100644 --- a/log/tm_logger.go +++ b/log/tm_logger.go @@ -52,19 +52,28 @@ func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) te // Info logs a message at level Info. func (l *tmLogger) Info(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Info(l.srcLogger) - kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } } // Debug logs a message at level Debug. func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Debug(l.srcLogger) - kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } } // Error logs a message at level Error. func (l *tmLogger) Error(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Error(l.srcLogger) - kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...) 
+ lWithMsg := kitlog.With(lWithLevel, msgKey, msg) + if err := lWithMsg.Log(keyvals...); err != nil { + lWithMsg.Log("err", err) + } } // With returns a new contextual logger with keyvals prepended to those passed diff --git a/log/tm_logger_test.go b/log/tm_logger_test.go index 8cd2f8274..b2b600ad2 100644 --- a/log/tm_logger_test.go +++ b/log/tm_logger_test.go @@ -1,12 +1,26 @@ package log_test import ( + "bytes" "io/ioutil" + "strings" "testing" + "github.com/go-logfmt/logfmt" "github.com/tendermint/tmlibs/log" ) +func TestLoggerLogsItsErrors(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMLogger(&buf) + logger.Info("foo", "baz baz", "bar") + msg := strings.TrimSpace(buf.String()) + if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { + t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) + } +} + func BenchmarkTMLoggerSimple(b *testing.B) { benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) } From 69447564b8dedd3da5368e901b0afe9d76e2e9a3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 6 Nov 2017 15:44:21 -0500 Subject: [PATCH 250/515] encode complex types as "%+v" (Refs #18) --- log/tmfmt_logger.go | 8 ++++++-- log/tmfmt_logger_test.go | 6 ++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/log/tmfmt_logger.go b/log/tmfmt_logger.go index 2b464a6b0..d03979718 100644 --- a/log/tmfmt_logger.go +++ b/log/tmfmt_logger.go @@ -35,7 +35,8 @@ type tmfmtLogger struct { } // NewTMFmtLogger returns a logger that encodes keyvals to the Writer in -// Tendermint custom format. +// Tendermint custom format. Note complex types (structs, maps, slices) +// formatted as "%+v". // // Each log event produces no more than one call to w.Write. 
// The passed Writer must be safe for concurrent use by multiple goroutines if @@ -103,7 +104,10 @@ KeyvalueLoop: } } - if err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]); err != nil { + err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) + if err == logfmt.ErrUnsupportedValueType { + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) + } else if err != nil { return err } } diff --git a/log/tmfmt_logger_test.go b/log/tmfmt_logger_test.go index 62eb32a03..a07b323c6 100644 --- a/log/tmfmt_logger_test.go +++ b/log/tmfmt_logger_test.go @@ -30,8 +30,10 @@ func TestTMFmtLogger(t *testing.T) { assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) buf.Reset() - err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}) - assert.NotNil(t, err) + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) buf.Reset() if err := logger.Log("level", "error"); err != nil { From 8481c49c824e2d71f9c2d00ff5a8d1ee7ad045d0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 9 Nov 2017 17:42:32 -0500 Subject: [PATCH 251/515] CacheDB (#67) * Add CacheDB & SimpleMap * Generic memBatch; Fix cLevelDB tests * CacheWrap() for CacheDB and MemDB * Change Iterator to match LeviGo Iterator * Fixes from review * cacheWrapWriteMutex and some race fixes * Use tmlibs/common * NewCWWMutex is exposed. 
DB can be CacheWrap'd * Remove GetOK, not needed * Fsdb (#72) * Add FSDB * Review fixes from Anton * Review changes * Fixes from review --- .gitignore | 2 +- Makefile | 19 ++-- db/backend_test.go | 43 +++++++ db/c_level_db.go | 103 +++++++++++++---- db/c_level_db_test.go | 8 +- db/cache_db.go | 230 +++++++++++++++++++++++++++++++++++++ db/cache_db_test.go | 83 ++++++++++++++ db/common_test.go | 172 ++++++++++++++++++++++++++++ db/db.go | 57 +++++++++- db/fsdb.go | 231 ++++++++++++++++++++++++++++++++++++++ db/go_level_db.go | 115 +++++++++++++------ db/go_level_db_test.go | 8 +- db/mem_batch.go | 50 +++++++++ db/mem_db.go | 160 ++++++++++++++------------ db/mem_db_test.go | 2 +- db/stats.go | 7 ++ db/util.go | 82 ++++++++++++++ db/util_test.go | 209 ++++++++++++++++++++++++++++++++++ merkle/kvpairs.go | 48 ++++++++ merkle/simple_map.go | 26 +++++ merkle/simple_map_test.go | 47 ++++++++ merkle/simple_proof.go | 131 +++++++++++++++++++++ merkle/simple_tree.go | 184 ------------------------------ 23 files changed, 1681 insertions(+), 336 deletions(-) create mode 100644 db/backend_test.go create mode 100644 db/cache_db.go create mode 100644 db/cache_db_test.go create mode 100644 db/common_test.go create mode 100644 db/fsdb.go create mode 100644 db/mem_batch.go create mode 100644 db/stats.go create mode 100644 db/util.go create mode 100644 db/util_test.go create mode 100644 merkle/kvpairs.go create mode 100644 merkle/simple_map.go create mode 100644 merkle/simple_map_test.go create mode 100644 merkle/simple_proof.go diff --git a/.gitignore b/.gitignore index e0a06eaf6..a2ebfde29 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -*.swp +*.sw[opqr] vendor .glide diff --git a/Makefile b/Makefile index 25773ed36..a24306f32 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ all: test NOVENDOR = go list github.com/tendermint/tmlibs/... 
| grep -v /vendor/ test: - go test `glide novendor` + go test -tags gcc `glide novendor` get_vendor_deps: ensure_tools @rm -rf vendor/ @@ -32,20 +32,19 @@ metalinter_test: ensure_tools --enable=gas \ --enable=goconst \ --enable=gosimple \ - --enable=ineffassign \ - --enable=interfacer \ + --enable=ineffassign \ + --enable=interfacer \ --enable=megacheck \ - --enable=misspell \ - --enable=staticcheck \ + --enable=misspell \ + --enable=staticcheck \ --enable=safesql \ - --enable=structcheck \ - --enable=unconvert \ + --enable=structcheck \ + --enable=unconvert \ --enable=unused \ - --enable=varcheck \ + --enable=varcheck \ --enable=vetshadow \ --enable=vet \ ./... - #--enable=aligncheck \ #--enable=dupl \ #--enable=errcheck \ @@ -53,4 +52,4 @@ metalinter_test: ensure_tools #--enable=goimports \ #--enable=golint \ <== comments on anything exported #--enable=gotype \ - #--enable=unparam \ + #--enable=unparam \ diff --git a/db/backend_test.go b/db/backend_test.go new file mode 100644 index 000000000..b4ffecdc6 --- /dev/null +++ b/db/backend_test.go @@ -0,0 +1,43 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tmlibs/common" +) + +func testBackend(t *testing.T, backend string) { + // Default + dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) + defer dir.Close() + db := NewDB("testdb", backend, dirname) + require.Nil(t, db.Get([]byte(""))) + require.Nil(t, db.Get(nil)) + + // Set empty ("") + db.Set([]byte(""), []byte("")) + require.NotNil(t, db.Get([]byte(""))) + require.NotNil(t, db.Get(nil)) + require.Empty(t, db.Get([]byte(""))) + require.Empty(t, db.Get(nil)) + + // Set empty (nil) + db.Set([]byte(""), nil) + require.NotNil(t, db.Get([]byte(""))) + require.NotNil(t, db.Get(nil)) + require.Empty(t, db.Get([]byte(""))) + require.Empty(t, db.Get(nil)) + + // Delete + db.Delete([]byte("")) + require.Nil(t, db.Get([]byte(""))) + require.Nil(t, db.Get(nil)) +} + +func TestBackends(t 
*testing.T) { + testBackend(t, CLevelDBBackendStr) + testBackend(t, GoLevelDBBackendStr) + testBackend(t, MemDBBackendStr) +} diff --git a/db/c_level_db.go b/db/c_level_db.go index b1ae49a12..95651c0a2 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -7,8 +7,6 @@ import ( "path" "github.com/jmhodges/levigo" - - . "github.com/tendermint/tmlibs/common" ) func init() { @@ -24,6 +22,8 @@ type CLevelDB struct { ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions + + cwwMutex } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { @@ -45,6 +45,8 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { ro: ro, wo: wo, woSync: woSync, + + cwwMutex: NewCWWMutex(), } return database, nil } @@ -52,7 +54,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { func (db *CLevelDB) Get(key []byte) []byte { res, err := db.db.Get(db.ro, key) if err != nil { - PanicCrisis(err) + panic(err) } return res } @@ -60,28 +62,28 @@ func (db *CLevelDB) Get(key []byte) []byte { func (db *CLevelDB) Set(key []byte, value []byte) { err := db.db.Put(db.wo, key, value) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) SetSync(key []byte, value []byte) { err := db.db.Put(db.woSync, key, value) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) Delete(key []byte) { err := db.db.Delete(db.wo, key) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) DeleteSync(key []byte) { err := db.db.Delete(db.woSync, key) if err != nil { - PanicCrisis(err) + panic(err) } } @@ -97,11 +99,11 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - iter := db.db.NewIterator(db.ro) - defer iter.Close() - for iter.Seek(nil); iter.Valid(); iter.Next() { - key := iter.Key() - value := iter.Value() + itr := db.Iterator() + defer itr.Close() + for itr.Seek(nil); itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) } } @@ -112,25 +114,24 
@@ func (db *CLevelDB) Stats() map[string]string { stats := make(map[string]string) for _, key := range keys { - str, err := db.db.GetProperty(key) - if err == nil { - stats[key] = str - } + str := db.db.PropertyValue(key) + stats[key] = str } return stats } -func (db *CLevelDB) Iterator() Iterator { - return db.db.NewIterator(nil, nil) +func (db *CLevelDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) } +//---------------------------------------- +// Batch + func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() return &cLevelDBBatch{db, batch} } -//-------------------------------------------------------------------------------- - type cLevelDBBatch struct { db *CLevelDB batch *levigo.WriteBatch @@ -147,6 +148,66 @@ func (mBatch *cLevelDBBatch) Delete(key []byte) { func (mBatch *cLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) if err != nil { - PanicCrisis(err) + panic(err) } } + +//---------------------------------------- +// Iterator + +func (db *CLevelDB) Iterator() Iterator { + itr := db.db.NewIterator(db.ro) + itr.Seek([]byte{0x00}) + return cLevelDBIterator{itr} +} + +type cLevelDBIterator struct { + itr *levigo.Iterator +} + +func (c cLevelDBIterator) Seek(key []byte) { + if key == nil { + key = []byte{0x00} + } + c.itr.Seek(key) +} + +func (c cLevelDBIterator) Valid() bool { + return c.itr.Valid() +} + +func (c cLevelDBIterator) Key() []byte { + if !c.itr.Valid() { + panic("cLevelDBIterator Key() called when invalid") + } + return c.itr.Key() +} + +func (c cLevelDBIterator) Value() []byte { + if !c.itr.Valid() { + panic("cLevelDBIterator Value() called when invalid") + } + return c.itr.Value() +} + +func (c cLevelDBIterator) Next() { + if !c.itr.Valid() { + panic("cLevelDBIterator Next() called when invalid") + } + c.itr.Next() +} + +func (c cLevelDBIterator) Prev() { + if !c.itr.Valid() { + panic("cLevelDBIterator Prev() called when invalid") + } + c.itr.Prev() +} + +func (c 
cLevelDBIterator) Close() { + c.itr.Close() +} + +func (c cLevelDBIterator) GetError() error { + return c.itr.GetError() +} diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index e7336cc5f..864362332 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites2(b *testing.B) { @@ -18,7 +18,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -30,7 +30,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < b.N; i++ { // Write something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) internal[idx] += 1 val := internal[idx] idxBytes := int642Bytes(int64(idx)) @@ -43,7 +43,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { } // Read something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := db.Get(idxBytes) diff --git a/db/cache_db.go b/db/cache_db.go new file mode 100644 index 000000000..a41680c1b --- /dev/null +++ b/db/cache_db.go @@ -0,0 +1,230 @@ +package db + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" +) + +// If value is nil but deleted is false, +// it means the parent doesn't have the key. +// (No need to delete upon Write()) +type cDBValue struct { + value []byte + deleted bool + dirty bool +} + +// CacheDB wraps an in-memory cache around an underlying DB. +type CacheDB struct { + mtx sync.Mutex + cache map[string]cDBValue + parent DB + lockVersion interface{} + + cwwMutex +} + +// Needed by MultiStore.CacheWrap(). 
+var _ atomicSetDeleter = (*CacheDB)(nil) + +// Users should typically not be required to call NewCacheDB directly, as the +// DB implementations here provide a .CacheWrap() function already. +// `lockVersion` is typically provided by parent.GetWriteLockVersion(). +func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { + db := &CacheDB{ + cache: make(map[string]cDBValue), + parent: parent, + lockVersion: lockVersion, + cwwMutex: NewCWWMutex(), + } + return db +} + +func (db *CacheDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + + dbValue, ok := db.cache[string(key)] + if !ok { + data := db.parent.Get(key) + dbValue = cDBValue{value: data, deleted: false, dirty: false} + db.cache[string(key)] = dbValue + } + return dbValue.value +} + +func (db *CacheDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *CacheDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *CacheDB) SetNoLock(key []byte, value []byte) { + db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} +} + +func (db *CacheDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *CacheDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *CacheDB) DeleteNoLock(key []byte) { + db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} +} + +func (db *CacheDB) Close() { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.parent.Close() +} + +func (db *CacheDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + fmt.Println("CacheDB\ncache:") + for key, value := range db.cache { + fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) + } + fmt.Println("\nparent:") + db.parent.Print() +} + +func (db *CacheDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + stats := make(map[string]string) + 
stats["cache.size"] = fmt.Sprintf("%d", len(db.cache)) + stats["cache.lock_version"] = fmt.Sprintf("%v", db.lockVersion) + mergeStats(db.parent.Stats(), stats, "parent.") + return stats +} + +func (db *CacheDB) Iterator() Iterator { + panic("CacheDB.Iterator() not yet supported") +} + +func (db *CacheDB) NewBatch() Batch { + return &memBatch{db, nil} +} + +// Implements `atomicSetDeleter` for Batch support. +func (db *CacheDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +// Write writes pending updates to the parent database and clears the cache. +func (db *CacheDB) Write() { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Optional sanity check to ensure that CacheDB is valid + if parent, ok := db.parent.(WriteLocker); ok { + if parent.TryWriteLock(db.lockVersion) { + // All good! + } else { + panic("CacheDB.Write() failed. Did this CacheDB expire?") + } + } + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys := make([]string, 0, len(db.cache)) + for key, dbValue := range db.cache { + if dbValue.dirty { + keys = append(keys, key) + } + } + sort.Strings(keys) + + batch := db.parent.NewBatch() + for _, key := range keys { + dbValue := db.cache[key] + if dbValue.deleted { + batch.Delete([]byte(key)) + } else if dbValue.value == nil { + // Skip, it already doesn't exist in parent. + } else { + batch.Set([]byte(key), dbValue.value) + } + } + batch.Write() + + // Clear the cache + db.cache = make(map[string]cDBValue) +} + +//---------------------------------------- +// To CacheWrap this CacheDB further. + +func (db *CacheDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +// If the parent parent DB implements this, (e.g. such as a CacheDB parent to a +// CacheDB child), CacheDB will call `parent.TryWriteLock()` before attempting +// to write. 
+type WriteLocker interface { + GetWriteLockVersion() (lockVersion interface{}) + TryWriteLock(lockVersion interface{}) bool +} + +// Implements TryWriteLocker. Embed this in DB structs if desired. +type cwwMutex struct { + mtx sync.Mutex + // CONTRACT: reading/writing to `*written` should use `atomic.*`. + // CONTRACT: replacing `written` with another *int32 should use `.mtx`. + written *int32 +} + +func NewCWWMutex() cwwMutex { + return cwwMutex{ + written: new(int32), + } +} + +func (cww *cwwMutex) GetWriteLockVersion() interface{} { + cww.mtx.Lock() + defer cww.mtx.Unlock() + + // `written` works as a "version" object because it gets replaced upon + // successful TryWriteLock. + return cww.written +} + +func (cww *cwwMutex) TryWriteLock(version interface{}) bool { + cww.mtx.Lock() + defer cww.mtx.Unlock() + + if version != cww.written { + return false // wrong "WriteLockVersion" + } + if !atomic.CompareAndSwapInt32(cww.written, 0, 1) { + return false // already written + } + + // New "WriteLockVersion" + cww.written = new(int32) + return true +} diff --git a/db/cache_db_test.go b/db/cache_db_test.go new file mode 100644 index 000000000..1de08e3f0 --- /dev/null +++ b/db/cache_db_test.go @@ -0,0 +1,83 @@ +package db + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func bz(s string) []byte { return []byte(s) } + +func TestCacheDB(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + + require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") + + mem.Set(bz("key1"), bz("value1")) + cdb.Set(bz("key1"), bz("value1")) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + + cdb.Set(bz("key1"), bz("value2")) + require.Equal(t, bz("value2"), cdb.Get(bz("key1"))) + require.Equal(t, bz("value1"), mem.Get(bz("key1"))) + + cdb.Write() + require.Equal(t, bz("value2"), mem.Get(bz("key1"))) + + require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") + + cdb = mem.CacheWrap().(*CacheDB) + 
cdb.Delete(bz("key1")) + require.Empty(t, cdb.Get(bz("key1"))) + require.Equal(t, mem.Get(bz("key1")), bz("value2")) + + cdb.Write() + require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") + require.Empty(t, mem.Get(bz("key1")), "Expected `key1` to be empty") +} + +func TestCacheDBWriteLock(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb.Write() }) + require.Panics(t, func() { cdb.Write() }) + cdb = mem.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb.Write() }) + require.Panics(t, func() { cdb.Write() }) +} + +func TestCacheDBWriteLockNested(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + cdb2 := cdb.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb2.Write() }) + require.Panics(t, func() { cdb2.Write() }) + cdb2 = cdb.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb2.Write() }) + require.Panics(t, func() { cdb2.Write() }) +} + +func TestCacheDBNested(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + cdb.Set(bz("key1"), bz("value1")) + + require.Empty(t, mem.Get(bz("key1"))) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + cdb2 := cdb.CacheWrap().(*CacheDB) + require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) + + cdb2.Set(bz("key1"), bz("VALUE2")) + require.Equal(t, []byte(nil), mem.Get(bz("key1"))) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + require.Equal(t, bz("VALUE2"), cdb2.Get(bz("key1"))) + + cdb2.Write() + require.Equal(t, []byte(nil), mem.Get(bz("key1"))) + require.Equal(t, bz("VALUE2"), cdb.Get(bz("key1"))) + + cdb.Write() + require.Equal(t, bz("VALUE2"), mem.Get(bz("key1"))) + +} diff --git a/db/common_test.go b/db/common_test.go new file mode 100644 index 000000000..505864c20 --- /dev/null +++ b/db/common_test.go @@ -0,0 +1,172 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tmlibs/common" +) + +func checkValid(t 
*testing.T, itr Iterator, expected bool) { + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkNext(t *testing.T, itr Iterator, expected bool) { + itr.Next() + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkNextPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") +} + +func checkPrevPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Prev() }, "checkPrevPanics expected panic but didn't") +} + +func checkPrev(t *testing.T, itr Iterator, expected bool) { + itr.Prev() + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { + k, v := itr.Key(), itr.Value() + assert.Exactly(t, key, k) + assert.Exactly(t, value, v) +} + +func checkInvalid(t *testing.T, itr Iterator) { + checkValid(t, itr, false) + checkKeyPanics(t, itr) + checkValuePanics(t, itr) + checkNextPanics(t, itr) + checkPrevPanics(t, itr) +} + +func checkKeyPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") +} + +func checkValuePanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") +} + +func newTempDB(t *testing.T, backend string) (db DB) { + dir, dirname := cmn.Tempdir("test_go_iterator") + db = NewDB("testdb", backend, dirname) + dir.Close() + return db +} + +func TestDBIteratorSingleKey(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator() + + checkValid(t, itr, true) + checkNext(t, itr, false) + checkValid(t, itr, false) + checkNextPanics(t, itr) + + // Once invalid... 
+ checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorTwoKeys(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + db.SetSync(bz("2"), bz("value_1")) + + { // Fail by calling Next too much + itr := db.Iterator() + checkValid(t, itr, true) + + for i := 0; i < 10; i++ { + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkPrev(t, itr, true) + checkValid(t, itr, true) + } + + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkNext(t, itr, false) + checkValid(t, itr, false) + + checkNextPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + } + + { // Fail by calling Prev too much + itr := db.Iterator() + checkValid(t, itr, true) + + for i := 0; i < 10; i++ { + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkPrev(t, itr, true) + checkValid(t, itr, true) + } + + checkPrev(t, itr, false) + checkValid(t, itr, false) + + checkPrevPanics(t, itr) + + // Once invalid... 
+ checkInvalid(t, itr) + } + }) + } +} + +func TestDBIteratorEmpty(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator() + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorEmptySeek(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator() + itr.Seek(bz("1")) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorBadSeek(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator() + itr.Seek(bz("2")) + + checkInvalid(t, itr) + }) + } +} diff --git a/db/db.go b/db/db.go index 8156c1e92..6c8bd4800 100644 --- a/db/db.go +++ b/db/db.go @@ -3,7 +3,7 @@ package db import . "github.com/tendermint/tmlibs/common" type DB interface { - Get([]byte) []byte + Get([]byte) []byte // NOTE: returns nil iff never set or deleted. Set([]byte, []byte) SetSync([]byte, []byte) Delete([]byte) @@ -11,11 +11,15 @@ type DB interface { Close() NewBatch() Batch Iterator() Iterator - IteratorPrefix([]byte) Iterator // For debugging Print() + + // Stats returns a map of property values for all keys and the size of the cache. Stats() map[string]string + + // CacheWrap wraps the DB w/ a CacheDB. + CacheWrap() interface{} } type Batch interface { @@ -24,23 +28,66 @@ type Batch interface { Write() } +/* + Usage: + + for itr.Seek(mykey); itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + .... + } +*/ type Iterator interface { - Next() bool + // Seek moves the iterator the position of the key given or, if the key + // doesn't exist, the next key that does exist in the database. If the key + // doesn't exist, and there is no next key, the Iterator becomes invalid. 
+ Seek(key []byte) + + // Valid returns false only when an Iterator has iterated past either the + // first or the last key in the database. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Next() + + // Prev moves the iterator to the previous sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Prev() + + // Key returns the key of the cursor. + // + // If Valid returns false, this method will panic. Key() []byte + + // Value returns the key of the cursor. + // + // If Valid returns false, this method will panic. Value() []byte - Release() - Error() error + // GetError returns an IteratorError from LevelDB if it had one during + // iteration. + // + // This method is safe to call when Valid returns false. + GetError() error + + // Close deallocates the given Iterator. + Close() } //----------------------------------------------------------------------------- +// Main entry const ( LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. 
CLevelDBBackendStr = "cleveldb" GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" + FSDBBackendStr = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) diff --git a/db/fsdb.go b/db/fsdb.go new file mode 100644 index 000000000..65ac3c38e --- /dev/null +++ b/db/fsdb.go @@ -0,0 +1,231 @@ +package db + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "sync" + + "github.com/pkg/errors" +) + +const ( + keyPerm = os.FileMode(0600) + dirPerm = os.FileMode(0700) +) + +func init() { + registerDBCreator(FSDBBackendStr, func(name string, dir string) (DB, error) { + dbPath := filepath.Join(dir, name+".db") + return NewFSDB(dbPath), nil + }, false) +} + +// It's slow. +type FSDB struct { + mtx sync.Mutex + dir string + + cwwMutex +} + +func NewFSDB(dir string) *FSDB { + err := os.MkdirAll(dir, dirPerm) + if err != nil { + panic(errors.Wrap(err, "Creating FSDB dir "+dir)) + } + database := &FSDB{ + dir: dir, + cwwMutex: NewCWWMutex(), + } + return database +} + +func (db *FSDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + + path := db.nameToPath(key) + value, err := read(path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + } + return value +} + +func (db *FSDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *FSDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter. 
+func (db *FSDB) SetNoLock(key []byte, value []byte) { + if value == nil { + value = []byte{} + } + path := db.nameToPath(key) + err := write(path, value) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Setting key %s (0x%X)", string(key), key))) + } +} + +func (db *FSDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *FSDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) DeleteNoLock(key []byte) { + err := remove(string(key)) + if os.IsNotExist(err) { + return + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Removing key %s (0x%X)", string(key), key))) + } +} + +func (db *FSDB) Close() { + // Nothing to do. +} + +func (db *FSDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Print not yet implemented") +} + +func (db *FSDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Stats not yet implemented") +} + +func (db *FSDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Not sure we would ever want to try... + // It doesn't seem easy for general filesystems. + panic("FSDB.NewBatch not yet implemented") +} + +func (db *FSDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +func (db *FSDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +func (db *FSDB) Iterator() Iterator { + it := newMemDBIterator() + it.db = db + it.cur = 0 + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys, err := list(db.dir) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it +} + +func (db *FSDB) nameToPath(name []byte) string { + n := url.PathEscape(string(name)) + return path.Join(db.dir, n) +} + +// Read some bytes to a file. 
+// CONTRACT: returns os errors directly without wrapping. +func read(path string) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + d, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + return d, nil +} + +// Write some bytes from a file. +// CONTRACT: returns os errors directly without wrapping. +func write(path string, d []byte) error { + f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, keyPerm) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write(d) + if err != nil { + return err + } + err = f.Sync() + return err +} + +// Remove a file. +// CONTRACT: returns os errors directly without wrapping. +func remove(path string) error { + return os.Remove(path) +} + +// List files of a path. +// Paths will NOT include dir as the prefix. +// CONTRACT: returns os errors directly without wrapping. +func list(dirPath string) (paths []string, err error) { + dir, err := os.Open(dirPath) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for i, name := range names { + n, err := url.PathUnescape(name) + if err != nil { + return nil, fmt.Errorf("Failed to unescape %s while listing", name) + } + names[i] = n + } + return names, nil +} diff --git a/db/go_level_db.go b/db/go_level_db.go index 4abd76112..d9cec519c 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,7 +8,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" . 
"github.com/tendermint/tmlibs/common" ) @@ -23,6 +22,8 @@ func init() { type GoLevelDB struct { db *leveldb.DB + + cwwMutex } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { @@ -31,7 +32,10 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { if err != nil { return nil, err } - database := &GoLevelDB{db: db} + database := &GoLevelDB{ + db: db, + cwwMutex: NewCWWMutex(), + } return database, nil } @@ -117,12 +121,59 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } +func (db *GoLevelDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +//---------------------------------------- +// Batch + +func (db *GoLevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &goLevelDBBatch{db, batch} +} + +type goLevelDBBatch struct { + db *GoLevelDB + batch *leveldb.Batch +} + +func (mBatch *goLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +func (mBatch *goLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +func (mBatch *goLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.batch, nil) + if err != nil { + PanicCrisis(err) + } +} + +//---------------------------------------- +// Iterator + +func (db *GoLevelDB) Iterator() Iterator { + itr := &goLevelDBIterator{ + source: db.db.NewIterator(nil, nil), + } + itr.Seek(nil) + return itr +} + type goLevelDBIterator struct { - source iterator.Iterator + source iterator.Iterator + invalid bool } // Key returns a copy of the current key. func (it *goLevelDBIterator) Key() []byte { + if !it.Valid() { + panic("goLevelDBIterator Key() called when invalid") + } key := it.source.Key() k := make([]byte, len(key)) copy(k, key) @@ -132,6 +183,9 @@ func (it *goLevelDBIterator) Key() []byte { // Value returns a copy of the current value. 
func (it *goLevelDBIterator) Value() []byte { + if !it.Valid() { + panic("goLevelDBIterator Value() called when invalid") + } val := it.source.Value() v := make([]byte, len(val)) copy(v, val) @@ -139,49 +193,36 @@ func (it *goLevelDBIterator) Value() []byte { return v } -func (it *goLevelDBIterator) Error() error { +func (it *goLevelDBIterator) GetError() error { return it.source.Error() } -func (it *goLevelDBIterator) Next() bool { - return it.source.Next() -} - -func (it *goLevelDBIterator) Release() { - it.source.Release() -} - -func (db *GoLevelDB) Iterator() Iterator { - return &goLevelDBIterator{db.db.NewIterator(nil, nil)} +func (it *goLevelDBIterator) Seek(key []byte) { + it.source.Seek(key) } -func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator { - return &goLevelDBIterator{db.db.NewIterator(util.BytesPrefix(prefix), nil)} -} - -func (db *GoLevelDB) NewBatch() Batch { - batch := new(leveldb.Batch) - return &goLevelDBBatch{db, batch} -} - -//-------------------------------------------------------------------------------- - -type goLevelDBBatch struct { - db *GoLevelDB - batch *leveldb.Batch +func (it *goLevelDBIterator) Valid() bool { + if it.invalid { + return false + } + it.invalid = !it.source.Valid() + return !it.invalid } -func (mBatch *goLevelDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) +func (it *goLevelDBIterator) Next() { + if !it.Valid() { + panic("goLevelDBIterator Next() called when invalid") + } + it.source.Next() } -func (mBatch *goLevelDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) +func (it *goLevelDBIterator) Prev() { + if !it.Valid() { + panic("goLevelDBIterator Prev() called when invalid") + } + it.source.Prev() } -func (mBatch *goLevelDBBatch) Write() { - err := mBatch.db.db.Write(mBatch.batch, nil) - if err != nil { - PanicCrisis(err) - } +func (it *goLevelDBIterator) Close() { + it.source.Release() } diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index 2cd3192c3..88b6730f3 100644 --- 
a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites(b *testing.B) { @@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -29,7 +29,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < b.N; i++ { // Write something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) internal[idx] += 1 val := internal[idx] idxBytes := int642Bytes(int64(idx)) @@ -42,7 +42,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { } // Read something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := db.Get(idxBytes) diff --git a/db/mem_batch.go b/db/mem_batch.go new file mode 100644 index 000000000..7072d931a --- /dev/null +++ b/db/mem_batch.go @@ -0,0 +1,50 @@ +package db + +import "sync" + +type atomicSetDeleter interface { + Mutex() *sync.Mutex + SetNoLock(key, value []byte) + DeleteNoLock(key []byte) +} + +type memBatch struct { + db atomicSetDeleter + ops []operation +} + +type opType int + +const ( + opTypeSet opType = 1 + opTypeDelete opType = 2 +) + +type operation struct { + opType + key []byte + value []byte +} + +func (mBatch *memBatch) Set(key, value []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +} + +func (mBatch *memBatch) Delete(key []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +} + +func (mBatch *memBatch) Write() { + mtx := mBatch.db.Mutex() + mtx.Lock() + defer mtx.Unlock() + + for _, op := range mBatch.ops { + switch op.opType { + case opTypeSet: + 
mBatch.db.SetNoLock(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLock(op.key) + } + } +} diff --git a/db/mem_db.go b/db/mem_db.go index 077427509..30697adcf 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -1,8 +1,9 @@ package db import ( + "bytes" "fmt" - "strings" + "sort" "sync" ) @@ -15,40 +16,63 @@ func init() { type MemDB struct { mtx sync.Mutex db map[string][]byte + + cwwMutex } func NewMemDB() *MemDB { - database := &MemDB{db: make(map[string][]byte)} + database := &MemDB{ + db: make(map[string][]byte), + cwwMutex: NewCWWMutex(), + } return database } func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() + return db.db[string(key)] } func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - db.db[string(key)] = value + + db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter +func (db *MemDB) SetNoLock(key []byte, value []byte) { + if value == nil { + value = []byte{} + } db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + + delete(db.db, string(key)) +} + +// NOTE: Implements atomicSetDeleter +func (db *MemDB) DeleteNoLock(key []byte) { delete(db.db, string(key)) } @@ -63,115 +87,113 @@ func (db *MemDB) Close() { func (db *MemDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() + for key, value := range db.db { fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) } } func (db *MemDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + stats := make(map[string]string) stats["database.type"] = "memDB" + stats["database.size"] = fmt.Sprintf("%d", len(db.db)) return stats } -type memDBIterator struct { - last int - keys []string - db *MemDB -} - -func newMemDBIterator() *memDBIterator { - return 
&memDBIterator{} -} - -func (it *memDBIterator) Next() bool { - if it.last >= len(it.keys)-1 { - return false - } - it.last++ - return true -} +func (db *MemDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() -func (it *memDBIterator) Key() []byte { - return []byte(it.keys[it.last]) + return &memBatch{db, nil} } -func (it *memDBIterator) Value() []byte { - return it.db.Get(it.Key()) +func (db *MemDB) Mutex() *sync.Mutex { + return &(db.mtx) } -func (it *memDBIterator) Release() { - it.db = nil - it.keys = nil +func (db *MemDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) } -func (it *memDBIterator) Error() error { - return nil -} +//---------------------------------------- func (db *MemDB) Iterator() Iterator { - return db.IteratorPrefix([]byte{}) -} - -func (db *MemDB) IteratorPrefix(prefix []byte) Iterator { it := newMemDBIterator() it.db = db - it.last = -1 + it.cur = 0 db.mtx.Lock() defer db.mtx.Unlock() - // unfortunately we need a copy of all of the keys + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. for key, _ := range db.db { - if strings.HasPrefix(key, string(prefix)) { - it.keys = append(it.keys, key) - } + it.keys = append(it.keys, key) } + sort.Strings(it.keys) return it } -func (db *MemDB) NewBatch() Batch { - return &memDBBatch{db, nil} +type memDBIterator struct { + cur int + keys []string + db DB } -//-------------------------------------------------------------------------------- - -type memDBBatch struct { - db *MemDB - ops []operation +func newMemDBIterator() *memDBIterator { + return &memDBIterator{} } -type opType int - -const ( - opTypeSet = 1 - opTypeDelete = 2 -) +func (it *memDBIterator) Seek(key []byte) { + for i, ik := range it.keys { + it.cur = i + if bytes.Compare(key, []byte(ik)) <= 0 { + return + } + } + it.cur += 1 // If not found, becomes invalid. 
+} -type operation struct { - opType - key []byte - value []byte +func (it *memDBIterator) Valid() bool { + return 0 <= it.cur && it.cur < len(it.keys) } -func (mBatch *memDBBatch) Set(key, value []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +func (it *memDBIterator) Next() { + if !it.Valid() { + panic("memDBIterator Next() called when invalid") + } + it.cur++ } -func (mBatch *memDBBatch) Delete(key []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +func (it *memDBIterator) Prev() { + if !it.Valid() { + panic("memDBIterator Next() called when invalid") + } + it.cur-- } -func (mBatch *memDBBatch) Write() { - mBatch.db.mtx.Lock() - defer mBatch.db.mtx.Unlock() +func (it *memDBIterator) Key() []byte { + if !it.Valid() { + panic("memDBIterator Key() called when invalid") + } + return []byte(it.keys[it.cur]) +} - for _, op := range mBatch.ops { - if op.opType == opTypeSet { - mBatch.db.db[string(op.key)] = op.value - } else if op.opType == opTypeDelete { - delete(mBatch.db.db, string(op.key)) - } +func (it *memDBIterator) Value() []byte { + if !it.Valid() { + panic("memDBIterator Value() called when invalid") } + return it.db.Get(it.Key()) +} + +func (it *memDBIterator) Close() { + it.db = nil + it.keys = nil +} +func (it *memDBIterator) GetError() error { + return nil } diff --git a/db/mem_db_test.go b/db/mem_db_test.go index 503e361f1..b5c9167c8 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -21,7 +21,7 @@ func TestMemDbIterator(t *testing.T) { iter := db.Iterator() i := 0 - for iter.Next() { + for ; iter.Valid(); iter.Next() { assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") i += 1 } diff --git a/db/stats.go b/db/stats.go new file mode 100644 index 000000000..ef4b0dd0f --- /dev/null +++ b/db/stats.go @@ -0,0 +1,7 @@ +package db + +func mergeStats(src, dest map[string]string, prefix string) { + for key, value := range src { + dest[prefix+key] = value + } +} diff --git 
a/db/util.go b/db/util.go new file mode 100644 index 000000000..5f381a5be --- /dev/null +++ b/db/util.go @@ -0,0 +1,82 @@ +package db + +import "bytes" + +// A wrapper around itr that tries to keep the iterator +// within the bounds as defined by `prefix` +type prefixIterator struct { + itr Iterator + prefix []byte + invalid bool +} + +func (pi *prefixIterator) Seek(key []byte) { + if !bytes.HasPrefix(key, pi.prefix) { + pi.invalid = true + return + } + pi.itr.Seek(key) + pi.checkInvalid() +} + +func (pi *prefixIterator) checkInvalid() { + if !pi.itr.Valid() { + pi.invalid = true + } +} + +func (pi *prefixIterator) Valid() bool { + if pi.invalid { + return false + } + key := pi.itr.Key() + ok := bytes.HasPrefix(key, pi.prefix) + if !ok { + pi.invalid = true + return false + } + return true +} + +func (pi *prefixIterator) Next() { + if pi.invalid { + panic("prefixIterator Next() called when invalid") + } + pi.itr.Next() + pi.checkInvalid() +} + +func (pi *prefixIterator) Prev() { + if pi.invalid { + panic("prefixIterator Prev() called when invalid") + } + pi.itr.Prev() + pi.checkInvalid() +} + +func (pi *prefixIterator) Key() []byte { + if pi.invalid { + panic("prefixIterator Key() called when invalid") + } + return pi.itr.Key() +} + +func (pi *prefixIterator) Value() []byte { + if pi.invalid { + panic("prefixIterator Value() called when invalid") + } + return pi.itr.Value() +} + +func (pi *prefixIterator) Close() { pi.itr.Close() } +func (pi *prefixIterator) GetError() error { return pi.itr.GetError() } + +func IteratePrefix(db DB, prefix []byte) Iterator { + itr := db.Iterator() + pi := &prefixIterator{ + itr: itr, + prefix: prefix, + } + pi.Seek(prefix) + return pi +} diff --git a/db/util_test.go b/db/util_test.go new file mode 100644 index 000000000..55a41bf5b --- /dev/null +++ b/db/util_test.go @@ -0,0 +1,209 @@ +package db + +import ( + "fmt" + "testing" +) + +func TestPrefixIteratorNoMatchNil(t *testing.T) { + for backend, _ := range backends { + 
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorNoMatch1(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + db.SetSync(bz("1"), bz("value_1")) + + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorMatch2(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, []byte("2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("2"), bz("value_2")) + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorMatch3(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("3"), bz("value_3")) + itr := IteratePrefix(db, []byte("2")) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/1, fail by too much Next() +func TestPrefixIteratorMatches1N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/1")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... 
+ checkInvalid(t, itr) + }) + } +} + +// Search for a/1, fail by too much Prev() +func TestPrefixIteratorMatches1P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/1")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! + checkPrev(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/2, fail by too much Next() +func TestPrefixIteratorMatches2N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/2, fail by too much Prev() +func TestPrefixIteratorMatches2P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! 
+ checkPrev(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/3, fail by too much Next() +func TestPrefixIteratorMatches3N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/3")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/3, fail by too much Prev() +func TestPrefixIteratorMatches3P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/3")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! + checkPrev(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} diff --git a/merkle/kvpairs.go b/merkle/kvpairs.go new file mode 100644 index 000000000..3d67049f2 --- /dev/null +++ b/merkle/kvpairs.go @@ -0,0 +1,48 @@ +package merkle + +import ( + "sort" + + wire "github.com/tendermint/go-wire" + "golang.org/x/crypto/ripemd160" +) + +// NOTE: Behavior is undefined with dup keys. +type KVPair struct { + Key string + Value interface{} // Can be Hashable or not. 
+} + +func (kv KVPair) Hash() []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteString(kv.Key, hasher, n, err) + if kvH, ok := kv.Value.(Hashable); ok { + wire.WriteByteSlice(kvH.Hash(), hasher, n, err) + } else { + wire.WriteBinary(kv.Value, hasher, n, err) + } + if *err != nil { + panic(*err) + } + return hasher.Sum(nil) +} + +type KVPairs []KVPair + +func (kvps KVPairs) Len() int { return len(kvps) } +func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } +func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } +func (kvps KVPairs) Sort() { sort.Sort(kvps) } + +func MakeSortedKVPairs(m map[string]interface{}) []Hashable { + kvPairs := make([]KVPair, 0, len(m)) + for k, v := range m { + kvPairs = append(kvPairs, KVPair{k, v}) + } + KVPairs(kvPairs).Sort() + kvPairsH := make([]Hashable, 0, len(kvPairs)) + for _, kvp := range kvPairs { + kvPairsH = append(kvPairsH, kvp) + } + return kvPairsH +} diff --git a/merkle/simple_map.go b/merkle/simple_map.go new file mode 100644 index 000000000..43dce990f --- /dev/null +++ b/merkle/simple_map.go @@ -0,0 +1,26 @@ +package merkle + +type SimpleMap struct { + kvz KVPairs +} + +func NewSimpleMap() *SimpleMap { + return &SimpleMap{ + kvz: nil, + } +} + +func (sm *SimpleMap) Set(k string, o interface{}) { + sm.kvz = append(sm.kvz, KVPair{Key: k, Value: o}) +} + +// Merkle root hash of items sorted by key. +// NOTE: Behavior is undefined when key is duplicate. 
+func (sm *SimpleMap) Hash() []byte { + sm.kvz.Sort() + kvPairsH := make([]Hashable, 0, len(sm.kvz)) + for _, kvp := range sm.kvz { + kvPairsH = append(kvPairsH, kvp) + } + return SimpleHashFromHashables(kvPairsH) +} diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go new file mode 100644 index 000000000..5eb218274 --- /dev/null +++ b/merkle/simple_map_test.go @@ -0,0 +1,47 @@ +package merkle + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleMap(t *testing.T) { + { + db := NewSimpleMap() + db.Set("key1", "value1") + assert.Equal(t, "376bf717ebe3659a34f68edb833dfdcf4a2d3c10", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value2") + assert.Equal(t, "72fd3a7224674377952214cb10ef21753ec803eb", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value1") + db.Set("key2", "value2") + assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key2", "value2") // NOTE: out of order + db.Set("key1", "value1") + assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value1") + db.Set("key2", "value2") + db.Set("key3", "value3") + assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key2", "value2") // NOTE: out of order + db.Set("key1", "value1") + db.Set("key3", "value3") + assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } +} diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go new file mode 100644 index 000000000..f75568fd9 --- /dev/null +++ b/merkle/simple_proof.go @@ -0,0 +1,131 @@ +package merkle + +import ( + "bytes" + "fmt" +) + +type SimpleProof 
struct { + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. +} + +// proofs[0] is the proof for items[0]. +func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashables(items) + rootHash = rootSPN.Hash + proofs = make([]*SimpleProof, len(items)) + for i, trail := range trails { + proofs[i] = &SimpleProof{ + Aunts: trail.FlattenAunts(), + } + } + return +} + +// Verify that leafHash is a leaf hash of the simple-merkle-tree +// which hashes to rootHash. +func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { + computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) + return computedHash != nil && bytes.Equal(computedHash, rootHash) +} + +func (sp *SimpleProof) String() string { + return sp.StringIndented("") +} + +func (sp *SimpleProof) StringIndented(indent string) string { + return fmt.Sprintf(`SimpleProof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) +} + +// Use the leafHash and innerHashes to get the root merkle hash. +// If the length of the innerHashes slice isn't exactly correct, the result is nil. +func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { + // Recursive impl. 
+ if index >= total { + return nil + } + switch total { + case 0: + panic("Cannot call computeHashFromAunts() with 0 total") + case 1: + if len(innerHashes) != 0 { + return nil + } + return leafHash + default: + if len(innerHashes) == 0 { + return nil + } + numLeft := (total + 1) / 2 + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if leftHash == nil { + return nil + } + return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + } else { + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + } + } +} + +// Helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. +// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. +type SimpleProofNode struct { + Hash []byte + Parent *SimpleProofNode + Left *SimpleProofNode // Left sibling (only one of Left,Right is set) + Right *SimpleProofNode // Right sibling (only one of Left,Right is set) +} + +// Starting from a leaf SimpleProofNode, FlattenAunts() will return +// the inner hashes for the item corresponding to the leaf. +func (spn *SimpleProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + if spn.Left != nil { + innerHashes = append(innerHashes, spn.Left.Hash) + } else if spn.Right != nil { + innerHashes = append(innerHashes, spn.Right.Hash) + } else { + break + } + spn = spn.Parent + } + return innerHashes +} + +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. 
+func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { + // Recursive impl. + switch len(items) { + case 0: + return nil, nil + case 1: + trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + return []*SimpleProofNode{trail}, trail + default: + lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) + rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + root := &SimpleProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } +} diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 8106246d6..d64082b43 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -25,10 +25,6 @@ For larger datasets, use IAVLTree. package merkle import ( - "bytes" - "fmt" - "sort" - "golang.org/x/crypto/ripemd160" "github.com/tendermint/go-wire" @@ -95,183 +91,3 @@ func SimpleHashFromMap(m map[string]interface{}) []byte { kpPairsH := MakeSortedKVPairs(m) return SimpleHashFromHashables(kpPairsH) } - -//-------------------------------------------------------------------------------- - -/* Convenience struct for key-value pairs. -A list of KVPairs is hashed via `SimpleHashFromHashables`. -NOTE: Each `Value` is encoded for hashing without extra type information, -so the user is presumed to be aware of the Value types. 
-*/ -type KVPair struct { - Key string - Value interface{} -} - -func (kv KVPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteString(kv.Key, hasher, n, err) - if kvH, ok := kv.Value.(Hashable); ok { - wire.WriteByteSlice(kvH.Hash(), hasher, n, err) - } else { - wire.WriteBinary(kv.Value, hasher, n, err) - } - if *err != nil { - PanicSanity(*err) - } - return hasher.Sum(nil) -} - -type KVPairs []KVPair - -func (kvps KVPairs) Len() int { return len(kvps) } -func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } -func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } -func (kvps KVPairs) Sort() { sort.Sort(kvps) } - -func MakeSortedKVPairs(m map[string]interface{}) []Hashable { - kvPairs := []KVPair{} - for k, v := range m { - kvPairs = append(kvPairs, KVPair{k, v}) - } - KVPairs(kvPairs).Sort() - kvPairsH := []Hashable{} - for _, kvp := range kvPairs { - kvPairsH = append(kvPairsH, kvp) - } - return kvPairsH -} - -//-------------------------------------------------------------------------------- - -type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. -} - -// proofs[0] is the proof for items[0]. -func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashables(items) - rootHash = rootSPN.Hash - proofs = make([]*SimpleProof, len(items)) - for i, trail := range trails { - proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), - } - } - return -} - -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. 
-func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - if computedHash == nil { - return false - } - if !bytes.Equal(computedHash, rootHash) { - return false - } - return true -} - -func (sp *SimpleProof) String() string { - return sp.StringIndented("") -} - -func (sp *SimpleProof) StringIndented(indent string) string { - return fmt.Sprintf(`SimpleProof{ -%s Aunts: %X -%s}`, - indent, sp.Aunts, - indent) -} - -// Use the leafHash and innerHashes to get the root merkle hash. -// If the length of the innerHashes slice isn't exactly correct, the result is nil. -func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - // Recursive impl. - if index >= total { - return nil - } - switch total { - case 0: - PanicSanity("Cannot call computeHashFromAunts() with 0 total") - return nil - case 1: - if len(innerHashes) != 0 { - return nil - } - return leafHash - default: - if len(innerHashes) == 0 { - return nil - } - numLeft := (total + 1) / 2 - if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil - } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } else { - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) - } - } -} - -// Helper structure to construct merkle proof. -// The node and the tree is thrown away afterwards. -// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. -// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. 
-type SimpleProofNode struct { - Hash []byte - Parent *SimpleProofNode - Left *SimpleProofNode // Left sibling (only one of Left,Right is set) - Right *SimpleProofNode // Right sibling (only one of Left,Right is set) -} - -// Starting from a leaf SimpleProofNode, FlattenAunts() will return -// the inner hashes for the item corresponding to the leaf. -func (spn *SimpleProofNode) FlattenAunts() [][]byte { - // Nonrecursive impl. - innerHashes := [][]byte{} - for spn != nil { - if spn.Left != nil { - innerHashes = append(innerHashes, spn.Left.Hash) - } else if spn.Right != nil { - innerHashes = append(innerHashes, spn.Right.Hash) - } else { - break - } - spn = spn.Parent - } - return innerHashes -} - -// trails[0].Hash is the leaf hash for items[0]. -// trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { - // Recursive impl. - switch len(items) { - case 0: - return nil, nil - case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} - return []*SimpleProofNode{trail}, trail - default: - lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) - root := &SimpleProofNode{rootHash, nil, nil, nil} - leftRoot.Parent = root - leftRoot.Right = rightRoot - rightRoot.Parent = root - rightRoot.Left = leftRoot - return append(lefts, rights...), root - } -} From 4ea6340f1ac343d2dafb606cf11ee1c971c5a8ef Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 11 Nov 2017 11:25:30 -0500 Subject: [PATCH 252/515] add .editorconfig --- .editorconfig | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..82f774362 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +# top-most EditorConfig file +root = true + +# Unix-style 
newlines with a newline ending every file +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[Makefile] +indent_style = tab + +[*.sh] +indent_style = tab + +[*.proto] +indent_style = space +indent_size = 2 From 56e51bc1133bd54a331e0314d5f80fb547460362 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 11 Nov 2017 17:33:41 -0500 Subject: [PATCH 253/515] CacheWrap() -> CacheDB() CacheDB --- db/c_level_db.go | 2 +- db/cache_db.go | 57 +++++++++++++++++++++++---------------------- db/cache_db_test.go | 18 +++++++------- db/db.go | 9 +++++-- db/fsdb.go | 2 +- db/go_level_db.go | 2 +- db/mem_db.go | 2 +- 7 files changed, 49 insertions(+), 43 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 95651c0a2..e4450aaa6 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -120,7 +120,7 @@ func (db *CLevelDB) Stats() map[string]string { return stats } -func (db *CLevelDB) CacheWrap() interface{} { +func (db *CLevelDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/cache_db.go b/db/cache_db.go index a41680c1b..ed85e3057 100644 --- a/db/cache_db.go +++ b/db/cache_db.go @@ -16,8 +16,8 @@ type cDBValue struct { dirty bool } -// CacheDB wraps an in-memory cache around an underlying DB. -type CacheDB struct { +// cacheDB wraps an in-memory cache around an underlying DB. +type cacheDB struct { mtx sync.Mutex cache map[string]cDBValue parent DB @@ -27,13 +27,14 @@ type CacheDB struct { } // Needed by MultiStore.CacheWrap(). -var _ atomicSetDeleter = (*CacheDB)(nil) +var _ atomicSetDeleter = (*cacheDB)(nil) +var _ CacheDB = (*cacheDB)(nil) // Users should typically not be required to call NewCacheDB directly, as the -// DB implementations here provide a .CacheWrap() function already. +// DB implementations here provide a .CacheDB() function already. // `lockVersion` is typically provided by parent.GetWriteLockVersion(). 
-func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { - db := &CacheDB{ +func NewCacheDB(parent DB, lockVersion interface{}) CacheDB { + db := &cacheDB{ cache: make(map[string]cDBValue), parent: parent, lockVersion: lockVersion, @@ -42,7 +43,7 @@ func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { return db } -func (db *CacheDB) Get(key []byte) []byte { +func (db *cacheDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() @@ -55,54 +56,54 @@ func (db *CacheDB) Get(key []byte) []byte { return dbValue.value } -func (db *CacheDB) Set(key []byte, value []byte) { +func (db *cacheDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.SetNoLock(key, value) } -func (db *CacheDB) SetSync(key []byte, value []byte) { +func (db *cacheDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.SetNoLock(key, value) } -func (db *CacheDB) SetNoLock(key []byte, value []byte) { +func (db *cacheDB) SetNoLock(key []byte, value []byte) { db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} } -func (db *CacheDB) Delete(key []byte) { +func (db *cacheDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.DeleteNoLock(key) } -func (db *CacheDB) DeleteSync(key []byte) { +func (db *cacheDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.DeleteNoLock(key) } -func (db *CacheDB) DeleteNoLock(key []byte) { +func (db *cacheDB) DeleteNoLock(key []byte) { db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} } -func (db *CacheDB) Close() { +func (db *cacheDB) Close() { db.mtx.Lock() defer db.mtx.Unlock() db.parent.Close() } -func (db *CacheDB) Print() { +func (db *cacheDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() - fmt.Println("CacheDB\ncache:") + fmt.Println("cacheDB\ncache:") for key, value := range db.cache { fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) } @@ -110,7 +111,7 @@ func (db *CacheDB) Print() { db.parent.Print() } -func (db *CacheDB) Stats() 
map[string]string { +func (db *cacheDB) Stats() map[string]string { db.mtx.Lock() defer db.mtx.Unlock() @@ -121,30 +122,30 @@ func (db *CacheDB) Stats() map[string]string { return stats } -func (db *CacheDB) Iterator() Iterator { - panic("CacheDB.Iterator() not yet supported") +func (db *cacheDB) Iterator() Iterator { + panic("cacheDB.Iterator() not yet supported") } -func (db *CacheDB) NewBatch() Batch { +func (db *cacheDB) NewBatch() Batch { return &memBatch{db, nil} } // Implements `atomicSetDeleter` for Batch support. -func (db *CacheDB) Mutex() *sync.Mutex { +func (db *cacheDB) Mutex() *sync.Mutex { return &(db.mtx) } // Write writes pending updates to the parent database and clears the cache. -func (db *CacheDB) Write() { +func (db *cacheDB) Write() { db.mtx.Lock() defer db.mtx.Unlock() - // Optional sanity check to ensure that CacheDB is valid + // Optional sanity check to ensure that cacheDB is valid if parent, ok := db.parent.(WriteLocker); ok { if parent.TryWriteLock(db.lockVersion) { // All good! } else { - panic("CacheDB.Write() failed. Did this CacheDB expire?") + panic("cacheDB.Write() failed. Did this CacheDB expire?") } } @@ -176,14 +177,14 @@ func (db *CacheDB) Write() { } //---------------------------------------- -// To CacheWrap this CacheDB further. +// To cache-wrap this cacheDB further. -func (db *CacheDB) CacheWrap() interface{} { +func (db *cacheDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } -// If the parent parent DB implements this, (e.g. such as a CacheDB parent to a -// CacheDB child), CacheDB will call `parent.TryWriteLock()` before attempting +// If the parent parent DB implements this, (e.g. such as a cacheDB parent to a +// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting // to write. 
type WriteLocker interface { GetWriteLockVersion() (lockVersion interface{}) diff --git a/db/cache_db_test.go b/db/cache_db_test.go index 1de08e3f0..2a2684fe2 100644 --- a/db/cache_db_test.go +++ b/db/cache_db_test.go @@ -10,7 +10,7 @@ func bz(s string) []byte { return []byte(s) } func TestCacheDB(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") @@ -27,7 +27,7 @@ func TestCacheDB(t *testing.T) { require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") - cdb = mem.CacheWrap().(*CacheDB) + cdb = mem.CacheDB() cdb.Delete(bz("key1")) require.Empty(t, cdb.Get(bz("key1"))) require.Equal(t, mem.Get(bz("key1")), bz("value2")) @@ -39,33 +39,33 @@ func TestCacheDB(t *testing.T) { func TestCacheDBWriteLock(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() require.NotPanics(t, func() { cdb.Write() }) require.Panics(t, func() { cdb.Write() }) - cdb = mem.CacheWrap().(*CacheDB) + cdb = mem.CacheDB() require.NotPanics(t, func() { cdb.Write() }) require.Panics(t, func() { cdb.Write() }) } func TestCacheDBWriteLockNested(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) - cdb2 := cdb.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() + cdb2 := cdb.CacheDB() require.NotPanics(t, func() { cdb2.Write() }) require.Panics(t, func() { cdb2.Write() }) - cdb2 = cdb.CacheWrap().(*CacheDB) + cdb2 = cdb.CacheDB() require.NotPanics(t, func() { cdb2.Write() }) require.Panics(t, func() { cdb2.Write() }) } func TestCacheDBNested(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() cdb.Set(bz("key1"), bz("value1")) require.Empty(t, mem.Get(bz("key1"))) require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - cdb2 := cdb.CacheWrap().(*CacheDB) + cdb2 := cdb.CacheDB() require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) cdb2.Set(bz("key1"), bz("VALUE2")) diff --git 
a/db/db.go b/db/db.go index 6c8bd4800..e86394713 100644 --- a/db/db.go +++ b/db/db.go @@ -18,8 +18,13 @@ type DB interface { // Stats returns a map of property values for all keys and the size of the cache. Stats() map[string]string - // CacheWrap wraps the DB w/ a CacheDB. - CacheWrap() interface{} + // CacheDB wraps the DB w/ a cache. + CacheDB() CacheDB +} + +type CacheDB interface { + DB + Write() // Write to the underlying DB } type Batch interface { diff --git a/db/fsdb.go b/db/fsdb.go index 65ac3c38e..4b1914453 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -140,7 +140,7 @@ func (db *FSDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *FSDB) CacheWrap() interface{} { +func (db *FSDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/go_level_db.go b/db/go_level_db.go index d9cec519c..cffe7329c 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -121,7 +121,7 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } -func (db *GoLevelDB) CacheWrap() interface{} { +func (db *GoLevelDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/mem_db.go b/db/mem_db.go index 30697adcf..f5d55f3ae 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -114,7 +114,7 @@ func (db *MemDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *MemDB) CacheWrap() interface{} { +func (db *MemDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } From 135a1a7cd78215105a55308c167b3331c225e00b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 20 Nov 2017 03:06:18 +0000 Subject: [PATCH 254/515] db: sort keys for memdb iterator --- db/mem_db.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/db/mem_db.go b/db/mem_db.go index 077427509..2f507321b 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "sort" "strings" "sync" ) @@ -127,6 +128,8 @@ func (db *MemDB) IteratorPrefix(prefix []byte) Iterator { it.keys = append(it.keys, key) } } + // 
and we need to sort them + sort.Strings(it.keys) return it } From d3bac7a6fefaeaec662c8b8483c1728ba2bd746c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 27 Nov 2017 19:49:30 +0000 Subject: [PATCH 255/515] clist: reduce numTimes in test --- clist/clist_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clist/clist_test.go b/clist/clist_test.go index 2063cf465..9d5272de5 100644 --- a/clist/clist_test.go +++ b/clist/clist_test.go @@ -149,7 +149,7 @@ func _TestGCRandom(t *testing.T) { func TestScanRightDeleteRandom(t *testing.T) { const numElements = 10000 - const numTimes = 100000 + const numTimes = 1000 const numScanners = 10 l := New() From 4e705a3157512d757e459c2c86dd2d38068e7bc0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Nov 2017 21:37:15 -0600 Subject: [PATCH 256/515] update changelog --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c380fdcd0..b0aa90d7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 0.4.1 (November 27, 2017) + +FEATURES: + - [common] `Keys()` method on `CMap` + +IMPROVEMENTS: + - [log] complex types now encoded as "%+v" by default if `String()` method is undefined (previously resulted in error) + - [log] logger logs its own errors + +BUG FIXES: + - [common] fixed `Kill()` to build on Windows (Windows does not have `syscall.Kill`) + ## 0.4.0 (October 26, 2017) BREAKING: From 3244f73f32497457987770d4b76523f9d3afdfe9 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Nov 2017 21:37:39 -0600 Subject: [PATCH 257/515] update version --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index c1635d202..c30887b49 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.4.0" +const Version = "0.4.1" From e07ad01f62e01016cdcf1e3e05ffb1a6cc8c5b8f Mon Sep 17 00:00:00 2001 From: 
Emmanuel Odeke Date: Mon, 27 Nov 2017 21:24:42 -0700 Subject: [PATCH 258/515] remove package process Fixes https://github.com/tendermint/tmlibs/issues/81 That package is untested and racy, and not used except in a test, but even that's now gutted with https://github.com/tendermint/abci/pull/139 so the general consensus is that we sunset this package. --- process/process.go | 76 ---------------------------------------------- process/util.go | 22 -------------- 2 files changed, 98 deletions(-) delete mode 100644 process/process.go delete mode 100644 process/util.go diff --git a/process/process.go b/process/process.go deleted file mode 100644 index 7d2ae9140..000000000 --- a/process/process.go +++ /dev/null @@ -1,76 +0,0 @@ -package process - -import ( - "fmt" - "io" - "os" - "os/exec" - "time" -) - -type Process struct { - Label string - ExecPath string - Args []string - Pid int - StartTime time.Time - EndTime time.Time - Cmd *exec.Cmd `json:"-"` - ExitState *os.ProcessState `json:"-"` - InputFile io.Reader `json:"-"` - OutputFile io.WriteCloser `json:"-"` - WaitCh chan struct{} `json:"-"` -} - -// execPath: command name -// args: args to command. (should not include name) -func StartProcess(label string, dir string, execPath string, args []string, inFile io.Reader, outFile io.WriteCloser) (*Process, error) { - cmd := exec.Command(execPath, args...) 
- cmd.Dir = dir - cmd.Stdout = outFile - cmd.Stderr = outFile - cmd.Stdin = inFile - if err := cmd.Start(); err != nil { - return nil, err - } - proc := &Process{ - Label: label, - ExecPath: execPath, - Args: args, - Pid: cmd.Process.Pid, - StartTime: time.Now(), - Cmd: cmd, - ExitState: nil, - InputFile: inFile, - OutputFile: outFile, - WaitCh: make(chan struct{}), - } - go func() { - err := proc.Cmd.Wait() - if err != nil { - // fmt.Printf("Process exit: %v\n", err) - if exitError, ok := err.(*exec.ExitError); ok { - proc.ExitState = exitError.ProcessState - } - } - proc.ExitState = proc.Cmd.ProcessState - proc.EndTime = time.Now() // TODO make this goroutine-safe - err = proc.OutputFile.Close() - if err != nil { - fmt.Printf("Error closing output file for %v: %v\n", proc.Label, err) - } - close(proc.WaitCh) - }() - return proc, nil -} - -func (proc *Process) StopProcess(kill bool) error { - defer proc.OutputFile.Close() - if kill { - // fmt.Printf("Killing process %v\n", proc.Cmd.Process) - return proc.Cmd.Process.Kill() - } else { - // fmt.Printf("Stopping process %v\n", proc.Cmd.Process) - return proc.Cmd.Process.Signal(os.Interrupt) - } -} diff --git a/process/util.go b/process/util.go deleted file mode 100644 index 24cf35280..000000000 --- a/process/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package process - -import ( - . "github.com/tendermint/tmlibs/common" -) - -// Runs a command and gets the result. 
-func Run(dir string, command string, args []string) (string, bool, error) { - outFile := NewBufferCloser(nil) - proc, err := StartProcess("", dir, command, args, nil, outFile) - if err != nil { - return "", false, err - } - - <-proc.WaitCh - - if proc.ExitState.Success() { - return outFile.String(), true, nil - } else { - return outFile.String(), false, nil - } -} From c2fcc093b28e8c3c9ba99d0617127060c3c2e917 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Nov 2017 23:42:36 -0600 Subject: [PATCH 259/515] remove bool from Service#Reset --- common/service.go | 9 +++++---- common/service_test.go | 46 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 12 deletions(-) diff --git a/common/service.go b/common/service.go index 32f531d1a..608f8b722 100644 --- a/common/service.go +++ b/common/service.go @@ -2,6 +2,7 @@ package common import ( "errors" + "fmt" "sync/atomic" "github.com/tendermint/tmlibs/log" @@ -19,7 +20,7 @@ type Service interface { Stop() error OnStop() - Reset() (bool, error) + Reset() error OnReset() error IsRunning() bool @@ -145,17 +146,17 @@ func (bs *BaseService) Stop() error { func (bs *BaseService) OnStop() {} // Implements Service -func (bs *BaseService) Reset() (bool, error) { +func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { bs.Logger.Debug(Fmt("Can't reset %v. 
Not stopped", bs.name), "impl", bs.impl) - return false, nil + return fmt.Errorf("can't reset running %s", bs.name) } // whether or not we've started, we can reset atomic.CompareAndSwapUint32(&bs.started, 1, 0) bs.Quit = make(chan struct{}) - return true, bs.impl.OnReset() + return bs.impl.OnReset() } // Implements Service diff --git a/common/service_test.go b/common/service_test.go index 6e24dad6a..ef360a648 100644 --- a/common/service_test.go +++ b/common/service_test.go @@ -2,23 +2,53 @@ package common import ( "testing" + "time" + + "github.com/stretchr/testify/require" ) -func TestBaseServiceWait(t *testing.T) { +type testService struct { + BaseService +} - type TestService struct { - BaseService - } - ts := &TestService{} +func (testService) OnReset() error { + return nil +} + +func TestBaseServiceWait(t *testing.T) { + ts := &testService{} ts.BaseService = *NewBaseService(nil, "TestService", ts) ts.Start() + waitFinished := make(chan struct{}) go func() { - ts.Stop() + ts.Wait() + waitFinished <- struct{}{} }() - for i := 0; i < 10; i++ { - ts.Wait() + go ts.Stop() + + select { + case <-waitFinished: + // all good + case <-time.After(100 * time.Millisecond): + t.Fatal("expected Wait() to finish within 100 ms.") } +} + +func TestBaseServiceReset(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + err := ts.Reset() + require.Error(t, err, "expected cant reset service error") + + ts.Stop() + + err = ts.Reset() + require.NoError(t, err) + err = ts.Start() + require.NoError(t, err) } From 57fea1335a7bf898349bcbc9861a05b91625bf35 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 29 Nov 2017 03:05:20 +0000 Subject: [PATCH 260/515] Makefile and linter --- Makefile | 17 +++++++++-------- circle.yml | 2 +- test.sh | 7 +++++-- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index 25773ed36..dd4711aac 100644 --- a/Makefile +++ b/Makefile @@ -4,14 +4,16 @@ GOTOOLS = \ 
github.com/Masterminds/glide \ github.com/alecthomas/gometalinter +PACKAGES=$(shell go list ./... | grep -v '/vendor/') REPO:=github.com/tendermint/tmlibs all: test -NOVENDOR = go list github.com/tendermint/tmlibs/... | grep -v /vendor/ - test: - go test `glide novendor` + @echo "--> Running linter" + @make metalinter_test + @echo "--> Running go test" + @go test $(PACKAGES) get_vendor_deps: ensure_tools @rm -rf vendor/ @@ -20,16 +22,14 @@ get_vendor_deps: ensure_tools ensure_tools: go get $(GOTOOLS) - -metalinter: ensure_tools @gometalinter --install + +metalinter: gometalinter --vendor --deadline=600s --enable-all --disable=lll ./... -metalinter_test: ensure_tools - @gometalinter --install +metalinter_test: gometalinter --vendor --deadline=600s --disable-all \ --enable=deadcode \ - --enable=gas \ --enable=goconst \ --enable=gosimple \ --enable=ineffassign \ @@ -46,6 +46,7 @@ metalinter_test: ensure_tools --enable=vet \ ./... + #--enable=gas \ #--enable=aligncheck \ #--enable=dupl \ #--enable=errcheck \ diff --git a/circle.yml b/circle.yml index 3dba976be..104cfa6f3 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && bash ./test.sh + - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/test.sh b/test.sh index 012162b07..02bdaae86 100755 --- a/test.sh +++ b/test.sh @@ -1,8 +1,11 @@ #!/usr/bin/env bash - set -e -echo "" > coverage.txt +# run the linter +make metalinter_test + +# run the unit tests with coverage +echo "" > coverage.txt for d in $(go list ./... 
| grep -v vendor); do go test -race -coverprofile=profile.out -covermode=atomic "$d" if [ -f profile.out ]; then From 4d991acae0f0eb0ebfab14eabb55e18854c5a2a2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 29 Nov 2017 05:16:15 +0000 Subject: [PATCH 261/515] common: comments for Service --- common/service.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/common/service.go b/common/service.go index 608f8b722..d70d16a80 100644 --- a/common/service.go +++ b/common/service.go @@ -13,18 +13,29 @@ var ( ErrAlreadyStopped = errors.New("already stopped") ) +// Service defines a service that can be started, stopped, and reset. type Service interface { + // Start the service. + // If it's already started or stopped, will return an error. + // If OnStart() returns an error, it's returned by Start() Start() error OnStart() error + // Stop the service. + // If it's already stopped, will return an error. + // OnStop must never error. Stop() error OnStop() + // Reset the service. + // Panics by default - must be overwritten to enable reset. 
Reset() error OnReset() error + // Return true if the service is running IsRunning() bool + // String representation of the service String() string SetLogger(log.Logger) From 33abe87c5bcf9e3e41ca4030cdce63e7250c6870 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 29 Nov 2017 12:18:03 -0600 Subject: [PATCH 262/515] IntInSlice and StringInSlice functions Refs https://github.com/tendermint/tendermint/pull/835 --- common/int.go | 10 ++++++++++ common/int_test.go | 14 ++++++++++++++ common/string.go | 10 ++++++++++ common/string_test.go | 14 ++++++++++++++ 4 files changed, 48 insertions(+) create mode 100644 common/int_test.go create mode 100644 common/string_test.go diff --git a/common/int.go b/common/int.go index 756e38cda..a8a5f1e00 100644 --- a/common/int.go +++ b/common/int.go @@ -53,3 +53,13 @@ func PutInt64BE(dest []byte, i int64) { func GetInt64BE(src []byte) int64 { return int64(binary.BigEndian.Uint64(src)) } + +// IntInSlice returns true if a is found in the list. +func IntInSlice(a int, list []int) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/common/int_test.go b/common/int_test.go new file mode 100644 index 000000000..1ecc7844c --- /dev/null +++ b/common/int_test.go @@ -0,0 +1,14 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIntInSlice(t *testing.T) { + assert.True(t, IntInSlice(1, []int{1, 2, 3})) + assert.False(t, IntInSlice(4, []int{1, 2, 3})) + assert.True(t, IntInSlice(0, []int{0})) + assert.False(t, IntInSlice(0, []int{})) +} diff --git a/common/string.go b/common/string.go index 1ab91f15a..6924e6a5b 100644 --- a/common/string.go +++ b/common/string.go @@ -43,3 +43,13 @@ func StripHex(s string) string { } return s } + +// StringInSlice returns true if a is found the list. 
+func StringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/common/string_test.go b/common/string_test.go new file mode 100644 index 000000000..a82f1022b --- /dev/null +++ b/common/string_test.go @@ -0,0 +1,14 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStringInSlice(t *testing.T) { + assert.True(t, StringInSlice("a", []string{"a", "b", "c"})) + assert.False(t, StringInSlice("d", []string{"a", "b", "c"})) + assert.True(t, StringInSlice("", []string{""})) + assert.False(t, StringInSlice("", []string{})) +} From 382272798165ac5d24bbedf112744b83e218838c Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Nov 2017 18:40:51 -0600 Subject: [PATCH 263/515] add Conditions function Refs https://github.com/tendermint/tendermint/pull/835 --- pubsub/query/query.go | 146 +++++++++++++++++++++++++++++-------- pubsub/query/query_test.go | 21 ++++++ 2 files changed, 135 insertions(+), 32 deletions(-) diff --git a/pubsub/query/query.go b/pubsub/query/query.go index fdfb87d7a..56f2829d2 100644 --- a/pubsub/query/query.go +++ b/pubsub/query/query.go @@ -22,6 +22,14 @@ type Query struct { parser *QueryParser } +// Condition represents a single condition within a query and consists of tag +// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7"). +type Condition struct { + Tag string + Op Operator + Operand interface{} +} + // New parses the given string and returns a query or error if the string is // invalid. func New(s string) (*Query, error) { @@ -48,17 +56,91 @@ func (q *Query) String() string { return q.str } -type operator uint8 +// Operator is an operator that defines some kind of relation between tag and +// operand (equality, etc.). 
+type Operator uint8 const ( - opLessEqual operator = iota - opGreaterEqual - opLess - opGreater - opEqual - opContains + // "<=" + OpLessEqual Operator = iota + // ">=" + OpGreaterEqual + // "<" + OpLess + // ">" + OpGreater + // "=" + OpEqual + // "CONTAINS"; used to check if a string contains a certain sub string. + OpContains ) +// Conditions returns a list of conditions. +func (q *Query) Conditions() []Condition { + conditions := make([]Condition, 0) + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op Operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = OpLessEqual + case rulege: + op = OpGreaterEqual + case rulel: + op = OpLess + case ruleg: + op = OpGreater + case ruleequal: + op = OpEqual + case rulecontains: + op = OpContains + case rulevalue: + // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes}) + case rulenumber: + number := buffer[begin:end] + if strings.Contains(number, ".") { // if it looks like a floating-point number + value, err := strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } + case ruletime: + value, err := time.Parse(time.RFC3339, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + } + } + + return conditions +} + // Matches returns true if the query matches the given set of tags, false otherwise. // // For example, query "name=John" matches tags = {"name": "John"}. 
More @@ -71,7 +153,7 @@ func (q *Query) Matches(tags map[string]interface{}) bool { buffer, begin, end := q.parser.Buffer, 0, 0 var tag string - var op operator + var op Operator // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") for _, token := range q.parser.Tokens() { @@ -82,17 +164,17 @@ func (q *Query) Matches(tags map[string]interface{}) bool { case ruletag: tag = buffer[begin:end] case rulele: - op = opLessEqual + op = OpLessEqual case rulege: - op = opGreaterEqual + op = OpGreaterEqual case rulel: - op = opLess + op = OpLess case ruleg: - op = opGreater + op = OpGreater case ruleequal: - op = opEqual + op = OpEqual case rulecontains: - op = opContains + op = OpContains case rulevalue: // strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock") valueWithoutSingleQuotes := buffer[begin+1 : end-1] @@ -149,7 +231,7 @@ func (q *Query) Matches(tags map[string]interface{}) bool { // value from it to the operand using the operator. // // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } -func match(tag string, op operator, operand reflect.Value, tags map[string]interface{}) bool { +func match(tag string, op Operator, operand reflect.Value, tags map[string]interface{}) bool { // look up the tag from the query in tags value, ok := tags[tag] if !ok { @@ -163,15 +245,15 @@ func match(tag string, op operator, operand reflect.Value, tags map[string]inter return false } switch op { - case opLessEqual: + case OpLessEqual: return v.Before(operandAsTime) || v.Equal(operandAsTime) - case opGreaterEqual: + case OpGreaterEqual: return v.Equal(operandAsTime) || v.After(operandAsTime) - case opLess: + case OpLess: return v.Before(operandAsTime) - case opGreater: + case OpGreater: return v.After(operandAsTime) - case opEqual: + case OpEqual: return v.Equal(operandAsTime) } case reflect.Float64: @@ -197,15 +279,15 @@ func match(tag string, op operator, operand reflect.Value, tags map[string]inter 
panic(fmt.Sprintf("Incomparable types: %T (%v) vs float64 (%v)", value, value, operandFloat64)) } switch op { - case opLessEqual: + case OpLessEqual: return v <= operandFloat64 - case opGreaterEqual: + case OpGreaterEqual: return v >= operandFloat64 - case opLess: + case OpLess: return v < operandFloat64 - case opGreater: + case OpGreater: return v > operandFloat64 - case opEqual: + case OpEqual: return v == operandFloat64 } case reflect.Int64: @@ -231,15 +313,15 @@ func match(tag string, op operator, operand reflect.Value, tags map[string]inter panic(fmt.Sprintf("Incomparable types: %T (%v) vs int64 (%v)", value, value, operandInt)) } switch op { - case opLessEqual: + case OpLessEqual: return v <= operandInt - case opGreaterEqual: + case OpGreaterEqual: return v >= operandInt - case opLess: + case OpLess: return v < operandInt - case opGreater: + case OpGreater: return v > operandInt - case opEqual: + case OpEqual: return v == operandInt } case reflect.String: @@ -248,9 +330,9 @@ func match(tag string, op operator, operand reflect.Value, tags map[string]inter return false } switch op { - case opEqual: + case OpEqual: return v == operand.String() - case opContains: + case OpContains: return strings.Contains(v, operand.String()) } default: diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go index 431ae1fef..93b63a157 100644 --- a/pubsub/query/query_test.go +++ b/pubsub/query/query_test.go @@ -62,3 +62,24 @@ func TestMustParse(t *testing.T) { assert.Panics(t, func() { query.MustParse("=") }) assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) } + +func TestConditions(t *testing.T) { + txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") + require.NoError(t, err) + + testCases := []struct { + s string + conditions []query.Condition + }{ + {"tm.events.type='NewBlock'", []query.Condition{query.Condition{"tm.events.type", query.OpEqual, "NewBlock"}}}, + {"tx.gas > 7 AND tx.gas < 9", 
[]query.Condition{query.Condition{"tx.gas", query.OpGreater, int64(7)}, query.Condition{"tx.gas", query.OpLess, int64(9)}}}, + {"tx.time >= TIME 2013-05-03T14:45:00Z", []query.Condition{query.Condition{"tx.time", query.OpGreaterEqual, txTime}}}, + } + + for _, tc := range testCases { + query, err := query.New(tc.s) + require.Nil(t, err) + + assert.Equal(t, tc.conditions, query.Conditions()) + } +} From c9694b1ba1452403a521c3952537528ce64c9a96 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 27 Nov 2017 18:46:57 -0600 Subject: [PATCH 264/515] fix warnings --- pubsub/query/query_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go index 93b63a157..b16abdafa 100644 --- a/pubsub/query/query_test.go +++ b/pubsub/query/query_test.go @@ -45,15 +45,15 @@ func TestMatches(t *testing.T) { } for _, tc := range testCases { - query, err := query.New(tc.s) + q, err := query.New(tc.s) if !tc.err { require.Nil(t, err) } if tc.matches { - assert.True(t, query.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags) + assert.True(t, q.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags) } else { - assert.False(t, query.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags) + assert.False(t, q.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags) } } } @@ -77,9 +77,9 @@ func TestConditions(t *testing.T) { } for _, tc := range testCases { - query, err := query.New(tc.s) + q, err := query.New(tc.s) require.Nil(t, err) - assert.Equal(t, tc.conditions, query.Conditions()) + assert.Equal(t, tc.conditions, q.Conditions()) } } From ebc543ebe3598045b15506984d99acc138ec06eb Mon Sep 17 00:00:00 2001 From: Petabyte Storage Date: Fri, 1 Dec 2017 09:51:37 -0800 Subject: [PATCH 265/515] fix warnings --- pubsub/query/parser_test.go | 4 ++-- pubsub/query/query_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git 
a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go index 165ddda7b..e31079b43 100644 --- a/pubsub/query/parser_test.go +++ b/pubsub/query/parser_test.go @@ -83,9 +83,9 @@ func TestParser(t *testing.T) { for _, c := range cases { _, err := query.New(c.query) if c.valid { - assert.NoError(t, err, "Query was '%s'", c.query) + assert.NoErrorf(t, err, "Query was '%s'", c.query) } else { - assert.Error(t, err, "Query was '%s'", c.query) + assert.Errorf(t, err, "Query was '%s'", c.query) } } } diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go index b16abdafa..b980a79c0 100644 --- a/pubsub/query/query_test.go +++ b/pubsub/query/query_test.go @@ -71,9 +71,9 @@ func TestConditions(t *testing.T) { s string conditions []query.Condition }{ - {"tm.events.type='NewBlock'", []query.Condition{query.Condition{"tm.events.type", query.OpEqual, "NewBlock"}}}, - {"tx.gas > 7 AND tx.gas < 9", []query.Condition{query.Condition{"tx.gas", query.OpGreater, int64(7)}, query.Condition{"tx.gas", query.OpLess, int64(9)}}}, - {"tx.time >= TIME 2013-05-03T14:45:00Z", []query.Condition{query.Condition{"tx.time", query.OpGreaterEqual, txTime}}}, + {s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}}, + {s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}}, + {s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}}, } for _, tc := range testCases { From 17dc8a74497d3fee933592ef860275e6b0dd71d6 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 3 Dec 2017 21:44:48 -0800 Subject: [PATCH 266/515] SetDeleter/Batch separation --- db/db.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index 
e86394713..08ebaeaac 100644 --- a/db/db.go +++ b/db/db.go @@ -27,9 +27,13 @@ type CacheDB interface { Write() // Write to the underlying DB } -type Batch interface { +type SetDeleter interface { Set(key, value []byte) Delete(key []byte) +} + +type Batch interface { + SetDeleter Write() } From 3af6044fdf1945a37d81af4f7703e4a2c24ebfbc Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 4 Dec 2017 10:38:18 -0600 Subject: [PATCH 267/515] add license file (Fixes #87) [ci skip] --- LICENSE | 193 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..5d4ad3b1b --- /dev/null +++ b/LICENSE @@ -0,0 +1,193 @@ +Tendermint Libraries +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
From 4769719a4b3fbe401b9e5bdf49418d32fa027119 Mon Sep 17 00:00:00 2001 From: Petabyte Storage Date: Mon, 4 Dec 2017 08:54:19 -0800 Subject: [PATCH 268/515] fix Errorf --- glide.lock | 40 ++++++++++++++++++------------------- pubsub/query/parser_test.go | 4 ++-- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/glide.lock b/glide.lock index b0b3ff3c7..9f8ddf0e8 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-08-11T20:28:34.550901198Z +updated: 2017-12-04T08:45:29.247829134-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 0873e56b0faeae3a1d661b10d629135508ea5504 + version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8 subpackages: - log - log/level @@ -18,11 +18,11 @@ imports: - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 7a2f19628aabfe68f0766b59e74d6315f8347d22 + version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl - version: a4b07c25de5ff55ad3b8936cea69a79a3d95a855 + version: 23c074d0eceb2b8a5bfdbb271ab780cde70f05a8 subpackages: - hcl/ast - hcl/parser @@ -39,33 +39,31 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 51463bfca2576e06c62a8504b5c0f06d61312647 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mattn/go-colorable - version: ded68f7a9561c023e790de24279db7ebf473ea80 + version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe + version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure - version: 
cc8532a8e9a55ea36402aa21efdf403a60d34096 -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: 06020f85339e21b2478f756a78e295255ffa4d6a - name: github.com/pelletier/go-toml - version: 97253b98df84f9eef872866d079e74b8265150f1 + version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd - name: github.com/pkg/errors - version: c605e284fe17294bda444b34710735b29d1a9d90 + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 9be650865eab0c12963d8753212f4f9c66cdcf12 + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - version: db6b9a8b3f3f400c8ecb4a4d7d02245b8facad66 + version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag version: 80fe0fb4eba54167e2ccae1c6c950e72abf61b73 - name: github.com/spf13/viper - version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 subpackages: @@ -82,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b53add0b622662731985485f3a19be7f684660b8 + version: 7d50b38b3815efe313728de77e2995c8813ce13f subpackages: - data - data/base58 @@ -91,11 +89,11 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 5a033cc77e57eca05bdb50522851d29e03569cbe + version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e subpackages: - ripemd160 - name: golang.org/x/sys - version: 9ccfe848b9db8435a24c424abbc07a921adf1df5 + version: 661970f62f5897bc0cd5fdca7e087ba8a98a8fa1 subpackages: - unix - name: golang.org/x/text @@ -104,9 +102,9 @@ imports: - transform - unicode/norm - name: 
gopkg.in/go-playground/validator.v9 - version: d529ee1b0f30352444f507cc6cdac96bfd12decc + version: 6d8c18553ea1ac493d049edd6f102f52e618f085 - name: gopkg.in/yaml.v2 - version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go index e31079b43..bf562c0bd 100644 --- a/pubsub/query/parser_test.go +++ b/pubsub/query/parser_test.go @@ -83,9 +83,9 @@ func TestParser(t *testing.T) { for _, c := range cases { _, err := query.New(c.query) if c.valid { - assert.NoErrorf(t, err, "Query was '%s'", c.query) + assert.NoError(t, err, "Query was '"+c.query+"'") } else { - assert.Errorf(t, err, "Query was '%s'", c.query) + assert.Error(t, err, "Query was '"+c.query+"'") } } } From 53cdb6cf82148a7a0c52d959e152d72797f3df87 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 5 Dec 2017 11:25:17 +0100 Subject: [PATCH 269/515] Demo throttle timer is broken --- common/throttle_timer_test.go | 78 +++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 common/throttle_timer_test.go diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go new file mode 100644 index 000000000..bd03a7a68 --- /dev/null +++ b/common/throttle_timer_test.go @@ -0,0 +1,78 @@ +package common + +import ( + "sync" + "testing" + "time" + + // make govet noshadow happy... 
+ asrt "github.com/stretchr/testify/assert" +) + +type counter struct { + input chan struct{} + mtx sync.Mutex + count int +} + +func (c *counter) Increment() { + c.mtx.Lock() + c.count++ + c.mtx.Unlock() +} + +func (c *counter) Count() int { + c.mtx.Lock() + val := c.count + c.mtx.Unlock() + return val +} + +// Read should run in a go-routine and +// updates count by one every time a packet comes in +func (c *counter) Read() { + for range c.input { + c.Increment() + } +} + +func TestThrottle(test *testing.T) { + assert := asrt.New(test) + + ms := 50 + delay := time.Duration(ms) * time.Millisecond + longwait := time.Duration(2) * delay + t := NewThrottleTimer("foo", delay) + + // start at 0 + c := &counter{input: t.Ch} + assert.Equal(0, c.Count()) + go c.Read() + + // waiting does nothing + time.Sleep(longwait) + assert.Equal(0, c.Count()) + + // send one event adds one + t.Set() + time.Sleep(longwait) + assert.Equal(1, c.Count()) + + // send a burst adds one + for i := 0; i < 5; i++ { + t.Set() + } + time.Sleep(longwait) + assert.Equal(2, c.Count()) + + // send 12, over 2 delay sections, adds 3 + short := time.Duration(ms/5) * time.Millisecond + for i := 0; i < 13; i++ { + t.Set() + time.Sleep(short) + } + time.Sleep(longwait) + assert.Equal(5, c.Count()) + + close(t.Ch) +} From 26abd65e34910994e921020335ed0d5c01a113a9 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Tue, 5 Dec 2017 15:01:07 +0100 Subject: [PATCH 270/515] Add tests for repeat timer --- common/repeat_timer_test.go | 78 +++++++++++++++++++++++++++++++++++ common/throttle_timer_test.go | 10 ++--- 2 files changed, 83 insertions(+), 5 deletions(-) create mode 100644 common/repeat_timer_test.go diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go new file mode 100644 index 000000000..9f03f41df --- /dev/null +++ b/common/repeat_timer_test.go @@ -0,0 +1,78 @@ +package common + +import ( + "sync" + "testing" + "time" + + // make govet noshadow happy... 
+ asrt "github.com/stretchr/testify/assert" +) + +type rCounter struct { + input chan time.Time + mtx sync.Mutex + count int +} + +func (c *rCounter) Increment() { + c.mtx.Lock() + c.count++ + c.mtx.Unlock() +} + +func (c *rCounter) Count() int { + c.mtx.Lock() + val := c.count + c.mtx.Unlock() + return val +} + +// Read should run in a go-routine and +// updates count by one every time a packet comes in +func (c *rCounter) Read() { + for range c.input { + c.Increment() + } +} + +func TestRepeat(test *testing.T) { + assert := asrt.New(test) + + dur := time.Duration(50) * time.Millisecond + short := time.Duration(20) * time.Millisecond + // delay waits for cnt durations, an a little extra + delay := func(cnt int) time.Duration { + return time.Duration(cnt)*dur + time.Millisecond + } + t := NewRepeatTimer("bar", dur) + + // start at 0 + c := &rCounter{input: t.Ch} + go c.Read() + assert.Equal(0, c.Count()) + + // wait for 4 periods + time.Sleep(delay(4)) + assert.Equal(4, c.Count()) + + // keep reseting leads to no firing + for i := 0; i < 20; i++ { + time.Sleep(short) + t.Reset() + } + assert.Equal(4, c.Count()) + + // after this, it still works normal + time.Sleep(delay(2)) + assert.Equal(6, c.Count()) + + // after a stop, nothing more is sent + stopped := t.Stop() + assert.True(stopped) + time.Sleep(delay(7)) + assert.Equal(6, c.Count()) + + // close channel to stop counter + close(t.Ch) +} diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index bd03a7a68..00f5abdec 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -9,19 +9,19 @@ import ( asrt "github.com/stretchr/testify/assert" ) -type counter struct { +type thCounter struct { input chan struct{} mtx sync.Mutex count int } -func (c *counter) Increment() { +func (c *thCounter) Increment() { c.mtx.Lock() c.count++ c.mtx.Unlock() } -func (c *counter) Count() int { +func (c *thCounter) Count() int { c.mtx.Lock() val := c.count c.mtx.Unlock() @@ -30,7 +30,7 @@ 
func (c *counter) Count() int { // Read should run in a go-routine and // updates count by one every time a packet comes in -func (c *counter) Read() { +func (c *thCounter) Read() { for range c.input { c.Increment() } @@ -45,7 +45,7 @@ func TestThrottle(test *testing.T) { t := NewThrottleTimer("foo", delay) // start at 0 - c := &counter{input: t.Ch} + c := &thCounter{input: t.Ch} assert.Equal(0, c.Count()) go c.Read() From c325ce218293417161ac53152419ebc17b48f132 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 5 Dec 2017 17:13:10 -0600 Subject: [PATCH 271/515] use NoErrorf and Errorf functions --- glide.lock | 42 ++++++++++++++++++------------------- glide.yaml | 1 - pubsub/query/parser_test.go | 4 ++-- 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/glide.lock b/glide.lock index 9f8ddf0e8..4b9c46c77 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ -hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-12-04T08:45:29.247829134-08:00 +hash: 1f3d3426e823e4a8e6d4473615fcc86c767bbea6da9114ea1e7e0a9f0ccfa129 +updated: 2017-12-05T23:47:13.202024407Z imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8 + version: 53f10af5d5c7375d4655a3d6852457ed17ab5cc7 subpackages: - log - log/level @@ -12,13 +12,13 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-playground/locales - version: 1e5f1161c6416a5ff48840eb8724a394e48cc534 + version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 subpackages: - currency - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - 
name: github.com/hashicorp/hcl @@ -39,33 +39,33 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mattn/go-colorable version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty - version: a5cdd64afdee435007ee3e9f6ed4684af949d568 + version: 6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a - name: github.com/pelletier/go-toml version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd - name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d + version: f15c970de5b76fac0b59abb32d62c17cc7bed265 - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 subpackages: - mem - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b + version: de2d9c4eca8f3c1de17d48b096b6504e0296f003 - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 80fe0fb4eba54167e2ccae1c6c950e72abf61b73 + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 4dddf7c62e16bce5807744018f5b753bfe21bbd2 - name: github.com/syndtr/goleveldb - version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 + version: adf24ef3f94bd13ec4163060b21a5678f22b429b subpackages: - leveldb - leveldb/cache @@ -80,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 7d50b38b3815efe313728de77e2995c8813ce13f + version: 2baffcb6b690057568bc90ef1d457efb150b979a subpackages: - data - data/base58 @@ -89,25 +89,25 @@ imports: 
subpackages: - term - name: golang.org/x/crypto - version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - ripemd160 - name: golang.org/x/sys - version: 661970f62f5897bc0cd5fdca7e087ba8a98a8fa1 + version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 subpackages: - unix - name: golang.org/x/text - version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4 + version: 57961680700a5336d15015c8c50686ca5ba362a4 subpackages: - transform - unicode/norm - name: gopkg.in/go-playground/validator.v9 - version: 6d8c18553ea1ac493d049edd6f102f52e618f085 + version: 61caf9d3038e1af346dbf5c2e16f6678e1548364 - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 testImports: - name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 subpackages: - spew - name: github.com/pmezard/go-difflib @@ -115,7 +115,7 @@ testImports: subpackages: - difflib - name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index 22825a273..0d722c853 100644 --- a/glide.yaml +++ b/glide.yaml @@ -26,7 +26,6 @@ import: - package: gopkg.in/go-playground/validator.v9 testImport: - package: github.com/stretchr/testify - version: ^1.1.4 subpackages: - assert - require diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go index bf562c0bd..e31079b43 100644 --- a/pubsub/query/parser_test.go +++ b/pubsub/query/parser_test.go @@ -83,9 +83,9 @@ func TestParser(t *testing.T) { for _, c := range cases { _, err := query.New(c.query) if c.valid { - assert.NoError(t, err, "Query was '"+c.query+"'") + assert.NoErrorf(t, err, "Query was '%s'", c.query) } else { - assert.Error(t, err, "Query was '"+c.query+"'") + assert.Errorf(t, err, "Query was '%s'", c.query) } } 
} From 303b6df81274e5d18c9b47088c8318da627d61ea Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 5 Dec 2017 18:04:07 -0600 Subject: [PATCH 272/515] update changelog --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0aa90d7a..5acfa1940 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## 0.5.0 (December 5, 2017) + +BREAKING: + - [common] replace Service#Start, Service#Stop first param (bool) with an + error (ErrAlreadyStarted, ErrAlreadyStopped) + - [common] replace Service#Reset first param (bool) with an error + - [process] removed + +FEATURES: + - [common] IntInSlice and StringInSlice functions + ## 0.4.1 (November 27, 2017) FEATURES: From b166d627f3317e8e547412596ebfb390fd837d85 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 5 Dec 2017 18:04:28 -0600 Subject: [PATCH 273/515] bump up version to 0.5.0 --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index c30887b49..45222da79 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.4.1" +const Version = "0.5.0" From e6be03db31949be332e13ff460cc4f067575c7f0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 6 Dec 2017 02:05:57 -0500 Subject: [PATCH 274/515] update license and changelog --- CHANGELOG.md | 5 +++-- LICENSE | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5acfa1940..b679b839d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,13 +3,14 @@ ## 0.5.0 (December 5, 2017) BREAKING: - - [common] replace Service#Start, Service#Stop first param (bool) with an + - [common] replace Service#Start, Service#Stop first return value (bool) with an error (ErrAlreadyStarted, ErrAlreadyStopped) - - [common] replace Service#Reset first param (bool) with an error + - [common] replace Service#Reset first return value (bool) with an 
error - [process] removed FEATURES: - [common] IntInSlice and StringInSlice functions + - [pubsub/query] introduce `Condition` struct, expose `Operator`, and add `query.Conditions()` ## 0.4.1 (November 27, 2017) diff --git a/LICENSE b/LICENSE index 5d4ad3b1b..06bc5e1c6 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,5 @@ Tendermint Libraries -Copyright (C) 2015 Tendermint +Copyright (C) 2017 Tendermint From 3d9113c16e08fe53f31a2403a5280202c8c9cc14 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 09:18:04 +0100 Subject: [PATCH 275/515] Add a bit more padding to tests so they pass on osx with -race --- common/repeat_timer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 9f03f41df..87f34b950 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -43,7 +43,7 @@ func TestRepeat(test *testing.T) { short := time.Duration(20) * time.Millisecond // delay waits for cnt durations, an a little extra delay := func(cnt int) time.Duration { - return time.Duration(cnt)*dur + time.Millisecond + return time.Duration(cnt)*dur + time.Duration(5)*time.Millisecond } t := NewRepeatTimer("bar", dur) From dcb43956048f0d38495f39e43fd4438ec6d47de7 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 11:17:50 +0100 Subject: [PATCH 276/515] Refactor throttle timer --- common/throttle_timer.go | 102 ++++++++++++++++++++++------------ common/throttle_timer_test.go | 19 ++++++- 2 files changed, 85 insertions(+), 36 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 38ef4e9a3..e260e01bd 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -1,7 +1,7 @@ package common import ( - "sync" + "fmt" "time" ) @@ -12,54 +12,88 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". 
*/ type ThrottleTimer struct { - Name string - Ch chan struct{} - quit chan struct{} - dur time.Duration + Name string + Ch chan struct{} + input chan command + dur time.Duration - mtx sync.Mutex timer *time.Timer isSet bool } +type command int32 + +const ( + Set command = iota + Unset + Quit +) + func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - var ch = make(chan struct{}) - var quit = make(chan struct{}) - var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} - t.mtx.Lock() - t.timer = time.AfterFunc(dur, t.fireRoutine) - t.mtx.Unlock() + var t = &ThrottleTimer{ + Name: name, + Ch: make(chan struct{}, 1), + dur: dur, + input: make(chan command), + timer: time.NewTimer(dur), + } t.timer.Stop() + go t.run() return t } -func (t *ThrottleTimer) fireRoutine() { - t.mtx.Lock() - defer t.mtx.Unlock() - select { - case t.Ch <- struct{}{}: - t.isSet = false - case <-t.quit: - // do nothing +func (t *ThrottleTimer) run() { + for { + select { + case cmd := <-t.input: + // stop goroutine if the input says so + if t.processInput(cmd) { + // TODO: do we want to close the channels??? + // close(t.Ch) + // close(t.input) + return + } + case <-t.timer.C: + t.isSet = false + t.Ch <- struct{}{} + } + } +} + +// all modifications of the internal state of ThrottleTimer +// happen in this method. 
It is only called from the run goroutine +// so we avoid any race conditions +func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { + fmt.Printf("processInput: %d\n", cmd) + switch cmd { + case Set: + if !t.isSet { + t.isSet = true + t.timer.Reset(t.dur) + } + case Quit: + shutdown = true + fallthrough + case Unset: + if t.isSet { + t.isSet = false + if !t.timer.Stop() { + <-t.timer.C + } + } default: - t.timer.Reset(t.dur) + panic("unknown command!") } + // return true + return shutdown } func (t *ThrottleTimer) Set() { - t.mtx.Lock() - defer t.mtx.Unlock() - if !t.isSet { - t.isSet = true - t.timer.Reset(t.dur) - } + t.input <- Set } func (t *ThrottleTimer) Unset() { - t.mtx.Lock() - defer t.mtx.Unlock() - t.isSet = false - t.timer.Stop() + t.input <- Unset } // For ease of .Stop()'ing services before .Start()'ing them, @@ -68,8 +102,6 @@ func (t *ThrottleTimer) Stop() bool { if t == nil { return false } - close(t.quit) - t.mtx.Lock() - defer t.mtx.Unlock() - return t.timer.Stop() + t.input <- Quit + return true } diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 00f5abdec..014f9dcdc 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -41,6 +41,7 @@ func TestThrottle(test *testing.T) { ms := 50 delay := time.Duration(ms) * time.Millisecond + shortwait := time.Duration(ms/2) * time.Millisecond longwait := time.Duration(2) * delay t := NewThrottleTimer("foo", delay) @@ -65,6 +66,21 @@ func TestThrottle(test *testing.T) { time.Sleep(longwait) assert.Equal(2, c.Count()) + // keep cancelling before it is ready + for i := 0; i < 10; i++ { + t.Set() + time.Sleep(shortwait) + t.Unset() + } + time.Sleep(longwait) + assert.Equal(2, c.Count()) + + // a few unsets do nothing... 
+ for i := 0; i < 5; i++ { + t.Unset() + } + assert.Equal(2, c.Count()) + // send 12, over 2 delay sections, adds 3 short := time.Duration(ms/5) * time.Millisecond for i := 0; i < 13; i++ { @@ -74,5 +90,6 @@ func TestThrottle(test *testing.T) { time.Sleep(longwait) assert.Equal(5, c.Count()) - close(t.Ch) + stopped := t.Stop() + assert.True(stopped) } From 4ec7883891fa9700ce4b122252b8fc697df0bfca Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 11:21:01 +0100 Subject: [PATCH 277/515] Cleanup --- common/throttle_timer.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index e260e01bd..705a12a12 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -1,7 +1,6 @@ package common import ( - "fmt" "time" ) @@ -64,7 +63,6 @@ func (t *ThrottleTimer) run() { // happen in this method. It is only called from the run goroutine // so we avoid any race conditions func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { - fmt.Printf("processInput: %d\n", cmd) switch cmd { case Set: if !t.isSet { @@ -77,9 +75,7 @@ func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { case Unset: if t.isSet { t.isSet = false - if !t.timer.Stop() { - <-t.timer.C - } + t.timer.Stop() } default: panic("unknown command!") From 0a8721113a67b3c05f58e12328a0fe0216811b0c Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 21:08:55 +0100 Subject: [PATCH 278/515] First pass of PR updates --- common/throttle_timer.go | 28 ++++++++++++++-------------- common/throttle_timer_test.go | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 705a12a12..f2ce60b2a 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -11,10 +11,11 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". 
*/ type ThrottleTimer struct { - Name string - Ch chan struct{} - input chan command - dur time.Duration + Name string + Ch <-chan struct{} + output chan<- struct{} + input chan command + dur time.Duration timer *time.Timer isSet bool @@ -29,12 +30,14 @@ const ( ) func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { + c := make(chan struct{}, 1) var t = &ThrottleTimer{ - Name: name, - Ch: make(chan struct{}, 1), - dur: dur, - input: make(chan command), - timer: time.NewTimer(dur), + Name: name, + Ch: c, + dur: dur, + output: c, + input: make(chan command), + timer: time.NewTimer(dur), } t.timer.Stop() go t.run() @@ -47,14 +50,12 @@ func (t *ThrottleTimer) run() { case cmd := <-t.input: // stop goroutine if the input says so if t.processInput(cmd) { - // TODO: do we want to close the channels??? - // close(t.Ch) - // close(t.input) + close(t.output) return } case <-t.timer.C: t.isSet = false - t.Ch <- struct{}{} + t.output <- struct{}{} } } } @@ -80,7 +81,6 @@ func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { default: panic("unknown command!") } - // return true return shutdown } diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 014f9dcdc..81b817038 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -10,7 +10,7 @@ import ( ) type thCounter struct { - input chan struct{} + input <-chan struct{} mtx sync.Mutex count int } From 1ac4c5dd6d007a708337e1ad2636e456e1e4b8db Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 21:20:30 +0100 Subject: [PATCH 279/515] Made throttle output non-blocking --- common/throttle_timer.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index f2ce60b2a..069b6d84b 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -30,7 +30,7 @@ const ( ) func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - c := make(chan 
struct{}, 1) + c := make(chan struct{}) var t = &ThrottleTimer{ Name: name, Ch: c, @@ -54,12 +54,22 @@ func (t *ThrottleTimer) run() { return } case <-t.timer.C: - t.isSet = false - t.output <- struct{}{} + t.trySend() } } } +// trySend performs non-blocking send on t.output (t.Ch) +func (t *ThrottleTimer) trySend() { + select { + case t.output <- struct{}{}: + t.isSet = false + default: + // if we just want to drop, replace this with t.isSet = false + t.timer.Reset(t.dur) + } +} + // all modifications of the internal state of ThrottleTimer // happen in this method. It is only called from the run goroutine // so we avoid any race conditions From e430d3f8447d23b739840d5137ae75c37ff33a1d Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 21:51:23 +0100 Subject: [PATCH 280/515] One more attempt with a read-only channel --- common/throttle_timer.go | 33 ++++++++++++++++++--------------- common/throttle_timer_test.go | 2 +- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 069b6d84b..4a4b30033 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -11,11 +11,10 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". */ type ThrottleTimer struct { - Name string - Ch <-chan struct{} - output chan<- struct{} - input chan command - dur time.Duration + Name string + Ch chan struct{} + input chan command + dur time.Duration timer *time.Timer isSet bool @@ -30,27 +29,31 @@ const ( ) func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - c := make(chan struct{}) var t = &ThrottleTimer{ - Name: name, - Ch: c, - dur: dur, - output: c, - input: make(chan command), - timer: time.NewTimer(dur), + Name: name, + Ch: make(chan struct{}), + dur: dur, + input: make(chan command), + timer: time.NewTimer(dur), } t.timer.Stop() go t.run() return t } +// C is the proper way to listen to the timer output. 
+// t.Ch will be made private in the (near?) future +func (t *ThrottleTimer) C() <-chan struct{} { + return t.Ch +} + func (t *ThrottleTimer) run() { for { select { case cmd := <-t.input: // stop goroutine if the input says so if t.processInput(cmd) { - close(t.output) + close(t.Ch) return } case <-t.timer.C: @@ -59,10 +62,10 @@ func (t *ThrottleTimer) run() { } } -// trySend performs non-blocking send on t.output (t.Ch) +// trySend performs non-blocking send on t.Ch func (t *ThrottleTimer) trySend() { select { - case t.output <- struct{}{}: + case t.Ch <- struct{}{}: t.isSet = false default: // if we just want to drop, replace this with t.isSet = false diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 81b817038..f6b5d1df5 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -46,7 +46,7 @@ func TestThrottle(test *testing.T) { t := NewThrottleTimer("foo", delay) // start at 0 - c := &thCounter{input: t.Ch} + c := &thCounter{input: t.C()} assert.Equal(0, c.Count()) go c.Read() From 8b518fadb2f3eb928ce5d5a014b4087c5b31309a Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Wed, 6 Dec 2017 22:28:18 +0100 Subject: [PATCH 281/515] Don't close throttle channel, explain why --- common/throttle_timer.go | 2 +- common/throttle_timer_test.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 4a4b30033..051d44376 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -52,8 +52,8 @@ func (t *ThrottleTimer) run() { select { case cmd := <-t.input: // stop goroutine if the input says so + // don't close channels, as closed channels mess up select reads if t.processInput(cmd) { - close(t.Ch) return } case <-t.timer.C: diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index f6b5d1df5..7d96ac7c5 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -31,6 +31,9 @@ func (c *thCounter) 
Count() int { // Read should run in a go-routine and // updates count by one every time a packet comes in func (c *thCounter) Read() { + // note, since this channel never closes, this will never end + // if thCounter was used in anything beyond trivial test cases. + // it would have to be smarter. for range c.input { c.Increment() } From 3779310c72c93173b9e87561281e697e7cdf9437 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 6 Dec 2017 18:48:39 -0600 Subject: [PATCH 282/515] return back output internal channel (way go does with Timer) --- common/throttle_timer.go | 47 +++++++++++++++++++++-------------- common/throttle_timer_test.go | 2 +- 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 051d44376..ab2ad2e62 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -11,10 +11,11 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". */ type ThrottleTimer struct { - Name string - Ch chan struct{} - input chan command - dur time.Duration + Name string + Ch <-chan struct{} + input chan command + output chan<- struct{} + dur time.Duration timer *time.Timer isSet bool @@ -28,25 +29,22 @@ const ( Quit ) +// NewThrottleTimer creates a new ThrottleTimer. func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { + c := make(chan struct{}) var t = &ThrottleTimer{ - Name: name, - Ch: make(chan struct{}), - dur: dur, - input: make(chan command), - timer: time.NewTimer(dur), + Name: name, + Ch: c, + dur: dur, + input: make(chan command), + output: c, + timer: time.NewTimer(dur), } t.timer.Stop() go t.run() return t } -// C is the proper way to listen to the timer output. -// t.Ch will be made private in the (near?) 
future -func (t *ThrottleTimer) C() <-chan struct{} { - return t.Ch -} - func (t *ThrottleTimer) run() { for { select { @@ -65,7 +63,7 @@ func (t *ThrottleTimer) run() { // trySend performs non-blocking send on t.Ch func (t *ThrottleTimer) trySend() { select { - case t.Ch <- struct{}{}: + case t.output <- struct{}{}: t.isSet = false default: // if we just want to drop, replace this with t.isSet = false @@ -105,8 +103,21 @@ func (t *ThrottleTimer) Unset() { t.input <- Unset } -// For ease of .Stop()'ing services before .Start()'ing them, -// we ignore .Stop()'s on nil ThrottleTimers +// Stop prevents the ThrottleTimer from firing. It always returns true. Stop does not +// close the channel, to prevent a read from the channel succeeding +// incorrectly. +// +// To prevent a timer created with NewThrottleTimer from firing after a call to +// Stop, check the return value and drain the channel. +// +// For example, assuming the program has not received from t.C already: +// +// if !t.Stop() { +// <-t.C +// } +// +// For ease of stopping services before starting them, we ignore Stop on nil +// ThrottleTimers. 
func (t *ThrottleTimer) Stop() bool { if t == nil { return false diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 7d96ac7c5..a1b6606f5 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -49,7 +49,7 @@ func TestThrottle(test *testing.T) { t := NewThrottleTimer("foo", delay) // start at 0 - c := &thCounter{input: t.C()} + c := &thCounter{input: t.Ch} assert.Equal(0, c.Count()) go c.Read() From 887d766c86f1f217653915a2042374972c8f38ae Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Thu, 7 Dec 2017 10:15:38 +0100 Subject: [PATCH 283/515] Refactored RepeatTimer, tests hang --- common/repeat_timer.go | 121 +++++++++++++++++++++--------------- common/repeat_timer_test.go | 4 +- common/throttle_timer.go | 16 ++--- 3 files changed, 81 insertions(+), 60 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index d7d9154d4..0f6501131 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -1,7 +1,7 @@ package common import ( - "sync" + "fmt" "time" ) @@ -11,54 +11,40 @@ It's good for keeping connections alive. A RepeatTimer must be Stop()'d or it will keep a goroutine alive. 
*/ type RepeatTimer struct { - Ch chan time.Time + Name string + Ch <-chan time.Time + output chan<- time.Time + input chan repeatCommand - mtx sync.Mutex - name string - ticker *time.Ticker - quit chan struct{} - wg *sync.WaitGroup - dur time.Duration + dur time.Duration + timer *time.Timer } +type repeatCommand int32 + +const ( + Reset repeatCommand = iota + RQuit +) + func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { + c := make(chan time.Time) var t = &RepeatTimer{ - Ch: make(chan time.Time), - ticker: time.NewTicker(dur), - quit: make(chan struct{}), - wg: new(sync.WaitGroup), - name: name, - dur: dur, - } - t.wg.Add(1) - go t.fireRoutine(t.ticker) - return t -} + Name: name, + Ch: c, + output: c, + input: make(chan repeatCommand), -func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { - for { - select { - case t_ := <-ticker.C: - t.Ch <- t_ - case <-t.quit: - // needed so we know when we can reset t.quit - t.wg.Done() - return - } + timer: time.NewTimer(dur), + dur: dur, } + go t.run() + return t } // Wait the duration again before firing. 
func (t *RepeatTimer) Reset() { - t.Stop() - - t.mtx.Lock() // Lock - defer t.mtx.Unlock() - - t.ticker = time.NewTicker(t.dur) - t.quit = make(chan struct{}) - t.wg.Add(1) - go t.fireRoutine(t.ticker) + t.input <- Reset } // For ease of .Stop()'ing services before .Start()'ing them, @@ -67,20 +53,55 @@ func (t *RepeatTimer) Stop() bool { if t == nil { return false } - t.mtx.Lock() // Lock - defer t.mtx.Unlock() + t.input <- RQuit + return true +} - exists := t.ticker != nil - if exists { - t.ticker.Stop() // does not close the channel +func (t *RepeatTimer) run() { + for { + fmt.Println("for") select { - case <-t.Ch: - // read off channel if there's anything there - default: + case cmd := <-t.input: + // stop goroutine if the input says so + // don't close channels, as closed channels mess up select reads + if t.processInput(cmd) { + t.timer.Stop() + return + } + case <-t.timer.C: + fmt.Println("tick") + // send if not blocked, then start the next tick + // for blocking send, just + // t.output <- time.Now() + t.trySend() + t.timer.Reset(t.dur) } - close(t.quit) - t.wg.Wait() // must wait for quit to close else we race Reset - t.ticker = nil } - return exists +} + +// trySend performs non-blocking send on t.Ch +func (t *RepeatTimer) trySend() { + // TODO: this was blocking in previous version (t.Ch <- t_) + // should I use that behavior unstead of unblocking as per throttle? + select { + case t.output <- time.Now(): + default: + } +} + +// all modifications of the internal state of ThrottleTimer +// happen in this method. 
It is only called from the run goroutine +// so we avoid any race conditions +func (t *RepeatTimer) processInput(cmd repeatCommand) (shutdown bool) { + fmt.Printf("process: %d\n", cmd) + switch cmd { + case Reset: + t.timer.Reset(t.dur) + case RQuit: + t.timer.Stop() + shutdown = true + default: + panic("unknown command!") + } + return shutdown } diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 87f34b950..d66cd3152 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -10,7 +10,7 @@ import ( ) type rCounter struct { - input chan time.Time + input <-chan time.Time mtx sync.Mutex count int } @@ -74,5 +74,5 @@ func TestRepeat(test *testing.T) { assert.Equal(6, c.Count()) // close channel to stop counter - close(t.Ch) + t.Stop() } diff --git a/common/throttle_timer.go b/common/throttle_timer.go index ab2ad2e62..c148d9904 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -13,7 +13,7 @@ at most once every "dur". type ThrottleTimer struct { Name string Ch <-chan struct{} - input chan command + input chan throttleCommand output chan<- struct{} dur time.Duration @@ -21,12 +21,12 @@ type ThrottleTimer struct { isSet bool } -type command int32 +type throttleCommand int32 const ( - Set command = iota + Set throttleCommand = iota Unset - Quit + TQuit ) // NewThrottleTimer creates a new ThrottleTimer. @@ -36,7 +36,7 @@ func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { Name: name, Ch: c, dur: dur, - input: make(chan command), + input: make(chan throttleCommand), output: c, timer: time.NewTimer(dur), } @@ -74,14 +74,14 @@ func (t *ThrottleTimer) trySend() { // all modifications of the internal state of ThrottleTimer // happen in this method. 
It is only called from the run goroutine // so we avoid any race conditions -func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { +func (t *ThrottleTimer) processInput(cmd throttleCommand) (shutdown bool) { switch cmd { case Set: if !t.isSet { t.isSet = true t.timer.Reset(t.dur) } - case Quit: + case TQuit: shutdown = true fallthrough case Unset: @@ -122,6 +122,6 @@ func (t *ThrottleTimer) Stop() bool { if t == nil { return false } - t.input <- Quit + t.input <- TQuit return true } From 8797197cdfc9920e2dbce274c8aba8c09b15f86f Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Thu, 7 Dec 2017 10:36:03 +0100 Subject: [PATCH 284/515] No more blocking on multiple Stop() --- common/repeat_timer.go | 33 +++++++++++++++++---------------- common/repeat_timer_test.go | 2 +- common/throttle_timer.go | 8 +++++--- common/throttle_timer_test.go | 5 +++++ 4 files changed, 28 insertions(+), 20 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 0f6501131..734c2d32a 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -16,8 +16,9 @@ type RepeatTimer struct { output chan<- time.Time input chan repeatCommand - dur time.Duration - timer *time.Timer + dur time.Duration + timer *time.Timer + stopped bool } type repeatCommand int32 @@ -50,43 +51,42 @@ func (t *RepeatTimer) Reset() { // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil RepeatTimers. 
func (t *RepeatTimer) Stop() bool { - if t == nil { + if t == nil || t.stopped { return false } t.input <- RQuit + t.stopped = true return true } func (t *RepeatTimer) run() { - for { - fmt.Println("for") + done := false + for !done { select { case cmd := <-t.input: // stop goroutine if the input says so // don't close channels, as closed channels mess up select reads - if t.processInput(cmd) { - t.timer.Stop() - return - } + done = t.processInput(cmd) case <-t.timer.C: - fmt.Println("tick") // send if not blocked, then start the next tick - // for blocking send, just - // t.output <- time.Now() t.trySend() t.timer.Reset(t.dur) } } + fmt.Println("end run") } // trySend performs non-blocking send on t.Ch func (t *RepeatTimer) trySend() { // TODO: this was blocking in previous version (t.Ch <- t_) // should I use that behavior unstead of unblocking as per throttle? - select { - case t.output <- time.Now(): - default: - } + + // select { + // case t.output <- time.Now(): + // default: + // } + + t.output <- time.Now() } // all modifications of the internal state of ThrottleTimer @@ -98,6 +98,7 @@ func (t *RepeatTimer) processInput(cmd repeatCommand) (shutdown bool) { case Reset: t.timer.Reset(t.dur) case RQuit: + fmt.Println("got quit") t.timer.Stop() shutdown = true default: diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index d66cd3152..15ca32c31 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -73,6 +73,6 @@ func TestRepeat(test *testing.T) { time.Sleep(delay(7)) assert.Equal(6, c.Count()) - // close channel to stop counter + // extra calls to stop don't block t.Stop() } diff --git a/common/throttle_timer.go b/common/throttle_timer.go index c148d9904..0e54f1027 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -17,8 +17,9 @@ type ThrottleTimer struct { output chan<- struct{} dur time.Duration - timer *time.Timer - isSet bool + timer *time.Timer + isSet bool + stopped bool } type 
throttleCommand int32 @@ -82,6 +83,7 @@ func (t *ThrottleTimer) processInput(cmd throttleCommand) (shutdown bool) { t.timer.Reset(t.dur) } case TQuit: + t.stopped = true shutdown = true fallthrough case Unset: @@ -119,7 +121,7 @@ func (t *ThrottleTimer) Unset() { // For ease of stopping services before starting them, we ignore Stop on nil // ThrottleTimers. func (t *ThrottleTimer) Stop() bool { - if t == nil { + if t == nil || t.stopped { return false } t.input <- TQuit diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index a1b6606f5..2a81bb02e 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -95,4 +95,9 @@ func TestThrottle(test *testing.T) { stopped := t.Stop() assert.True(stopped) + time.Sleep(longwait) + assert.Equal(5, c.Count()) + + // extra calls to stop don't block + t.Stop() } From cc7a87e27caa55ca84e984d1d081b09eeb16ffe6 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Thu, 7 Dec 2017 11:22:54 +0100 Subject: [PATCH 285/515] Use Ticker in Repeat again to avoid drift --- common/repeat_timer.go | 34 ++++++++++++++-------------------- common/repeat_timer_test.go | 6 +++--- 2 files changed, 17 insertions(+), 23 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 734c2d32a..b3eb107d2 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -1,7 +1,6 @@ package common import ( - "fmt" "time" ) @@ -17,7 +16,7 @@ type RepeatTimer struct { input chan repeatCommand dur time.Duration - timer *time.Timer + ticker *time.Ticker stopped bool } @@ -36,8 +35,8 @@ func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { output: c, input: make(chan repeatCommand), - timer: time.NewTimer(dur), - dur: dur, + dur: dur, + ticker: time.NewTicker(dur), } go t.run() return t @@ -51,6 +50,7 @@ func (t *RepeatTimer) Reset() { // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil RepeatTimers. 
func (t *RepeatTimer) Stop() bool { + // use t.stopped to gracefully handle many Stop() without blocking if t == nil || t.stopped { return false } @@ -67,39 +67,33 @@ func (t *RepeatTimer) run() { // stop goroutine if the input says so // don't close channels, as closed channels mess up select reads done = t.processInput(cmd) - case <-t.timer.C: - // send if not blocked, then start the next tick + case <-t.ticker.C: t.trySend() - t.timer.Reset(t.dur) } } - fmt.Println("end run") } // trySend performs non-blocking send on t.Ch func (t *RepeatTimer) trySend() { - // TODO: this was blocking in previous version (t.Ch <- t_) + // NOTE: this was blocking in previous version (t.Ch <- t_) // should I use that behavior unstead of unblocking as per throttle? - - // select { - // case t.output <- time.Now(): - // default: - // } - - t.output <- time.Now() + // probably not: https://golang.org/src/time/sleep.go#L132 + select { + case t.output <- time.Now(): + default: + } } // all modifications of the internal state of ThrottleTimer // happen in this method. 
It is only called from the run goroutine // so we avoid any race conditions func (t *RepeatTimer) processInput(cmd repeatCommand) (shutdown bool) { - fmt.Printf("process: %d\n", cmd) switch cmd { case Reset: - t.timer.Reset(t.dur) + t.ticker.Stop() + t.ticker = time.NewTicker(t.dur) case RQuit: - fmt.Println("got quit") - t.timer.Stop() + t.ticker.Stop() shutdown = true default: panic("unknown command!") diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 15ca32c31..db53aa614 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -39,11 +39,11 @@ func (c *rCounter) Read() { func TestRepeat(test *testing.T) { assert := asrt.New(test) - dur := time.Duration(50) * time.Millisecond + dur := time.Duration(100) * time.Millisecond short := time.Duration(20) * time.Millisecond // delay waits for cnt durations, an a little extra delay := func(cnt int) time.Duration { - return time.Duration(cnt)*dur + time.Duration(5)*time.Millisecond + return time.Duration(cnt)*dur + time.Duration(10)*time.Millisecond } t := NewRepeatTimer("bar", dur) @@ -70,7 +70,7 @@ func TestRepeat(test *testing.T) { // after a stop, nothing more is sent stopped := t.Stop() assert.True(stopped) - time.Sleep(delay(7)) + time.Sleep(delay(2)) assert.Equal(6, c.Count()) // extra calls to stop don't block From ec4adf21e0451f3fb7da33932d6cac168ddeaa93 Mon Sep 17 00:00:00 2001 From: Ethan Frey Date: Fri, 8 Dec 2017 10:07:04 +0100 Subject: [PATCH 286/515] Cleanup from PR comments --- common/repeat_timer.go | 20 ++++++++++---------- common/throttle_timer.go | 4 ++-- common/throttle_timer_test.go | 3 --- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index b3eb107d2..77f736034 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -20,7 +20,7 @@ type RepeatTimer struct { stopped bool } -type repeatCommand int32 +type repeatCommand int8 const ( Reset repeatCommand = iota @@ -67,21 +67,21 @@ 
func (t *RepeatTimer) run() { // stop goroutine if the input says so // don't close channels, as closed channels mess up select reads done = t.processInput(cmd) - case <-t.ticker.C: - t.trySend() + case tick := <-t.ticker.C: + t.trySend(tick) } } } // trySend performs non-blocking send on t.Ch -func (t *RepeatTimer) trySend() { +func (t *RepeatTimer) trySend(tick time.Time) { // NOTE: this was blocking in previous version (t.Ch <- t_) - // should I use that behavior unstead of unblocking as per throttle? - // probably not: https://golang.org/src/time/sleep.go#L132 - select { - case t.output <- time.Now(): - default: - } + // probably better not: https://golang.org/src/time/sleep.go#L132 + t.output <- tick + // select { + // case t.output <- tick: + // default: + // } } // all modifications of the internal state of ThrottleTimer diff --git a/common/throttle_timer.go b/common/throttle_timer.go index 0e54f1027..a5bd6ded8 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -22,7 +22,7 @@ type ThrottleTimer struct { stopped bool } -type throttleCommand int32 +type throttleCommand int8 const ( Set throttleCommand = iota @@ -83,7 +83,6 @@ func (t *ThrottleTimer) processInput(cmd throttleCommand) (shutdown bool) { t.timer.Reset(t.dur) } case TQuit: - t.stopped = true shutdown = true fallthrough case Unset: @@ -125,5 +124,6 @@ func (t *ThrottleTimer) Stop() bool { return false } t.input <- TQuit + t.stopped = true return true } diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 2a81bb02e..94ec1b43c 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -95,9 +95,6 @@ func TestThrottle(test *testing.T) { stopped := t.Stop() assert.True(stopped) - time.Sleep(longwait) - assert.Equal(5, c.Count()) - // extra calls to stop don't block t.Stop() } From ff2fd63bf7db6373e5fb0c1d311c6a139b99dfe0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 8 Dec 2017 11:17:07 -0600 Subject: [PATCH 287/515] rename 
trySend to send --- common/repeat_timer.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 77f736034..23faf74ae 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -68,20 +68,20 @@ func (t *RepeatTimer) run() { // don't close channels, as closed channels mess up select reads done = t.processInput(cmd) case tick := <-t.ticker.C: - t.trySend(tick) + t.send(tick) } } } -// trySend performs non-blocking send on t.Ch -func (t *RepeatTimer) trySend(tick time.Time) { - // NOTE: this was blocking in previous version (t.Ch <- t_) - // probably better not: https://golang.org/src/time/sleep.go#L132 - t.output <- tick +// send performs blocking send on t.Ch +func (t *RepeatTimer) send(tick time.Time) { + // XXX: possibly it is better to not block: + // https://golang.org/src/time/sleep.go#L132 // select { // case t.output <- tick: // default: // } + t.output <- tick } // all modifications of the internal state of ThrottleTimer From 988e190ef745b93d41d3aabebeb9c4192ee8a2f1 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 9 Dec 2017 09:26:03 -0800 Subject: [PATCH 288/515] Deprecated Panic* --- common/errors.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/common/errors.go b/common/errors.go index 039342a67..4710b9ee0 100644 --- a/common/errors.go +++ b/common/errors.go @@ -22,6 +22,7 @@ func (se StackError) Error() string { // A panic resulting from a sanity check means there is a programmer error // and some guarantee is not satisfied. +// XXX DEPRECATED func PanicSanity(v interface{}) { panic(Fmt("Panicked on a Sanity Check: %v", v)) } @@ -29,17 +30,20 @@ func PanicSanity(v interface{}) { // A panic here means something has gone horribly wrong, in the form of data corruption or // failure of the operating system. In a correct/healthy system, these should never fire. // If they do, it's indicative of a much more serious problem. 
+// XXX DEPRECATED func PanicCrisis(v interface{}) { panic(Fmt("Panicked on a Crisis: %v", v)) } // Indicates a failure of consensus. Someone was malicious or something has // gone horribly wrong. These should really boot us into an "emergency-recover" mode +// XXX DEPRECATED func PanicConsensus(v interface{}) { panic(Fmt("Panicked on a Consensus Failure: %v", v)) } // For those times when we're not sure if we should panic +// XXX DEPRECATED func PanicQ(v interface{}) { panic(Fmt("Panicked questionably: %v", v)) } From cb4ba522ef643073c1b1ae372ef0a5e32078cb5f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 9 Dec 2017 23:05:13 -0600 Subject: [PATCH 289/515] add String method to Query interface Required for https://github.com/tendermint/tendermint/issues/945 --- pubsub/pubsub.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 52b8361f8..27f15cbeb 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -38,6 +38,7 @@ type cmd struct { // Query defines an interface for a query to be used for subscribing. 
type Query interface { Matches(tags map[string]interface{}) bool + String() string } // Server allows clients to subscribe/unsubscribe for messages, publishing From e4ef2835f0081c2ece83b9c1f777cf071f956e81 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 9 Dec 2017 23:35:14 -0600 Subject: [PATCH 290/515] return error if client already subscribed --- pubsub/pubsub.go | 68 ++++++++++++++++++++++++++++++++++--------- pubsub/pubsub_test.go | 32 +++++++++++++++----- 2 files changed, 80 insertions(+), 20 deletions(-) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 27f15cbeb..54a4b8aed 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -13,6 +13,8 @@ package pubsub import ( "context" + "errors" + "sync" cmn "github.com/tendermint/tmlibs/common" ) @@ -48,6 +50,9 @@ type Server struct { cmds chan cmd cmdsCap int + + mtx sync.RWMutex + subscriptions map[string]map[string]struct{} // subscriber -> query -> struct{} } // Option sets a parameter for the server. @@ -57,7 +62,9 @@ type Option func(*Server) // for a detailed description of how to configure buffering. If no options are // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { - s := &Server{} + s := &Server{ + subscriptions: make(map[string]map[string]struct{}), + } s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) for _, option := range options { @@ -83,17 +90,33 @@ func BufferCapacity(cap int) Option { } // BufferCapacity returns capacity of the internal server's queue. -func (s Server) BufferCapacity() int { +func (s *Server) BufferCapacity() int { return s.cmdsCap } // Subscribe creates a subscription for the given client. It accepts a channel -// on which messages matching the given query can be received. If the -// subscription already exists, the old channel will be closed. An error will -// be returned to the caller if the context is canceled. +// on which messages matching the given query can be received. 
An error will be +// returned to the caller if the context is canceled or if subscription already +// exist for pair clientID and query. func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + _, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if ok { + return errors.New("already subscribed") + } + select { case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: + s.mtx.Lock() + if _, ok = s.subscriptions[clientID]; !ok { + s.subscriptions[clientID] = make(map[string]struct{}) + } + s.subscriptions[clientID][query.String()] = struct{}{} + s.mtx.Unlock() return nil case <-ctx.Done(): return ctx.Err() @@ -101,10 +124,24 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou } // Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled. +// returned to the caller if the context is canceled or if subscription does +// not exist. func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + _, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if !ok { + return errors.New("subscription not found") + } + select { case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}: + s.mtx.Lock() + delete(clientSubscriptions, query.String()) + s.mtx.Unlock() return nil case <-ctx.Done(): return ctx.Err() @@ -112,10 +149,20 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) } // UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled. +// to the caller if the context is canceled or if subscription does not exist. 
func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.mtx.RLock() + _, ok := s.subscriptions[clientID] + s.mtx.RUnlock() + if !ok { + return errors.New("subscription not found") + } + select { case s.cmds <- cmd{op: unsub, clientID: clientID}: + s.mtx.Lock() + delete(s.subscriptions, clientID) + s.mtx.Unlock() return nil case <-ctx.Done(): return ctx.Err() @@ -187,13 +234,8 @@ loop: func (state *state) add(clientID string, q Query, ch chan<- interface{}) { // add query if needed - if clientToChannelMap, ok := state.queries[q]; !ok { + if _, ok := state.queries[q]; !ok { state.queries[q] = make(map[string]chan<- interface{}) - } else { - // check if already subscribed - if oldCh, ok := clientToChannelMap[clientID]; ok { - close(oldCh) - } } // create subscription diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 7bf7b41f7..84b6aa218 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -86,14 +86,11 @@ func TestClientSubscribesTwice(t *testing.T) { ch2 := make(chan interface{}, 1) err = s.Subscribe(ctx, clientID, q, ch2) - require.NoError(t, err) - - _, ok := <-ch1 - assert.False(t, ok) + require.Error(t, err) err = s.PublishWithTags(ctx, "Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) require.NoError(t, err) - assertReceive(t, "Spider-Man", ch2) + assertReceive(t, "Spider-Man", ch1) } func TestUnsubscribe(t *testing.T) { @@ -117,6 +114,27 @@ func TestUnsubscribe(t *testing.T) { assert.False(t, ok) } +func TestResubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.Empty{}) + require.NoError(t, err) + ch = make(chan interface{}) + err = s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + + err = s.Publish(ctx, "Cable") + 
require.NoError(t, err) + assertReceive(t, "Cable", ch) +} + func TestUnsubscribeAll(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) @@ -125,9 +143,9 @@ func TestUnsubscribeAll(t *testing.T) { ctx := context.Background() ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.Empty{}, ch1) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1) require.NoError(t, err) - err = s.Subscribe(ctx, clientID, query.Empty{}, ch2) + err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2) require.NoError(t, err) err = s.UnsubscribeAll(ctx, clientID) From 03dfb724c7d931fa76fc218c4dbac8056aacd752 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Dec 2017 10:18:35 -0800 Subject: [PATCH 291/515] Change heap.Push to mean int priority --- common/heap.go | 45 ++++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/common/heap.go b/common/heap.go index 4a96d7aaa..f3f55a1c0 100644 --- a/common/heap.go +++ b/common/heap.go @@ -4,25 +4,21 @@ import ( "container/heap" ) -type Comparable interface { - Less(o interface{}) bool -} - -//----------------------------------------------------------------------------- - /* -Example usage: + Example usage: + + ``` h := NewHeap() - h.Push(String("msg1"), 1) - h.Push(String("msg3"), 3) - h.Push(String("msg2"), 2) + h.Push("msg1", 1) + h.Push("msg3", 3) + h.Push("msg2", 2) - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) + fmt.Println(h.Pop()) // msg1 + fmt.Println(h.Pop()) // msg2 + fmt.Println(h.Pop()) // msg3 + ``` */ - type Heap struct { pq priorityQueue } @@ -35,7 +31,11 @@ func (h *Heap) Len() int64 { return int64(len(h.pq)) } -func (h *Heap) Push(value interface{}, priority Comparable) { +func (h *Heap) Push(value interface{}, priority int) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) +} + +func (h 
*Heap) PushComparable(value interface{}, priority Comparable) { heap.Push(&h.pq, &pqItem{value: value, priority: priority}) } @@ -56,8 +56,6 @@ func (h *Heap) Pop() interface{} { } //----------------------------------------------------------------------------- - -/////////////////////// // From: http://golang.org/pkg/container/heap/#example__priorityQueue type pqItem struct { @@ -101,3 +99,16 @@ func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Compar item.priority = priority heap.Fix(pq, item.index) } + +//-------------------------------------------------------------------------------- +// Comparable + +type Comparable interface { + Less(o interface{}) bool +} + +type cmpInt int + +func (i cmpInt) Less(o interface{}) bool { + return int(i) < int(o.(cmpInt)) +} From a0b692c86d248a7203cab9f5361677bcf6fc11db Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Dec 2017 14:23:27 -0800 Subject: [PATCH 292/515] Add PushBytes to Heap --- common/heap.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/common/heap.go b/common/heap.go index f3f55a1c0..b3bcb9db8 100644 --- a/common/heap.go +++ b/common/heap.go @@ -1,6 +1,7 @@ package common import ( + "bytes" "container/heap" ) @@ -35,6 +36,10 @@ func (h *Heap) Push(value interface{}, priority int) { heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) } +func (h *Heap) PushBytes(value interface{}, priority []byte) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) +} + func (h *Heap) PushComparable(value interface{}, priority Comparable) { heap.Push(&h.pq, &pqItem{value: value, priority: priority}) } @@ -112,3 +117,9 @@ type cmpInt int func (i cmpInt) Less(o interface{}) bool { return int(i) < int(o.(cmpInt)) } + +type cmpBytes []byte + +func (bz cmpBytes) Less(o interface{}) bool { + return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 +} From f39b575503b80cf22753f70ddc2956925b7b1ac4 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Tue, 12 
Dec 2017 16:55:41 +0000 Subject: [PATCH 293/515] remove deprecated --root flag --- cli/setup.go | 27 +++++++-------------------- cli/setup_test.go | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 78151015b..295477598 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -14,7 +14,6 @@ import ( ) const ( - RootFlag = "root" HomeFlag = "home" TraceFlag = "trace" OutputFlag = "output" @@ -28,14 +27,9 @@ type Executable interface { } // PrepareBaseCmd is meant for tendermint and other servers -func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { +func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { cobra.OnInitialize(func() { initEnv(envPrefix) }) - cmd.PersistentFlags().StringP(RootFlag, "r", defautRoot, "DEPRECATED. Use --home") - // -h is already reserved for --help as part of the cobra framework - // do you want to try something else?? - // also, default must be empty, so we can detect this unset and fall back - // to --root / TM_ROOT / TMROOT - cmd.PersistentFlags().String(HomeFlag, "", "root directory for config and data") + cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) return Executor{cmd, os.Exit} @@ -45,11 +39,11 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { // // This adds --encoding (hex, btc, base64) and --output (text, json) to // the command. These only really make sense in interactive commands. 
-func PrepareMainCmd(cmd *cobra.Command, envPrefix, defautRoot string) Executor { +func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") cmd.PersistentPreRunE = concatCobraCmdFuncs(setEncoding, validateOutput, cmd.PersistentPreRunE) - return PrepareBaseCmd(cmd, envPrefix, defautRoot) + return PrepareBaseCmd(cmd, envPrefix, defaultHome) } // initEnv sets to use ENV variables if set. @@ -136,17 +130,10 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { return err } - // rootDir is command line flag, env variable, or default $HOME/.tlc - // NOTE: we support both --root and --home for now, but eventually only --home - // Also ensure we set the correct rootDir under HomeFlag so we dont need to - // repeat this logic elsewhere. - rootDir := viper.GetString(HomeFlag) - if rootDir == "" { - rootDir = viper.GetString(RootFlag) - viper.Set(HomeFlag, rootDir) - } + homeDir := viper.GetString(HomeFlag) + viper.Set(HomeFlag, homeDir) viper.SetConfigName("config") // name of config file (without extension) - viper.AddConfigPath(rootDir) // search root directory + viper.AddConfigPath(homeDir) // search root directory // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { diff --git a/cli/setup_test.go b/cli/setup_test.go index 692da26d3..2f085f7d5 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -74,16 +74,16 @@ func TestSetupConfig(t *testing.T) { // setting on the command line {[]string{"--boo", "haha"}, nil, "haha", ""}, {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, - {[]string{"--root", conf1}, nil, cval1, ""}, + {[]string{"--home", conf1}, nil, cval1, ""}, // test both variants of the prefix - {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, - {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, - {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, - {nil, map[string]string{"RD_ROOT": conf1}, cval1, ""}, - {nil, map[string]string{"RDROOT": conf2}, cval2, "WORD"}, - {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, + //{nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + //{nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + //{nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + //{nil, map[string]string{"RD_ROOT": conf1}, cval1, ""}, + //{nil, map[string]string{"RDROOT": conf2}, cval2, "WORD"}, + //{nil, map[string]string{"RDHOME": conf1}, cval1, ""}, // and when both are set??? HOME wins every time! 
- {[]string{"--root", conf1}, map[string]string{"RDHOME": conf2}, cval2, "WORD"}, + {[]string{"--home", conf1}, map[string]string{"RDHOME": conf2}, cval2, "WORD"}, } for idx, tc := range cases { @@ -156,10 +156,10 @@ func TestSetupUnmarshal(t *testing.T) { {nil, nil, c("", 0)}, // setting on the command line {[]string{"--name", "haha"}, nil, c("haha", 0)}, - {[]string{"--root", conf1}, nil, c(cval1, 0)}, + {[]string{"--home", conf1}, nil, c(cval1, 0)}, // test both variants of the prefix {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, - {nil, map[string]string{"MR_ROOT": conf1}, c(cval1, 0)}, + //{nil, map[string]string{"MR_ROOT": conf1}, c(cval1, 0)}, {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, } From 50a30aafc18bfbd5890e4bab20633e843e173843 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 12:44:47 -0800 Subject: [PATCH 294/515] New canonical Iterator --- db/cache_db.go | 2 + db/db.go | 88 --------------------------------------- db/types.go | 111 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+), 88 deletions(-) create mode 100644 db/types.go diff --git a/db/cache_db.go b/db/cache_db.go index ed85e3057..586f2f679 100644 --- a/db/cache_db.go +++ b/db/cache_db.go @@ -1,3 +1,4 @@ +/* package db import ( @@ -229,3 +230,4 @@ func (cww *cwwMutex) TryWriteLock(version interface{}) bool { cww.written = new(int32) return true } +*/ diff --git a/db/db.go b/db/db.go index 08ebaeaac..ba137743b 100644 --- a/db/db.go +++ b/db/db.go @@ -1,93 +1,5 @@ package db -import . "github.com/tendermint/tmlibs/common" - -type DB interface { - Get([]byte) []byte // NOTE: returns nil iff never set or deleted. - Set([]byte, []byte) - SetSync([]byte, []byte) - Delete([]byte) - DeleteSync([]byte) - Close() - NewBatch() Batch - Iterator() Iterator - - // For debugging - Print() - - // Stats returns a map of property values for all keys and the size of the cache. 
- Stats() map[string]string - - // CacheDB wraps the DB w/ a cache. - CacheDB() CacheDB -} - -type CacheDB interface { - DB - Write() // Write to the underlying DB -} - -type SetDeleter interface { - Set(key, value []byte) - Delete(key []byte) -} - -type Batch interface { - SetDeleter - Write() -} - -/* - Usage: - - for itr.Seek(mykey); itr.Valid(); itr.Next() { - k, v := itr.Key(); itr.Value() - .... - } -*/ -type Iterator interface { - - // Seek moves the iterator the position of the key given or, if the key - // doesn't exist, the next key that does exist in the database. If the key - // doesn't exist, and there is no next key, the Iterator becomes invalid. - Seek(key []byte) - - // Valid returns false only when an Iterator has iterated past either the - // first or the last key in the database. - Valid() bool - - // Next moves the iterator to the next sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Next() - - // Prev moves the iterator to the previous sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Prev() - - // Key returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Key() []byte - - // Value returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Value() []byte - - // GetError returns an IteratorError from LevelDB if it had one during - // iteration. - // - // This method is safe to call when Valid returns false. - GetError() error - - // Close deallocates the given Iterator. 
- Close() -} - //----------------------------------------------------------------------------- // Main entry diff --git a/db/types.go b/db/types.go new file mode 100644 index 000000000..f343e1d72 --- /dev/null +++ b/db/types.go @@ -0,0 +1,111 @@ +package db + +type DB interface { + + // Get returns nil iff key doesn't exist. Panics on nil key. + Get([]byte) []byte + + // Has checks if a key exists. Panics on nil key. + Has(key []byte) bool + + // Set sets the key. Panics on nil key. + Set([]byte, []byte) + SetSync([]byte, []byte) + + // Delete deletes the key. Panics on nil key. + Delete([]byte) + DeleteSync([]byte) + + // Iterator over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + Iterator(start, end []byte) Iterator + + // Iterator over a domain of keys in descending order. End is exclusive. + // Start must be greater than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + ReverseIterator(start, end []byte) Iterator + + // Releases the connection. + Close() + + // Creates a batch for atomic updates. + NewBatch() Batch + + // For debugging + Print() + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +//---------------------------------------- +// Batch + +type Batch interface { + SetDeleter + Write() +} + +type SetDeleter interface { + Set(key, value []byte) + Delete(key []byte) +} + +//---------------------------------------- + +/* + Usage: + + for itr.Seek(mykey); itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + .... + } +*/ +type Iterator interface { + + // The start & end (exclusive) limits to iterate over. + // If end < start, then the Iterator goes in reverse order. 
+ // + // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate + // over anything with the prefix []byte{12, 13}. + // + // The smallest key is the empty byte array []byte{}. + // The largest key is the nil byte array []byte(nil). + Domain() (start []byte, end []byte) + + // Valid returns whether the current position is valid. + // Once invalid, an Iterator is forever invalid. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Next() + + // Prev moves the iterator to the previous sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Prev() + + // Key returns the key of the cursor. + // + // If Valid returns false, this method will panic. + Key() []byte + + // Value returns the key of the cursor. + // + // If Valid returns false, this method will panic. + Value() []byte + + // GetError returns an IteratorError from LevelDB if it had one during + // iteration. + // + // This method is safe to call when Valid returns false. + GetError() error + + // Close deallocates the given Iterator. 
+ Close() +} From 0d03cd9e31a1cebca1616a42acb65e307535223a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 14:00:03 -0800 Subject: [PATCH 295/515] Make it compile --- db/c_level_db.go | 37 ++++---- db/cache_db.go | 233 ---------------------------------------------- db/db.go | 4 +- db/fsdb.go | 61 +++++++----- db/go_level_db.go | 42 ++++++--- db/mem_db.go | 53 ++++++----- db/types.go | 19 +++- db/util.go | 97 +++++-------------- 8 files changed, 161 insertions(+), 385 deletions(-) delete mode 100644 db/cache_db.go diff --git a/db/c_level_db.go b/db/c_level_db.go index e4450aaa6..e867b0004 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -22,8 +22,6 @@ type CLevelDB struct { ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions - - cwwMutex } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { @@ -45,8 +43,6 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { ro: ro, wo: wo, woSync: woSync, - - cwwMutex: NewCWWMutex(), } return database, nil } @@ -59,6 +55,10 @@ func (db *CLevelDB) Get(key []byte) []byte { return res } +func (db *CLevelDB) Has(key []byte) bool { + panic("not implemented yet") +} + func (db *CLevelDB) Set(key []byte, value []byte) { err := db.db.Put(db.wo, key, value) if err != nil { @@ -99,9 +99,9 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - itr := db.Iterator() - defer itr.Close() - for itr.Seek(nil); itr.Valid(); itr.Next() { + itr := db.Iterator(BeginningKey(), EndingKey()) + defer itr.Release() + for ; itr.Valid(); itr.Next() { key := itr.Key() value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) @@ -120,10 +120,6 @@ func (db *CLevelDB) Stats() map[string]string { return stats } -func (db *CLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- // Batch @@ -155,10 +151,19 @@ func (mBatch *cLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db 
*CLevelDB) Iterator() Iterator { - itr := db.db.NewIterator(db.ro) - itr.Seek([]byte{0x00}) - return cLevelDBIterator{itr} +func (db *CLevelDB) Iterator(start, end []byte) Iterator { + /* + XXX + itr := db.db.NewIterator(db.ro) + itr.Seek([]byte{0x00}) + return cLevelDBIterator{itr} + */ + return nil +} + +func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type cLevelDBIterator struct { @@ -204,7 +209,7 @@ func (c cLevelDBIterator) Prev() { c.itr.Prev() } -func (c cLevelDBIterator) Close() { +func (c cLevelDBIterator) Release() { c.itr.Close() } diff --git a/db/cache_db.go b/db/cache_db.go deleted file mode 100644 index 586f2f679..000000000 --- a/db/cache_db.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -package db - -import ( - "fmt" - "sort" - "sync" - "sync/atomic" -) - -// If value is nil but deleted is false, -// it means the parent doesn't have the key. -// (No need to delete upon Write()) -type cDBValue struct { - value []byte - deleted bool - dirty bool -} - -// cacheDB wraps an in-memory cache around an underlying DB. -type cacheDB struct { - mtx sync.Mutex - cache map[string]cDBValue - parent DB - lockVersion interface{} - - cwwMutex -} - -// Needed by MultiStore.CacheWrap(). -var _ atomicSetDeleter = (*cacheDB)(nil) -var _ CacheDB = (*cacheDB)(nil) - -// Users should typically not be required to call NewCacheDB directly, as the -// DB implementations here provide a .CacheDB() function already. -// `lockVersion` is typically provided by parent.GetWriteLockVersion(). 
-func NewCacheDB(parent DB, lockVersion interface{}) CacheDB { - db := &cacheDB{ - cache: make(map[string]cDBValue), - parent: parent, - lockVersion: lockVersion, - cwwMutex: NewCWWMutex(), - } - return db -} - -func (db *cacheDB) Get(key []byte) []byte { - db.mtx.Lock() - defer db.mtx.Unlock() - - dbValue, ok := db.cache[string(key)] - if !ok { - data := db.parent.Get(key) - dbValue = cDBValue{value: data, deleted: false, dirty: false} - db.cache[string(key)] = dbValue - } - return dbValue.value -} - -func (db *cacheDB) Set(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetSync(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetNoLock(key []byte, value []byte) { - db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} -} - -func (db *cacheDB) Delete(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteSync(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteNoLock(key []byte) { - db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} -} - -func (db *cacheDB) Close() { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.parent.Close() -} - -func (db *cacheDB) Print() { - db.mtx.Lock() - defer db.mtx.Unlock() - - fmt.Println("cacheDB\ncache:") - for key, value := range db.cache { - fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) - } - fmt.Println("\nparent:") - db.parent.Print() -} - -func (db *cacheDB) Stats() map[string]string { - db.mtx.Lock() - defer db.mtx.Unlock() - - stats := make(map[string]string) - stats["cache.size"] = fmt.Sprintf("%d", len(db.cache)) - stats["cache.lock_version"] = fmt.Sprintf("%v", db.lockVersion) - mergeStats(db.parent.Stats(), stats, "parent.") - return stats -} - -func (db *cacheDB) Iterator() Iterator { - panic("cacheDB.Iterator() not yet 
supported") -} - -func (db *cacheDB) NewBatch() Batch { - return &memBatch{db, nil} -} - -// Implements `atomicSetDeleter` for Batch support. -func (db *cacheDB) Mutex() *sync.Mutex { - return &(db.mtx) -} - -// Write writes pending updates to the parent database and clears the cache. -func (db *cacheDB) Write() { - db.mtx.Lock() - defer db.mtx.Unlock() - - // Optional sanity check to ensure that cacheDB is valid - if parent, ok := db.parent.(WriteLocker); ok { - if parent.TryWriteLock(db.lockVersion) { - // All good! - } else { - panic("cacheDB.Write() failed. Did this CacheDB expire?") - } - } - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - keys := make([]string, 0, len(db.cache)) - for key, dbValue := range db.cache { - if dbValue.dirty { - keys = append(keys, key) - } - } - sort.Strings(keys) - - batch := db.parent.NewBatch() - for _, key := range keys { - dbValue := db.cache[key] - if dbValue.deleted { - batch.Delete([]byte(key)) - } else if dbValue.value == nil { - // Skip, it already doesn't exist in parent. - } else { - batch.Set([]byte(key), dbValue.value) - } - } - batch.Write() - - // Clear the cache - db.cache = make(map[string]cDBValue) -} - -//---------------------------------------- -// To cache-wrap this cacheDB further. - -func (db *cacheDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - -// If the parent parent DB implements this, (e.g. such as a cacheDB parent to a -// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting -// to write. -type WriteLocker interface { - GetWriteLockVersion() (lockVersion interface{}) - TryWriteLock(lockVersion interface{}) bool -} - -// Implements TryWriteLocker. Embed this in DB structs if desired. -type cwwMutex struct { - mtx sync.Mutex - // CONTRACT: reading/writing to `*written` should use `atomic.*`. - // CONTRACT: replacing `written` with another *int32 should use `.mtx`. 
- written *int32 -} - -func NewCWWMutex() cwwMutex { - return cwwMutex{ - written: new(int32), - } -} - -func (cww *cwwMutex) GetWriteLockVersion() interface{} { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - // `written` works as a "version" object because it gets replaced upon - // successful TryWriteLock. - return cww.written -} - -func (cww *cwwMutex) TryWriteLock(version interface{}) bool { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - if version != cww.written { - return false // wrong "WriteLockVersion" - } - if !atomic.CompareAndSwapInt32(cww.written, 0, 1) { - return false // already written - } - - // New "WriteLockVersion" - cww.written = new(int32) - return true -} -*/ diff --git a/db/db.go b/db/db.go index ba137743b..7eec04d56 100644 --- a/db/db.go +++ b/db/db.go @@ -1,5 +1,7 @@ package db +import "fmt" + //----------------------------------------------------------------------------- // Main entry @@ -26,7 +28,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) { func NewDB(name string, backend string, dir string) DB { db, err := backends[backend](name, dir) if err != nil { - PanicSanity(Fmt("Error initializing DB: %v", err)) + panic(fmt.Sprintf("Error initializing DB: %v", err)) } return db } diff --git a/db/fsdb.go b/db/fsdb.go index 4b1914453..b6e08daf5 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -7,7 +7,6 @@ import ( "os" "path" "path/filepath" - "sort" "sync" "github.com/pkg/errors" @@ -29,8 +28,6 @@ func init() { type FSDB struct { mtx sync.Mutex dir string - - cwwMutex } func NewFSDB(dir string) *FSDB { @@ -39,8 +36,7 @@ func NewFSDB(dir string) *FSDB { panic(errors.Wrap(err, "Creating FSDB dir "+dir)) } database := &FSDB{ - dir: dir, - cwwMutex: NewCWWMutex(), + dir: dir, } return database } @@ -59,6 +55,20 @@ func (db *FSDB) Get(key []byte) []byte { return value } +func (db *FSDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + + path := db.nameToPath(key) + _, err := read(path) + if os.IsNotExist(err) { 
+ return false + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + } + return true +} + func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -140,27 +150,32 @@ func (db *FSDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *FSDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} +func (db *FSDB) Iterator(start, end []byte) Iterator { + /* + XXX + it := newMemDBIterator() + it.db = db + it.cur = 0 -func (db *FSDB) Iterator() Iterator { - it := newMemDBIterator() - it.db = db - it.cur = 0 + db.mtx.Lock() + defer db.mtx.Unlock() - db.mtx.Lock() - defer db.mtx.Unlock() + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys, err := list(db.dir) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it + */ + return nil +} - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. 
- keys, err := list(db.dir) - if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) - } - sort.Strings(keys) - it.keys = keys - return it +func (db *FSDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } func (db *FSDB) nameToPath(name []byte) string { diff --git a/db/go_level_db.go b/db/go_level_db.go index cffe7329c..e8ed99dee 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -22,8 +22,6 @@ func init() { type GoLevelDB struct { db *leveldb.DB - - cwwMutex } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { @@ -33,8 +31,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { return nil, err } database := &GoLevelDB{ - db: db, - cwwMutex: NewCWWMutex(), + db: db, } return database, nil } @@ -51,6 +48,18 @@ func (db *GoLevelDB) Get(key []byte) []byte { return res } +func (db *GoLevelDB) Has(key []byte) bool { + _, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return false + } else { + PanicCrisis(err) + } + } + return true +} + func (db *GoLevelDB) Set(key []byte, value []byte) { err := db.db.Put(key, value, nil) if err != nil { @@ -121,10 +130,6 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } -func (db *GoLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- // Batch @@ -156,12 +161,21 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *GoLevelDB) Iterator() Iterator { - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), - } - itr.Seek(nil) - return itr +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + /* + XXX + itr := &goLevelDBIterator{ + source: db.db.NewIterator(nil, nil), + } + itr.Seek(nil) + return itr + */ + return nil +} + +func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type goLevelDBIterator struct { diff --git 
a/db/mem_db.go b/db/mem_db.go index f5d55f3ae..3127030ae 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -3,7 +3,6 @@ package db import ( "bytes" "fmt" - "sort" "sync" ) @@ -16,14 +15,11 @@ func init() { type MemDB struct { mtx sync.Mutex db map[string][]byte - - cwwMutex } func NewMemDB() *MemDB { database := &MemDB{ - db: make(map[string][]byte), - cwwMutex: NewCWWMutex(), + db: make(map[string][]byte), } return database } @@ -35,6 +31,14 @@ func (db *MemDB) Get(key []byte) []byte { return db.db[string(key)] } +func (db *MemDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + + _, ok := db.db[string(key)] + return ok +} + func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -114,27 +118,32 @@ func (db *MemDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *MemDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- -func (db *MemDB) Iterator() Iterator { - it := newMemDBIterator() - it.db = db - it.cur = 0 +func (db *MemDB) Iterator(start, end []byte) Iterator { + /* + XXX + it := newMemDBIterator() + it.db = db + it.cur = 0 - db.mtx.Lock() - defer db.mtx.Unlock() + db.mtx.Lock() + defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - for key, _ := range db.db { - it.keys = append(it.keys, key) - } - sort.Strings(it.keys) - return it + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. 
+ for key, _ := range db.db { + it.keys = append(it.keys, key) + } + sort.Strings(it.keys) + return it + */ + return nil +} + +func (db *MemDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type memDBIterator struct { diff --git a/db/types.go b/db/types.go index f343e1d72..7422a5155 100644 --- a/db/types.go +++ b/db/types.go @@ -54,12 +54,23 @@ type SetDeleter interface { //---------------------------------------- +func BeginningKey() []byte { + return []byte{} +} + +func EndingKey() []byte { + return nil +} + /* Usage: - for itr.Seek(mykey); itr.Valid(); itr.Next() { + var itr Iterator = ... + defer itr.Release() + + for ; itr.Valid(); itr.Next() { k, v := itr.Key(); itr.Value() - .... + // ... } */ type Iterator interface { @@ -106,6 +117,6 @@ type Iterator interface { // This method is safe to call when Valid returns false. GetError() error - // Close deallocates the given Iterator. - Close() + // Release deallocates the given Iterator. + Release() } diff --git a/db/util.go b/db/util.go index 5f381a5be..89c777622 100644 --- a/db/util.go +++ b/db/util.go @@ -1,82 +1,35 @@ package db -import "bytes" - -// A wrapper around itr that tries to keep the iterator -// within the bounds as defined by `prefix` -type prefixIterator struct { - itr Iterator - prefix []byte - invalid bool -} - -func (pi *prefixIterator) Seek(key []byte) { - if !bytes.HasPrefix(key, pi.prefix) { - pi.invalid = true - return - } - pi.itr.Seek(key) - pi.checkInvalid() -} - -func (pi *prefixIterator) checkInvalid() { - if !pi.itr.Valid() { - pi.invalid = true - } -} - -func (pi *prefixIterator) Valid() bool { - if pi.invalid { - return false - } - key := pi.itr.Key() - ok := bytes.HasPrefix(key, pi.prefix) - if !ok { - pi.invalid = true - return false - } - return true -} - -func (pi *prefixIterator) Next() { - if pi.invalid { - panic("prefixIterator Next() called when invalid") +func IteratePrefix(db DB, prefix []byte) Iterator { + var start, end []byte + if len(prefix) == 
0 { + start = BeginningKey() + end = EndingKey() + } else { + start = cp(prefix) + end = cpIncr(prefix) } - pi.itr.Next() - pi.checkInvalid() + return db.Iterator(start, end) } -func (pi *prefixIterator) Prev() { - if pi.invalid { - panic("prefixIterator Prev() called when invalid") - } - pi.itr.Prev() - pi.checkInvalid() -} +//---------------------------------------- -func (pi *prefixIterator) Key() []byte { - if pi.invalid { - panic("prefixIterator Key() called when invalid") - } - return pi.itr.Key() +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret } -func (pi *prefixIterator) Value() []byte { - if pi.invalid { - panic("prefixIterator Value() called when invalid") - } - return pi.itr.Value() -} - -func (pi *prefixIterator) Close() { pi.itr.Close() } -func (pi *prefixIterator) GetError() error { return pi.itr.GetError() } - -func IteratePrefix(db DB, prefix []byte) Iterator { - itr := db.Iterator() - pi := &prefixIterator{ - itr: itr, - prefix: prefix, +// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i] += 1 + return + } else { + ret[i] = byte(0x00) + } } - pi.Seek(prefix) - return pi + return EndingKey() } From 7f650cea8673ee3169e51a41c1a8038e38c59ef4 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 14:45:31 -0800 Subject: [PATCH 296/515] Remove Prev from Iterator --- db/c_level_db.go | 7 ---- db/cache_db_test.go | 83 --------------------------------------------- db/common_test.go | 51 ++++------------------------ db/mem_db_test.go | 2 +- db/types.go | 13 ++++--- db/util_test.go | 82 +------------------------------------------- 6 files changed, 15 insertions(+), 223 deletions(-) delete mode 100644 db/cache_db_test.go diff --git a/db/c_level_db.go b/db/c_level_db.go index e867b0004..11a6e5ff7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -202,13 +202,6 @@ func (c cLevelDBIterator) Next() { 
c.itr.Next() } -func (c cLevelDBIterator) Prev() { - if !c.itr.Valid() { - panic("cLevelDBIterator Prev() called when invalid") - } - c.itr.Prev() -} - func (c cLevelDBIterator) Release() { c.itr.Close() } diff --git a/db/cache_db_test.go b/db/cache_db_test.go deleted file mode 100644 index 2a2684fe2..000000000 --- a/db/cache_db_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func bz(s string) []byte { return []byte(s) } - -func TestCacheDB(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - - mem.Set(bz("key1"), bz("value1")) - cdb.Set(bz("key1"), bz("value1")) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - - cdb.Set(bz("key1"), bz("value2")) - require.Equal(t, bz("value2"), cdb.Get(bz("key1"))) - require.Equal(t, bz("value1"), mem.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("value2"), mem.Get(bz("key1"))) - - require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") - - cdb = mem.CacheDB() - cdb.Delete(bz("key1")) - require.Empty(t, cdb.Get(bz("key1"))) - require.Equal(t, mem.Get(bz("key1")), bz("value2")) - - cdb.Write() - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - require.Empty(t, mem.Get(bz("key1")), "Expected `key1` to be empty") -} - -func TestCacheDBWriteLock(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - require.Panics(t, func() { cdb.Write() }) - cdb = mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - require.Panics(t, func() { cdb.Write() }) -} - -func TestCacheDBWriteLockNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb2 := cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { cdb2.Write() }) - cdb2 = cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { 
cdb2.Write() }) -} - -func TestCacheDBNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb.Set(bz("key1"), bz("value1")) - - require.Empty(t, mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - cdb2 := cdb.CacheDB() - require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) - - cdb2.Set(bz("key1"), bz("VALUE2")) - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb2.Get(bz("key1"))) - - cdb2.Write() - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("VALUE2"), mem.Get(bz("key1"))) - -} diff --git a/db/common_test.go b/db/common_test.go index 505864c20..09fad8424 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -23,16 +23,6 @@ func checkNextPanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") } -func checkPrevPanics(t *testing.T, itr Iterator) { - assert.Panics(t, func() { itr.Prev() }, "checkPrevPanics expected panic but didn't") -} - -func checkPrev(t *testing.T, itr Iterator, expected bool) { - itr.Prev() - valid := itr.Valid() - assert.Equal(t, expected, valid) -} - func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { k, v := itr.Key(), itr.Value() assert.Exactly(t, key, k) @@ -44,7 +34,6 @@ func checkInvalid(t *testing.T, itr Iterator) { checkKeyPanics(t, itr) checkValuePanics(t, itr) checkNextPanics(t, itr) - checkPrevPanics(t, itr) } func checkKeyPanics(t *testing.T, itr Iterator) { @@ -67,7 +56,7 @@ func TestDBIteratorSingleKey(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) checkNext(t, itr, false) @@ -88,15 +77,12 @@ func TestDBIteratorTwoKeys(t 
*testing.T) { db.SetSync(bz("2"), bz("value_1")) { // Fail by calling Next too much - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) for i := 0; i < 10; i++ { checkNext(t, itr, true) checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) } checkNext(t, itr, true) @@ -110,27 +96,6 @@ func TestDBIteratorTwoKeys(t *testing.T) { // Once invalid... checkInvalid(t, itr) } - - { // Fail by calling Prev too much - itr := db.Iterator() - checkValid(t, itr, true) - - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) - } - - checkPrev(t, itr, false) - checkValid(t, itr, false) - - checkPrevPanics(t, itr) - - // Once invalid... - checkInvalid(t, itr) - } }) } } @@ -139,32 +104,30 @@ func TestDBIteratorEmpty(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorEmptySeek(t *testing.T) { +func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() - itr.Seek(bz("1")) + itr := db.Iterator(bz("1"), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorBadSeek(t *testing.T) { +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() - itr.Seek(bz("2")) + itr := db.Iterator(bz("2"), EndingKey()) checkInvalid(t, itr) }) diff --git a/db/mem_db_test.go b/db/mem_db_test.go index b5c9167c8..42e242857 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -19,7 +19,7 @@ func TestMemDbIterator(t 
*testing.T) { db.Set(k, value) } - iter := db.Iterator() + iter := db.Iterator(BeginningKey(), EndingKey()) i := 0 for ; iter.Valid(); iter.Next() { assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") diff --git a/db/types.go b/db/types.go index 7422a5155..8306813c7 100644 --- a/db/types.go +++ b/db/types.go @@ -90,17 +90,11 @@ type Iterator interface { Valid() bool // Next moves the iterator to the next sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. + // defined by order of iteration. // // If Valid returns false, this method will panic. Next() - // Prev moves the iterator to the previous sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Prev() - // Key returns the key of the cursor. // // If Valid returns false, this method will panic. @@ -120,3 +114,8 @@ type Iterator interface { // Release deallocates the given Iterator. Release() } + +// For testing convenience. 
+func bz(s string) []byte { + return []byte(s) +} diff --git a/db/util_test.go b/db/util_test.go index 55a41bf5b..4f8b9c456 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -66,7 +66,6 @@ func TestPrefixIteratorMatches1N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) checkValid(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) @@ -82,32 +81,6 @@ func TestPrefixIteratorMatches1N(t *testing.T) { } } -// Search for a/1, fail by too much Prev() -func TestPrefixIteratorMatches1P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - // Search for a/2, fail by too much Next() func TestPrefixIteratorMatches2N(t *testing.T) { for backend, _ := range backends { @@ -116,41 +89,15 @@ func TestPrefixIteratorMatches2N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... 
- checkInvalid(t, itr) - }) - } -} - -// Search for a/2, fail by too much Prev() -func TestPrefixIteratorMatches2P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) - checkValid(t, itr, true) checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) // Bad! - checkPrev(t, itr, false) + checkNext(t, itr, false) // Once invalid... checkInvalid(t, itr) @@ -166,11 +113,8 @@ func TestPrefixIteratorMatches3N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) checkNext(t, itr, true) checkItem(t, itr, bz("a/3"), bz("value_3")) @@ -183,27 +127,3 @@ func TestPrefixIteratorMatches3N(t *testing.T) { }) } } - -// Search for a/3, fail by too much Prev() -func TestPrefixIteratorMatches3P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... 
- checkInvalid(t, itr) - }) - } -} From 781f6c5d228c34227dcd059673175ea86398218f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 18:04:05 -0500 Subject: [PATCH 297/515] db: some comments in types.go --- db/types.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/db/types.go b/db/types.go index 8306813c7..a6edbdd85 100644 --- a/db/types.go +++ b/db/types.go @@ -54,10 +54,12 @@ type SetDeleter interface { //---------------------------------------- +// BeginningKey is the smallest key. func BeginningKey() []byte { return []byte{} } +// EndingKey is the largest key. func EndingKey() []byte { return nil } @@ -81,8 +83,8 @@ type Iterator interface { // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate // over anything with the prefix []byte{12, 13}. // - // The smallest key is the empty byte array []byte{}. - // The largest key is the nil byte array []byte(nil). + // The smallest key is the empty byte array []byte{} - see BeginningKey(). + // The largest key is the nil byte array []byte(nil) - see EndingKey(). Domain() (start []byte, end []byte) // Valid returns whether the current position is valid. @@ -100,7 +102,7 @@ type Iterator interface { // If Valid returns false, this method will panic. Key() []byte - // Value returns the key of the cursor. + // Value returns the value of the cursor. // // If Valid returns false, this method will panic. 
Value() []byte From 5b7f90dfb258c4b26f8209b42181a814b6978eb1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 18:42:47 -0500 Subject: [PATCH 298/515] db: test panic on nil key --- db/backend_test.go | 59 +++++++++++++++++++++++++++++++++++++--------- db/fsdb.go | 8 +++++++ db/go_level_db.go | 6 +++++ db/mem_db.go | 14 ++++++----- db/types.go | 7 ++++++ 5 files changed, 77 insertions(+), 17 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index b4ffecdc6..b21ce0037 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -2,42 +2,79 @@ package db import ( "fmt" + "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) -func testBackend(t *testing.T, backend string) { +func testBackendGetSetDelete(t *testing.T, backend string) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) // Set empty ("") db.Set([]byte(""), []byte("")) require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) // Set empty (nil) db.Set([]byte(""), nil) require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) // Delete db.Delete([]byte("")) require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) } -func TestBackends(t *testing.T) { - testBackend(t, CLevelDBBackendStr) - testBackend(t, GoLevelDBBackendStr) - testBackend(t, MemDBBackendStr) +func TestBackendsGetSetDelete(t *testing.T) { + for dbType, _ := range backends { + if dbType == "fsdb" { + // TODO: handle + // fsdb cant deal with length 0 keys + continue + } + testBackendGetSetDelete(t, dbType) + } +} + +func assertPanics(t *testing.T, dbType, name string, fn func()) { + defer func() { + r := 
recover() + assert.NotNil(t, r, cmn.Fmt("expecting %s.%s to panic", dbType, name)) + }() + + fn() +} + +func TestBackendsNilKeys(t *testing.T) { + // test all backends + for dbType, creator := range backends { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + assert.Nil(t, err) + defer os.RemoveAll(name) + + assertPanics(t, dbType, "get", func() { db.Get(nil) }) + assertPanics(t, dbType, "has", func() { db.Has(nil) }) + assertPanics(t, dbType, "set", func() { db.Set(nil, []byte("abc")) }) + assertPanics(t, dbType, "setsync", func() { db.SetSync(nil, []byte("abc")) }) + assertPanics(t, dbType, "delete", func() { db.Delete(nil) }) + assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) + + db.Close() + } +} + +func TestLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer os.RemoveAll(name) + _, ok := db.(*GoLevelDB) + assert.True(t, ok) } diff --git a/db/fsdb.go b/db/fsdb.go index b6e08daf5..19ea9fa3c 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -44,6 +44,7 @@ func NewFSDB(dir string) *FSDB { func (db *FSDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) path := db.nameToPath(key) value, err := read(path) @@ -58,6 +59,7 @@ func (db *FSDB) Get(key []byte) []byte { func (db *FSDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) path := db.nameToPath(key) _, err := read(path) @@ -72,6 +74,7 @@ func (db *FSDB) Has(key []byte) bool { func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } @@ -79,12 +82,14 @@ func (db *FSDB) Set(key []byte, value []byte) { func (db *FSDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter. 
func (db *FSDB) SetNoLock(key []byte, value []byte) { + panicNilKey(key) if value == nil { value = []byte{} } @@ -98,6 +103,7 @@ func (db *FSDB) SetNoLock(key []byte, value []byte) { func (db *FSDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } @@ -105,12 +111,14 @@ func (db *FSDB) Delete(key []byte) { func (db *FSDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { + panicNilKey(key) err := remove(string(key)) if os.IsNotExist(err) { return diff --git a/db/go_level_db.go b/db/go_level_db.go index e8ed99dee..201a31949 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -37,6 +37,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { } func (db *GoLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -49,6 +50,7 @@ func (db *GoLevelDB) Get(key []byte) []byte { } func (db *GoLevelDB) Has(key []byte) bool { + panicNilKey(key) _, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -61,6 +63,7 @@ func (db *GoLevelDB) Has(key []byte) bool { } func (db *GoLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) @@ -68,6 +71,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } func (db *GoLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -75,6 +79,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } func (db *GoLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(key, nil) if err != nil { PanicCrisis(err) @@ -82,6 +87,7 @@ func (db *GoLevelDB) Delete(key []byte) { } func (db *GoLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(key, 
&opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) diff --git a/db/mem_db.go b/db/mem_db.go index 3127030ae..ebeb2dded 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -27,14 +27,14 @@ func NewMemDB() *MemDB { func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) return db.db[string(key)] } func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) _, ok := db.db[string(key)] return ok } @@ -42,14 +42,14 @@ func (db *MemDB) Has(key []byte) bool { func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } @@ -58,25 +58,27 @@ func (db *MemDB) SetNoLock(key []byte, value []byte) { if value == nil { value = []byte{} } + panicNilKey(key) db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) delete(db.db, string(key)) } // NOTE: Implements atomicSetDeleter func (db *MemDB) DeleteNoLock(key []byte) { + panicNilKey(key) delete(db.db, string(key)) } diff --git a/db/types.go b/db/types.go index a6edbdd85..54c1025a0 100644 --- a/db/types.go +++ b/db/types.go @@ -121,3 +121,10 @@ type Iterator interface { func bz(s string) []byte { return []byte(s) } + +// All DB funcs should panic on nil key. 
+func panicNilKey(key []byte) { + if key == nil { + panic("nil key") + } +} From c547caf04f17dee0390733fc9167e68975aecdb9 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:08:22 -0500 Subject: [PATCH 299/515] db: some test cleanup --- db/backend_test.go | 14 +++++++++----- db/c_level_db_test.go | 11 +++++++++++ db/db.go | 2 +- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index b21ce0037..3d10c66cb 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -58,7 +58,6 @@ func TestBackendsNilKeys(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db, err := creator(name, "") assert.Nil(t, err) - defer os.RemoveAll(name) assertPanics(t, dbType, "get", func() { db.Get(nil) }) assertPanics(t, dbType, "has", func() { db.Has(nil) }) @@ -68,13 +67,18 @@ func TestBackendsNilKeys(t *testing.T) { assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) db.Close() + err = os.RemoveAll(name + ".db") + assert.Nil(t, err) } } -func TestLevelDBBackendStr(t *testing.T) { +func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name) - _, ok := db.(*GoLevelDB) - assert.True(t, ok) + defer os.RemoveAll(name + ".db") + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 864362332..606c34519 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -5,8 +5,10 @@ package db import ( "bytes" "fmt" + "os" "testing" + "github.com/stretchr/testify/assert" cmn "github.com/tendermint/tmlibs/common" ) @@ -84,3 +86,12 @@ func bytes2Int64(buf []byte) int64 { return int64(binary.BigEndian.Uint64(buf)) } */ + +func TestCLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer os.RemoveAll(name) + + _, ok := 
db.(*CLevelDB) + assert.True(t, ok) +} diff --git a/db/db.go b/db/db.go index 7eec04d56..b43b06554 100644 --- a/db/db.go +++ b/db/db.go @@ -6,7 +6,7 @@ import "fmt" // Main entry const ( - LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. + LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc CLevelDBBackendStr = "cleveldb" GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" From ba8c5045b5c67df9fbba08974d0194b6e735cbd2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:22:08 -0500 Subject: [PATCH 300/515] db: fixes to fsdb and clevledb --- db/backend_test.go | 24 ++++++++++-------------- db/c_level_db.go | 6 ++++++ db/fsdb.go | 5 +++-- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 3d10c66cb..9dc17201a 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,30 +15,26 @@ func testBackendGetSetDelete(t *testing.T, backend string) { dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) - require.Nil(t, db.Get([]byte(""))) + key := []byte("abc") + require.Nil(t, db.Get(key)) // Set empty ("") - db.Set([]byte(""), []byte("")) - require.NotNil(t, db.Get([]byte(""))) - require.Empty(t, db.Get([]byte(""))) + db.Set(key, []byte("")) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Set empty (nil) - db.Set([]byte(""), nil) - require.NotNil(t, db.Get([]byte(""))) - require.Empty(t, db.Get([]byte(""))) + db.Set(key, nil) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Delete - db.Delete([]byte("")) - require.Nil(t, db.Get([]byte(""))) + db.Delete(key) + require.Nil(t, db.Get(key)) } func TestBackendsGetSetDelete(t *testing.T) { for dbType, _ := range backends { - if dbType == "fsdb" { - // TODO: handle - // fsdb cant deal with length 0 keys - continue - } testBackendGetSetDelete(t, dbType) } } diff --git a/db/c_level_db.go 
b/db/c_level_db.go index 11a6e5ff7..47e79dfa6 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -48,6 +48,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { } func (db *CLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(db.ro, key) if err != nil { panic(err) @@ -56,10 +57,12 @@ func (db *CLevelDB) Get(key []byte) []byte { } func (db *CLevelDB) Has(key []byte) bool { + panicNilKey(key) panic("not implemented yet") } func (db *CLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.wo, key, value) if err != nil { panic(err) @@ -67,6 +70,7 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } func (db *CLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.woSync, key, value) if err != nil { panic(err) @@ -74,6 +78,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } func (db *CLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(db.wo, key) if err != nil { panic(err) @@ -81,6 +86,7 @@ func (db *CLevelDB) Delete(key []byte) { } func (db *CLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(db.woSync, key) if err != nil { panic(err) diff --git a/db/fsdb.go b/db/fsdb.go index 19ea9fa3c..116dc3eef 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -119,7 +119,8 @@ func (db *FSDB) DeleteSync(key []byte) { // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { panicNilKey(key) - err := remove(string(key)) + path := db.nameToPath(key) + err := remove(path) if os.IsNotExist(err) { return } else if err != nil { @@ -210,7 +211,7 @@ func read(path string) ([]byte, error) { // Write some bytes from a file. // CONTRACT: returns os errors directly without wrapping. 
func write(path string, d []byte) error { - f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, keyPerm) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm) if err != nil { return err } From bb115d4d615bdbe04e664b76346900151f83729e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:28:52 -0500 Subject: [PATCH 301/515] cleanupDBDir --- db/backend_test.go | 11 ++++++++--- db/c_level_db_test.go | 3 +-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 9dc17201a..16649cd20 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -3,6 +3,7 @@ package db import ( "fmt" "os" + "path" "testing" "github.com/stretchr/testify/assert" @@ -10,11 +11,16 @@ import ( cmn "github.com/tendermint/tmlibs/common" ) +func cleanupDBDir(dir, name string) { + os.RemoveAll(path.Join(dir, name) + ".db") +} + func testBackendGetSetDelete(t *testing.T, backend string) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) + key := []byte("abc") require.Nil(t, db.Get(key)) @@ -53,6 +59,7 @@ func TestBackendsNilKeys(t *testing.T) { for dbType, creator := range backends { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db, err := creator(name, "") + defer cleanupDBDir("", name) assert.Nil(t, err) assertPanics(t, dbType, "get", func() { db.Get(nil) }) @@ -63,15 +70,13 @@ func TestBackendsNilKeys(t *testing.T) { assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) db.Close() - err = os.RemoveAll(name + ".db") - assert.Nil(t, err) } } func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name + ".db") + defer cleanupDBDir("", name) if _, ok := backends[CLevelDBBackendStr]; !ok { _, ok := db.(*GoLevelDB) diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 606c34519..89993fbac 100644 --- 
a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -5,7 +5,6 @@ package db import ( "bytes" "fmt" - "os" "testing" "github.com/stretchr/testify/assert" @@ -90,7 +89,7 @@ func bytes2Int64(buf []byte) int64 { func TestCLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name) + defer cleanupDBDir("", name) _, ok := db.(*CLevelDB) assert.True(t, ok) From 39e40ff5ce8dd496475db872426cd7d5860b2a05 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:06:50 -0500 Subject: [PATCH 302/515] db: memdb iterator --- db/mem_db.go | 88 ++++++++++++++++++++++++++++++----------------- db/mem_db_test.go | 2 +- 2 files changed, 57 insertions(+), 33 deletions(-) diff --git a/db/mem_db.go b/db/mem_db.go index ebeb2dded..84d14de98 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -3,6 +3,8 @@ package db import ( "bytes" "fmt" + "sort" + "strings" "sync" ) @@ -12,6 +14,8 @@ func init() { }, false) } +var _ DB = (*MemDB)(nil) + type MemDB struct { mtx sync.Mutex db map[string][]byte @@ -123,49 +127,67 @@ func (db *MemDB) Mutex() *sync.Mutex { //---------------------------------------- func (db *MemDB) Iterator(start, end []byte) Iterator { - /* - XXX - it := newMemDBIterator() - it.db = db - it.cur = 0 - - db.mtx.Lock() - defer db.mtx.Unlock() - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - for key, _ := range db.db { - it.keys = append(it.keys, key) - } - sort.Strings(it.keys) - return it - */ - return nil + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. 
+ it.keys = db.getSortedKeys(start, end) + return it } func (db *MemDB) ReverseIterator(start, end []byte) Iterator { - // XXX + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + it.keys = db.getSortedKeys(end, start) + // reverse the order + l := len(it.keys) - 1 + for i, v := range it.keys { + it.keys[i] = it.keys[l-i] + it.keys[l-i] = v + } return nil } -type memDBIterator struct { - cur int - keys []string - db DB +func (db *MemDB) getSortedKeys(start, end []byte) []string { + keys := []string{} + for key, _ := range db.db { + leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 + if leftCondition && rightCondition { + keys = append(keys, key) + } + } + sort.Strings(keys) + return keys } -func newMemDBIterator() *memDBIterator { - return &memDBIterator{} +var _ Iterator = (*memDBIterator)(nil) + +type memDBIterator struct { + cur int + keys []string + db DB + start, end []byte } -func (it *memDBIterator) Seek(key []byte) { - for i, ik := range it.keys { - it.cur = i - if bytes.Compare(key, []byte(ik)) <= 0 { - return - } +func newMemDBIterator(db DB, start, end []byte) *memDBIterator { + return &memDBIterator{ + db: db, + start: start, + end: end, } - it.cur += 1 // If not found, becomes invalid. 
+} + +func (it *memDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } func (it *memDBIterator) Valid() bool { @@ -208,3 +230,5 @@ func (it *memDBIterator) Close() { func (it *memDBIterator) GetError() error { return nil } + +func (it *memDBIterator) Release() {} diff --git a/db/mem_db_test.go b/db/mem_db_test.go index 42e242857..a08a3679b 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMemDbIterator(t *testing.T) { +func TestMemDBIterator(t *testing.T) { db := NewMemDB() keys := make([][]byte, 100) for i := 0; i < 100; i++ { From 3e5dbef6a216a8fcd24ac72c6f65f793753d1a4b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:38:03 -0500 Subject: [PATCH 303/515] db: goleveldb iterator --- db/common_test.go | 5 ----- db/go_level_db.go | 37 ++++++++++++++++++++++--------------- db/util_test.go | 2 +- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/db/common_test.go b/db/common_test.go index 09fad8424..59c86a2c7 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -80,11 +80,6 @@ func TestDBIteratorTwoKeys(t *testing.T) { itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - } - checkNext(t, itr, true) checkValid(t, itr, true) diff --git a/db/go_level_db.go b/db/go_level_db.go index 201a31949..45b437d2b 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -20,6 +20,8 @@ func init() { registerDBCreator(GoLevelDBBackendStr, dbCreator, false) } +var _ DB = (*GoLevelDB)(nil) + type GoLevelDB struct { db *leveldb.DB } @@ -168,15 +170,13 @@ func (mBatch *goLevelDBBatch) Write() { // Iterator func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - /* - XXX - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), - } - itr.Seek(nil) - return itr - */ - return nil + itr := &goLevelDBIterator{ + source: 
db.db.NewIterator(nil, nil), + start: start, + end: end, + } + itr.source.Seek(start) + return itr } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -184,9 +184,16 @@ func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { return nil } +var _ Iterator = (*goLevelDBIterator)(nil) + type goLevelDBIterator struct { - source iterator.Iterator - invalid bool + source iterator.Iterator + invalid bool + start, end []byte +} + +func (it *goLevelDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } // Key returns a copy of the current key. @@ -217,10 +224,6 @@ func (it *goLevelDBIterator) GetError() error { return it.source.Error() } -func (it *goLevelDBIterator) Seek(key []byte) { - it.source.Seek(key) -} - func (it *goLevelDBIterator) Valid() bool { if it.invalid { return false @@ -246,3 +249,7 @@ func (it *goLevelDBIterator) Prev() { func (it *goLevelDBIterator) Close() { it.source.Release() } + +func (it *goLevelDBIterator) Release() { + it.source.Release() +} diff --git a/db/util_test.go b/db/util_test.go index 4f8b9c456..a0ce9cd51 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -50,7 +50,7 @@ func TestPrefixIteratorMatch3(t *testing.T) { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("3"), bz("value_3")) - itr := IteratePrefix(db, []byte("2")) + itr := IteratePrefix(db, []byte("4")) // Once invalid... 
checkInvalid(t, itr) From bcacaf164b8b79cc09ff2abec2ff4ec212315aba Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:58:35 -0500 Subject: [PATCH 304/515] db: cleveldb iterator --- db/c_level_db.go | 32 +++++++++++++++++++------------- db/common_test.go | 5 +++-- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 47e79dfa6..527fd7da3 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -17,6 +17,8 @@ func init() { registerDBCreator(CLevelDBBackendStr, dbCreator, false) } +var _ DB = (*CLevelDB)(nil) + type CLevelDB struct { db *levigo.DB ro *levigo.ReadOptions @@ -158,13 +160,17 @@ func (mBatch *cLevelDBBatch) Write() { // Iterator func (db *CLevelDB) Iterator(start, end []byte) Iterator { - /* - XXX - itr := db.db.NewIterator(db.ro) - itr.Seek([]byte{0x00}) - return cLevelDBIterator{itr} - */ - return nil + itr := db.db.NewIterator(db.ro) + if len(start) > 0 { + itr.Seek(start) + } else { + itr.SeekToFirst() + } + return cLevelDBIterator{ + itr: itr, + start: start, + end: end, + } } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -172,15 +178,15 @@ func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { return nil } +var _ Iterator = (*cLevelDBIterator)(nil) + type cLevelDBIterator struct { - itr *levigo.Iterator + itr *levigo.Iterator + start, end []byte } -func (c cLevelDBIterator) Seek(key []byte) { - if key == nil { - key = []byte{0x00} - } - c.itr.Seek(key) +func (c cLevelDBIterator) Domain() ([]byte, []byte) { + return c.start, c.end } func (c cLevelDBIterator) Valid() bool { diff --git a/db/common_test.go b/db/common_test.go index 59c86a2c7..6b3009795 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -5,18 +5,19 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) func checkValid(t *testing.T, itr Iterator, expected bool) { valid := itr.Valid() - 
assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNext(t *testing.T, itr Iterator, expected bool) { itr.Next() valid := itr.Valid() - assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNextPanics(t *testing.T, itr Iterator) { From edf07760d6f45663a992cf8a0978521084a6c597 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 21:08:38 -0500 Subject: [PATCH 305/515] db: fsdb iterator --- db/fsdb.go | 48 ++++++++++++++++++++++++------------------------ db/mem_db.go | 6 +----- db/util.go | 11 +++++++++++ 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/db/fsdb.go b/db/fsdb.go index 116dc3eef..ac9cdd741 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -7,6 +7,7 @@ import ( "os" "path" "path/filepath" + "sort" "sync" "github.com/pkg/errors" @@ -24,6 +25,8 @@ func init() { }, false) } +var _ DB = (*FSDB)(nil) + // It's slow. type FSDB struct { mtx sync.Mutex @@ -160,26 +163,20 @@ func (db *FSDB) Mutex() *sync.Mutex { } func (db *FSDB) Iterator(start, end []byte) Iterator { - /* - XXX - it := newMemDBIterator() - it.db = db - it.cur = 0 - - db.mtx.Lock() - defer db.mtx.Unlock() - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - keys, err := list(db.dir) - if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) - } - sort.Strings(keys) - it.keys = keys - return it - */ - return nil + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys, err := list(db.dir, start, end) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it } func (db *FSDB) ReverseIterator(start, end []byte) Iterator { @@ -233,7 +230,7 @@ func remove(path string) error { // List files of a path. 
// Paths will NOT include dir as the prefix. // CONTRACT: returns os errors directly without wrapping. -func list(dirPath string) (paths []string, err error) { +func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) if err != nil { return nil, err @@ -244,12 +241,15 @@ func list(dirPath string) (paths []string, err error) { if err != nil { return nil, err } - for i, name := range names { + var paths []string + for _, name := range names { n, err := url.PathUnescape(name) if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - names[i] = n + if checkKeyCondition(n, start, end) { + paths = append(paths, n) + } } - return names, nil + return paths, nil } diff --git a/db/mem_db.go b/db/mem_db.go index 84d14de98..a9f21d526 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -1,10 +1,8 @@ package db import ( - "bytes" "fmt" "sort" - "strings" "sync" ) @@ -159,9 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 - if leftCondition && rightCondition { + if checkKeyCondition(key, start, end) { keys = append(keys, key) } } diff --git a/db/util.go b/db/util.go index 89c777622..02f4a52f0 100644 --- a/db/util.go +++ b/db/util.go @@ -1,5 +1,10 @@ package db +import ( + "bytes" + "strings" +) + func IteratePrefix(db DB, prefix []byte) Iterator { var start, end []byte if len(prefix) == 0 { @@ -33,3 +38,9 @@ func cpIncr(bz []byte) (ret []byte) { } return EndingKey() } + +func checkKeyCondition(key string, start, end []byte) bool { + leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, 
string(end)) < 0 + return leftCondition && rightCondition +} From 5636a02d035258701974da39c62d13c1d76f8ae8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 19:19:26 -0800 Subject: [PATCH 306/515] Remove GetError() from Iterator --- db/c_level_db.go | 7 +++++-- db/go_level_db.go | 11 +++++++---- db/mem_db.go | 4 ---- db/types.go | 6 ------ 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 527fd7da3..93bc1dfb7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -190,6 +190,7 @@ func (c cLevelDBIterator) Domain() ([]byte, []byte) { } func (c cLevelDBIterator) Valid() bool { + c.assertNoError() return c.itr.Valid() } @@ -218,6 +219,8 @@ func (c cLevelDBIterator) Release() { c.itr.Close() } -func (c cLevelDBIterator) GetError() error { - return c.itr.GetError() +func (c cLevelDBIterator) assertNoError() { + if err := c.itr.GetError(); err != nil { + panic(err) + } } diff --git a/db/go_level_db.go b/db/go_level_db.go index 45b437d2b..89015547a 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -220,11 +220,8 @@ func (it *goLevelDBIterator) Value() []byte { return v } -func (it *goLevelDBIterator) GetError() error { - return it.source.Error() -} - func (it *goLevelDBIterator) Valid() bool { + it.assertNoError() if it.invalid { return false } @@ -253,3 +250,9 @@ func (it *goLevelDBIterator) Close() { func (it *goLevelDBIterator) Release() { it.source.Release() } + +func (it *goLevelDBIterator) assertNoError() { + if err := it.source.Error(); err != nil { + panic(err) + } +} diff --git a/db/mem_db.go b/db/mem_db.go index a9f21d526..81e209648 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -223,8 +223,4 @@ func (it *memDBIterator) Close() { it.keys = nil } -func (it *memDBIterator) GetError() error { - return nil -} - func (it *memDBIterator) Release() {} diff --git a/db/types.go b/db/types.go index 54c1025a0..8370ff2da 100644 --- a/db/types.go +++ b/db/types.go @@ -107,12 +107,6 @@ type 
Iterator interface { // If Valid returns false, this method will panic. Value() []byte - // GetError returns an IteratorError from LevelDB if it had one during - // iteration. - // - // This method is safe to call when Valid returns false. - GetError() error - // Release deallocates the given Iterator. Release() } From 541780c6dff65a2d3554ac297ae2c7e61d8217f6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 12 Dec 2017 23:23:49 -0600 Subject: [PATCH 307/515] uncomment tests --- cli/setup_test.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/cli/setup_test.go b/cli/setup_test.go index 2f085f7d5..e0fd75d8a 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -57,12 +57,9 @@ func TestSetupEnv(t *testing.T) { func TestSetupConfig(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. - cval1, cval2 := "fubble", "wubble" + cval1 := "fubble" conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) require.Nil(t, err) - // make sure it handles dashed-words in the config, and ignores random info - conf2, err := WriteDemoConfig(map[string]string{"boo": cval2, "foo": "bar", "two-words": "WORD"}) - require.Nil(t, err) cases := []struct { args []string @@ -76,14 +73,11 @@ func TestSetupConfig(t *testing.T) { {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, {[]string{"--home", conf1}, nil, cval1, ""}, // test both variants of the prefix - //{nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, - //{nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, - //{nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, - //{nil, map[string]string{"RD_ROOT": conf1}, cval1, ""}, - //{nil, map[string]string{"RDROOT": conf2}, cval2, "WORD"}, - //{nil, map[string]string{"RDHOME": conf1}, cval1, ""}, - // and when both are set??? HOME wins every time! 
- {[]string{"--home", conf1}, map[string]string{"RDHOME": conf2}, cval2, "WORD"}, + {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RD_HOME": conf1}, cval1, ""}, + {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, } for idx, tc := range cases { @@ -159,7 +153,7 @@ func TestSetupUnmarshal(t *testing.T) { {[]string{"--home", conf1}, nil, c(cval1, 0)}, // test both variants of the prefix {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, - //{nil, map[string]string{"MR_ROOT": conf1}, c(cval1, 0)}, + {nil, map[string]string{"MR_HOME": conf1}, c(cval1, 0)}, {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, } From 318982c0babe627c7dda57e23a1eae2bf0d2c1bf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 01:33:38 -0500 Subject: [PATCH 308/515] checkKeyCondition -> IsKeyInDomain --- db/fsdb.go | 2 +- db/mem_db.go | 2 +- db/util.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/db/fsdb.go b/db/fsdb.go index ac9cdd741..8a40d4f19 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -247,7 +247,7 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if checkKeyCondition(n, start, end) { + if IsKeyInDomain(n, start, end) { paths = append(paths, n) } } diff --git a/db/mem_db.go b/db/mem_db.go index 81e209648..d20d0e7ea 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -157,7 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - if checkKeyCondition(key, start, end) { + if IsKeyInDomain(key, start, end) { keys = append(keys, key) } } diff --git a/db/util.go b/db/util.go index 02f4a52f0..203ddcfaf 100644 --- a/db/util.go +++ b/db/util.go 
@@ -39,7 +39,7 @@ func cpIncr(bz []byte) (ret []byte) { return EndingKey() } -func checkKeyCondition(key string, start, end []byte) bool { +func IsKeyInDomain(key string, start, end []byte) bool { leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 return leftCondition && rightCondition From d4aeca8ce30c1cdf84f45e5160240d292fb848bb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 20:11:11 -0500 Subject: [PATCH 309/515] fixes from @melekes --- db/backend_test.go | 26 +++++++++----------------- db/c_level_db.go | 4 ++-- db/fsdb.go | 13 ++++++------- db/go_level_db.go | 16 ++++++++++------ 4 files changed, 27 insertions(+), 32 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 16649cd20..7ead549b0 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -3,7 +3,7 @@ package db import ( "fmt" "os" - "path" + "path/filepath" "testing" "github.com/stretchr/testify/assert" @@ -12,7 +12,7 @@ import ( ) func cleanupDBDir(dir, name string) { - os.RemoveAll(path.Join(dir, name) + ".db") + os.RemoveAll(filepath.Join(dir, name) + ".db") } func testBackendGetSetDelete(t *testing.T, backend string) { @@ -45,15 +45,6 @@ func TestBackendsGetSetDelete(t *testing.T) { } } -func assertPanics(t *testing.T, dbType, name string, fn func()) { - defer func() { - r := recover() - assert.NotNil(t, r, cmn.Fmt("expecting %s.%s to panic", dbType, name)) - }() - - fn() -} - func TestBackendsNilKeys(t *testing.T) { // test all backends for dbType, creator := range backends { @@ -62,12 +53,13 @@ func TestBackendsNilKeys(t *testing.T) { defer cleanupDBDir("", name) assert.Nil(t, err) - assertPanics(t, dbType, "get", func() { db.Get(nil) }) - assertPanics(t, dbType, "has", func() { db.Has(nil) }) - assertPanics(t, dbType, "set", func() { db.Set(nil, []byte("abc")) }) - assertPanics(t, dbType, "setsync", func() { 
db.SetSync(nil, []byte("abc")) }) - assertPanics(t, dbType, "delete", func() { db.Delete(nil) }) - assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) + panicMsg := "expecting %s.%s to panic" + assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") + assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") + assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") + assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") + assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") + assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") db.Close() } diff --git a/db/c_level_db.go b/db/c_level_db.go index 93bc1dfb7..ff8347cc7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -4,7 +4,7 @@ package db import ( "fmt" - "path" + "path/filepath" "github.com/jmhodges/levigo" ) @@ -27,7 +27,7 @@ type CLevelDB struct { } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := filepath.Join(dir, name+".db") opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 30)) diff --git a/db/fsdb.go b/db/fsdb.go index 8a40d4f19..85adae630 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "net/url" "os" - "path" "path/filepath" "sort" "sync" @@ -54,7 +53,7 @@ func (db *FSDB) Get(key []byte) []byte { if os.IsNotExist(err) { return nil } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) } return value } @@ -69,7 +68,7 @@ func (db *FSDB) Has(key []byte) bool { if os.IsNotExist(err) { return false } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) } return true } @@ -99,7 +98,7 @@ func (db *FSDB) SetNoLock(key []byte, 
value []byte) { path := db.nameToPath(key) err := write(path, value) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Setting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) } } @@ -127,7 +126,7 @@ func (db *FSDB) DeleteNoLock(key []byte) { if os.IsNotExist(err) { return } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Removing key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) } } @@ -172,7 +171,7 @@ func (db *FSDB) Iterator(start, end []byte) Iterator { // Not the best, but probably not a bottleneck depending. keys, err := list(db.dir, start, end) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } sort.Strings(keys) it.keys = keys @@ -186,7 +185,7 @@ func (db *FSDB) ReverseIterator(start, end []byte) Iterator { func (db *FSDB) nameToPath(name []byte) string { n := url.PathEscape(string(name)) - return path.Join(db.dir, n) + return filepath.Join(db.dir, n) } // Read some bytes to a file. 
diff --git a/db/go_level_db.go b/db/go_level_db.go index 89015547a..d741c6904 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -2,7 +2,7 @@ package db import ( "fmt" - "path" + "path/filepath" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" @@ -27,7 +27,7 @@ type GoLevelDB struct { } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := filepath.Join(dir, name+".db") db, err := leveldb.OpenFile(dbPath, nil) if err != nil { return nil, err @@ -170,13 +170,17 @@ func (mBatch *goLevelDBBatch) Write() { // Iterator func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), + itr := db.db.NewIterator(nil, nil) + if len(start) > 0 { + itr.Seek(start) + } else { + itr.First() + } + return &goLevelDBIterator{ + source: itr, start: start, end: end, } - itr.source.Seek(start) - return itr } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { From a2f7898b6d89b2d2ef9563f622a7ddee8b193a13 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 22:28:37 -0500 Subject: [PATCH 310/515] db: fix c and go iterators --- db/c_level_db.go | 40 +++++++++++++++++-------- db/go_level_db.go | 24 +++++++++++---- db/util_test.go | 75 +++++++++++++---------------------------------- 3 files changed, 66 insertions(+), 73 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index ff8347cc7..8e2a9372d 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -3,6 +3,7 @@ package db import ( + "bytes" "fmt" "path/filepath" @@ -166,7 +167,7 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator { } else { itr.SeekToFirst() } - return cLevelDBIterator{ + return &cLevelDBIterator{ itr: itr, start: start, end: end, @@ -183,43 +184,58 @@ var _ Iterator = (*cLevelDBIterator)(nil) type cLevelDBIterator struct { itr *levigo.Iterator start, end []byte + invalid bool } -func (c cLevelDBIterator) 
Domain() ([]byte, []byte) { +func (c *cLevelDBIterator) Domain() ([]byte, []byte) { return c.start, c.end } -func (c cLevelDBIterator) Valid() bool { +func (c *cLevelDBIterator) Valid() bool { c.assertNoError() - return c.itr.Valid() + if c.invalid { + return false + } + c.invalid = !c.itr.Valid() + return !c.invalid } -func (c cLevelDBIterator) Key() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Key() []byte { + if !c.Valid() { panic("cLevelDBIterator Key() called when invalid") } return c.itr.Key() } -func (c cLevelDBIterator) Value() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Value() []byte { + if !c.Valid() { panic("cLevelDBIterator Value() called when invalid") } return c.itr.Value() } -func (c cLevelDBIterator) Next() { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Next() { + if !c.Valid() { panic("cLevelDBIterator Next() called when invalid") } c.itr.Next() + c.checkEndKey() // if we've exceeded the range, we're now invalid +} + +// levigo has no upper bound when iterating, so need to check ourselves +func (c *cLevelDBIterator) checkEndKey() []byte { + key := c.itr.Key() + if c.end != nil && bytes.Compare(key, c.end) > 0 { + c.invalid = true + } + return key } -func (c cLevelDBIterator) Release() { +func (c *cLevelDBIterator) Release() { c.itr.Close() } -func (c cLevelDBIterator) assertNoError() { +func (c *cLevelDBIterator) assertNoError() { if err := c.itr.GetError(); err != nil { panic(err) } diff --git a/db/go_level_db.go b/db/go_level_db.go index d741c6904..0d24020e0 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,6 +8,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" . 
"github.com/tendermint/tmlibs/common" ) @@ -169,13 +170,24 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := db.db.NewIterator(nil, nil) - if len(start) > 0 { - itr.Seek(start) - } else { - itr.First() +// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +func goLevelDBIterRange(start, end []byte) *util.Range { + // XXX: what if start == nil ? + if len(start) == 0 { + start = nil + } + return &util.Range{ + Start: start, + Limit: end, } +} + +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itrRange := goLevelDBIterRange(start, end) + itr := db.db.NewIterator(itrRange, nil) + itr.Seek(start) // if we don't call this the itr is never valid (?!) return &goLevelDBIterator{ source: itr, start: start, diff --git a/db/util_test.go b/db/util_test.go index a0ce9cd51..b273f8d46 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -5,6 +5,7 @@ import ( "testing" ) +// empty iterator for empty db func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -16,6 +17,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { } } +// empty iterator for db populated after iterator created func TestPrefixIteratorNoMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -28,24 +30,8 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { } } -func TestPrefixIteratorMatch2(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("2"), bz("value_2")) - itr := IteratePrefix(db, []byte("2")) - - checkValid(t, itr, 
true) - checkItem(t, itr, bz("2"), bz("value_2")) - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -func TestPrefixIteratorMatch3(t *testing.T) { +// empty iterator for prefix starting above db entry +func TestPrefixIteratorNoMatch2(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) @@ -58,21 +44,16 @@ func TestPrefixIteratorMatch3(t *testing.T) { } } -// Search for a/1, fail by too much Next() -func TestPrefixIteratorMatches1N(t *testing.T) { +// iterator with single val for db with single val, starting from that val +func TestPrefixIteratorMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, bz("2")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! + checkItem(t, itr, bz("2"), bz("value_2")) checkNext(t, itr, false) // Once invalid... 
@@ -81,38 +62,22 @@ func TestPrefixIteratorMatches1N(t *testing.T) { } } -// Search for a/2, fail by too much Next() -func TestPrefixIteratorMatches2N(t *testing.T) { +// iterator with prefix iterates over everything with same prefix +func TestPrefixIteratorMatches1N(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} -// Search for a/3, fail by too much Next() -func TestPrefixIteratorMatches3N(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + // prefixed db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) + + // not + db.SetSync(bz("b/3"), bz("value_3")) + db.SetSync(bz("a-3"), bz("value_3")) + db.SetSync(bz("a.3"), bz("value_3")) + db.SetSync(bz("abcdefg"), bz("value_3")) + itr := IteratePrefix(db, bz("a/")) checkValid(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) @@ -122,7 +87,7 @@ func TestPrefixIteratorMatches3N(t *testing.T) { // Bad! checkNext(t, itr, false) - // Once invalid... + //Once invalid... 
checkInvalid(t, itr) }) } From 29471d75cb50eb4cea5878b8bd1be25e8150564c Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Wed, 13 Dec 2017 22:53:02 -0700 Subject: [PATCH 311/515] common: no more relying on math/rand.DefaultSource Fixes https://github.com/tendermint/tmlibs/issues/99 Updates https://github.com/tendermint/tendermint/issues/973 Removed usages of math/rand.DefaultSource in favour of our own source that's seeded with a completely random source and is safe for use in concurrent in multiple goroutines. Also extend some functionality that the stdlib exposes such as * RandPerm * RandIntn * RandInt31 * RandInt63 Also added an integration test whose purpose is to be run as a consistency check to ensure that our results never repeat hence that our internal PRNG is uniquely seeded each time. This integration test can be triggered by setting environment variable: `TENDERMINT_INTEGRATION_TESTS=true` for example ```shell TENDERMINT_INTEGRATION_TESTS=true go test ``` --- common/bit_array.go | 7 ++- common/random.go | 89 +++++++++++++++++++++++++--------- common/random_test.go | 108 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+), 26 deletions(-) create mode 100644 common/random_test.go diff --git a/common/bit_array.go b/common/bit_array.go index 5590fe61b..848763b48 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -3,7 +3,6 @@ package common import ( "encoding/binary" "fmt" - "math/rand" "strings" "sync" ) @@ -212,12 +211,12 @@ func (bA *BitArray) PickRandom() (int, bool) { if length == 0 { return 0, false } - randElemStart := rand.Intn(length) + randElemStart := RandIntn(length) for i := 0; i < length; i++ { elemIdx := ((i + randElemStart) % length) if elemIdx < length-1 { if bA.Elems[elemIdx] > 0 { - randBitStart := rand.Intn(64) + randBitStart := RandIntn(64) for j := 0; j < 64; j++ { bitIdx := ((j + randBitStart) % 64) if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { @@ -232,7 +231,7 @@ func (bA *BitArray) 
PickRandom() (int, bool) { if elemBits == 0 { elemBits = 64 } - randBitStart := rand.Intn(elemBits) + randBitStart := RandIntn(elemBits) for j := 0; j < elemBits; j++ { bitIdx := ((j + randBitStart) % elemBits) if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { diff --git a/common/random.go b/common/random.go index 73bd16356..f0d169e09 100644 --- a/common/random.go +++ b/common/random.go @@ -3,6 +3,7 @@ package common import ( crand "crypto/rand" "math/rand" + "sync" "time" ) @@ -10,6 +11,11 @@ const ( strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters ) +var rng struct { + sync.Mutex + *rand.Rand +} + func init() { b := cRandBytes(8) var seed uint64 @@ -17,7 +23,7 @@ func init() { seed |= uint64(b[i]) seed <<= 8 } - rand.Seed(int64(seed)) + rng.Rand = rand.New(rand.NewSource(int64(seed))) } // Constructs an alphanumeric string of given length. @@ -25,7 +31,7 @@ func RandStr(length int) string { chars := []byte{} MAIN_LOOP: for { - val := rand.Int63() + val := rng.Int63() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars @@ -45,72 +51,98 @@ MAIN_LOOP: } func RandUint16() uint16 { - return uint16(rand.Uint32() & (1<<16 - 1)) + return uint16(RandUint32() & (1<<16 - 1)) } func RandUint32() uint32 { - return rand.Uint32() + rng.Lock() + u32 := rng.Uint32() + rng.Unlock() + return u32 } func RandUint64() uint64 { - return uint64(rand.Uint32())<<32 + uint64(rand.Uint32()) + return uint64(RandUint32())<<32 + uint64(RandUint32()) } func RandUint() uint { - return uint(rand.Int()) + rng.Lock() + i := rng.Int() + rng.Unlock() + return uint(i) } func RandInt16() int16 { - return int16(rand.Uint32() & (1<<16 - 1)) + return int16(RandUint32() & (1<<16 - 1)) } func RandInt32() int32 { - return int32(rand.Uint32()) + return int32(RandUint32()) } func RandInt64() int64 { - return int64(rand.Uint32())<<32 + int64(rand.Uint32()) + return int64(RandUint64()) } func 
RandInt() int { - return rand.Int() + rng.Lock() + i := rng.Int() + rng.Unlock() + return i +} + +func RandInt31() int32 { + rng.Lock() + i31 := rng.Int31() + rng.Unlock() + return i31 +} + +func RandInt63() int64 { + rng.Lock() + i63 := rng.Int63() + rng.Unlock() + return i63 } // Distributed pseudo-exponentially to test for various cases func RandUint16Exp() uint16 { - bits := rand.Uint32() % 16 + bits := RandUint32() % 16 if bits == 0 { return 0 } n := uint16(1 << (bits - 1)) - n += uint16(rand.Int31()) & ((1 << (bits - 1)) - 1) + n += uint16(RandInt31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases func RandUint32Exp() uint32 { - bits := rand.Uint32() % 32 + bits := RandUint32() % 32 if bits == 0 { return 0 } n := uint32(1 << (bits - 1)) - n += uint32(rand.Int31()) & ((1 << (bits - 1)) - 1) + n += uint32(RandInt31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases func RandUint64Exp() uint64 { - bits := rand.Uint32() % 64 + bits := RandUint32() % 64 if bits == 0 { return 0 } n := uint64(1 << (bits - 1)) - n += uint64(rand.Int63()) & ((1 << (bits - 1)) - 1) + n += uint64(RandInt63()) & ((1 << (bits - 1)) - 1) return n } func RandFloat32() float32 { - return rand.Float32() + rng.Lock() + f32 := rng.Float32() + rng.Unlock() + return f32 } func RandTime() time.Time { @@ -118,11 +150,24 @@ func RandTime() time.Time { } func RandBytes(n int) []byte { - bs := make([]byte, n) - for i := 0; i < n; i++ { - bs[i] = byte(rand.Intn(256)) - } - return bs + return cRandBytes(n) +} + +// RandIntn returns, as an int, a non-negative pseudo-random number in [0, n). +// It panics if n <= 0 +func RandIntn(n int) int { + rng.Lock() + i := rng.Intn(n) + rng.Unlock() + return i +} + +// RandPerm returns a pseudo-random permutation of n integers in [0, n). 
+func RandPerm(n int) []int { + rng.Lock() + perm := rng.Perm(n) + rng.Unlock() + return perm } // NOTE: This relies on the os's random number generator. diff --git a/common/random_test.go b/common/random_test.go new file mode 100644 index 000000000..dd803b3f6 --- /dev/null +++ b/common/random_test.go @@ -0,0 +1,108 @@ +package common_test + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/tendermint/tmlibs/common" +) + +// It is essential that these tests run and never repeat their outputs +// lest we've been pwned and the behavior of our randomness is controlled. +// See Issues: +// * https://github.com/tendermint/tmlibs/issues/99 +// * https://github.com/tendermint/tendermint/issues/973 +func TestUniqueRng(t *testing.T) { + if os.Getenv("TENDERMINT_INTEGRATION_TESTS") == "" { + t.Skipf("Can only be run as an integration test") + } + + // The goal of this test is to invoke the + // Rand* tests externally with no repeating results, booted up. + // Any repeated results indicate that the seed is the same or that + // perhaps we are using math/rand directly. 
+ tmpDir, err := ioutil.TempDir("", "rng-tests") + if err != nil { + t.Fatalf("Creating tempDir: %v", err) + } + defer os.RemoveAll(tmpDir) + + outpath := filepath.Join(tmpDir, "main.go") + f, err := os.Create(outpath) + if err != nil { + t.Fatalf("Setting up %q err: %v", outpath, err) + } + f.Write([]byte(integrationTestProgram)) + if err := f.Close(); err != nil { + t.Fatalf("Closing: %v", err) + } + + outputs := make(map[string][]int) + for i := 0; i < 100; i++ { + cmd := exec.Command("go", "run", outpath) + bOutput, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("Run #%d: err: %v output: %s", i, err, bOutput) + continue + } + output := string(bOutput) + runs, seen := outputs[output] + if seen { + t.Errorf("Run #%d's output was already seen in previous runs: %v", i, runs) + } + outputs[output] = append(outputs[output], i) + } +} + +const integrationTestProgram = ` +package main + +import ( + "encoding/json" + "fmt" + "math/rand" + + "github.com/tendermint/tmlibs/common" +) + +func main() { + // Set math/rand's Seed so that any direct invocations + // of math/rand will reveal themselves. 
+ rand.Seed(1) + perm := common.RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Printf("perm: %s\n", blob) + + fmt.Printf("randInt: %d\n", common.RandInt()) + fmt.Printf("randUint: %d\n", common.RandUint()) + fmt.Printf("randIntn: %d\n", common.RandIntn(97)) + fmt.Printf("randInt31: %d\n", common.RandInt31()) + fmt.Printf("randInt32: %d\n", common.RandInt32()) + fmt.Printf("randInt63: %d\n", common.RandInt63()) + fmt.Printf("randInt64: %d\n", common.RandInt64()) + fmt.Printf("randUint32: %d\n", common.RandUint32()) + fmt.Printf("randUint64: %d\n", common.RandUint64()) + fmt.Printf("randUint16Exp: %d\n", common.RandUint16Exp()) + fmt.Printf("randUint32Exp: %d\n", common.RandUint32Exp()) + fmt.Printf("randUint64Exp: %d\n", common.RandUint64Exp()) +}` + +func TestRngConcurrencySafety(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _ = common.RandUint64() + <-time.After(time.Millisecond * time.Duration(common.RandIntn(100))) + _ = common.RandPerm(3) + }() + } + wg.Wait() +} From b5f465b4ecb6ef85a6ced14728a971570ed477e0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 15 Dec 2017 00:23:25 -0500 Subject: [PATCH 312/515] common: use names prng and mrand --- common/random.go | 61 +++++++++++++++++++++++-------------------- common/random_test.go | 21 +++++++++++++++ 2 files changed, 53 insertions(+), 29 deletions(-) diff --git a/common/random.go b/common/random.go index f0d169e09..b8304e898 100644 --- a/common/random.go +++ b/common/random.go @@ -2,7 +2,7 @@ package common import ( crand "crypto/rand" - "math/rand" + mrand "math/rand" "sync" "time" ) @@ -11,9 +11,11 @@ const ( strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters ) -var rng struct { +// pseudo random number generator. 
+// seeded with OS randomness (crand) +var prng struct { sync.Mutex - *rand.Rand + *mrand.Rand } func init() { @@ -23,7 +25,7 @@ func init() { seed |= uint64(b[i]) seed <<= 8 } - rng.Rand = rand.New(rand.NewSource(int64(seed))) + prng.Rand = mrand.New(mrand.NewSource(int64(seed))) } // Constructs an alphanumeric string of given length. @@ -31,7 +33,7 @@ func RandStr(length int) string { chars := []byte{} MAIN_LOOP: for { - val := rng.Int63() + val := prng.Int63() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars @@ -55,9 +57,9 @@ func RandUint16() uint16 { } func RandUint32() uint32 { - rng.Lock() - u32 := rng.Uint32() - rng.Unlock() + prng.Lock() + u32 := prng.Uint32() + prng.Unlock() return u32 } @@ -66,9 +68,9 @@ func RandUint64() uint64 { } func RandUint() uint { - rng.Lock() - i := rng.Int() - rng.Unlock() + prng.Lock() + i := prng.Int() + prng.Unlock() return uint(i) } @@ -85,23 +87,23 @@ func RandInt64() int64 { } func RandInt() int { - rng.Lock() - i := rng.Int() - rng.Unlock() + prng.Lock() + i := prng.Int() + prng.Unlock() return i } func RandInt31() int32 { - rng.Lock() - i31 := rng.Int31() - rng.Unlock() + prng.Lock() + i31 := prng.Int31() + prng.Unlock() return i31 } func RandInt63() int64 { - rng.Lock() - i63 := rng.Int63() - rng.Unlock() + prng.Lock() + i63 := prng.Int63() + prng.Unlock() return i63 } @@ -139,9 +141,9 @@ func RandUint64Exp() uint64 { } func RandFloat32() float32 { - rng.Lock() - f32 := rng.Float32() - rng.Unlock() + prng.Lock() + f32 := prng.Float32() + prng.Unlock() return f32 } @@ -149,6 +151,7 @@ func RandTime() time.Time { return time.Unix(int64(RandUint64Exp()), 0) } +// RandBytes returns n random bytes from the OS's source of entropy ie. via crypto/rand. func RandBytes(n int) []byte { return cRandBytes(n) } @@ -156,17 +159,17 @@ func RandBytes(n int) []byte { // RandIntn returns, as an int, a non-negative pseudo-random number in [0, n). 
// It panics if n <= 0 func RandIntn(n int) int { - rng.Lock() - i := rng.Intn(n) - rng.Unlock() + prng.Lock() + i := prng.Intn(n) + prng.Unlock() return i } // RandPerm returns a pseudo-random permutation of n integers in [0, n). func RandPerm(n int) []int { - rng.Lock() - perm := rng.Perm(n) - rng.Unlock() + prng.Lock() + perm := prng.Perm(n) + prng.Unlock() return perm } diff --git a/common/random_test.go b/common/random_test.go index dd803b3f6..3fe0bbc06 100644 --- a/common/random_test.go +++ b/common/random_test.go @@ -9,9 +9,30 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/tendermint/tmlibs/common" ) +func TestRandStr(t *testing.T) { + l := 243 + s := common.RandStr(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := common.RandBytes(l) + assert.Equal(t, l, len(b)) +} + +func TestRandIntn(t *testing.T) { + n := 243 + for i := 0; i < 100; i++ { + x := common.RandIntn(n) + assert.True(t, x < n) + } +} + // It is essential that these tests run and never repeat their outputs // lest we've been pwned and the behavior of our randomness is controlled. 
// See Issues: From cdc798882326a722040706a87ec0397e7c91d517 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 15 Dec 2017 02:14:05 -0700 Subject: [PATCH 313/515] common: use genius simplification of tests from @ebuchman Massive test simplication for more reliable tests from @ebuchman --- common/random.go | 8 ++- common/random_test.go | 115 +++++++++++++++--------------------------- 2 files changed, 47 insertions(+), 76 deletions(-) diff --git a/common/random.go b/common/random.go index b8304e898..37b8b2773 100644 --- a/common/random.go +++ b/common/random.go @@ -18,14 +18,20 @@ var prng struct { *mrand.Rand } -func init() { +func reset() { b := cRandBytes(8) var seed uint64 for i := 0; i < 8; i++ { seed |= uint64(b[i]) seed <<= 8 } + prng.Lock() prng.Rand = mrand.New(mrand.NewSource(int64(seed))) + prng.Unlock() +} + +func init() { + reset() } // Constructs an alphanumeric string of given length. diff --git a/common/random_test.go b/common/random_test.go index 3fe0bbc06..bed8e7650 100644 --- a/common/random_test.go +++ b/common/random_test.go @@ -1,34 +1,34 @@ -package common_test +package common import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" + "bytes" + "encoding/json" + "fmt" + "io" + mrand "math/rand" "sync" "testing" "time" "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/common" ) func TestRandStr(t *testing.T) { l := 243 - s := common.RandStr(l) + s := RandStr(l) assert.Equal(t, l, len(s)) } func TestRandBytes(t *testing.T) { l := 243 - b := common.RandBytes(l) + b := RandBytes(l) assert.Equal(t, l, len(b)) } func TestRandIntn(t *testing.T) { n := 243 for i := 0; i < 100; i++ { - x := common.RandIntn(n) + x := RandIntn(n) assert.True(t, x < n) } } @@ -39,39 +39,12 @@ func TestRandIntn(t *testing.T) { // * https://github.com/tendermint/tmlibs/issues/99 // * https://github.com/tendermint/tendermint/issues/973 func TestUniqueRng(t *testing.T) { - if os.Getenv("TENDERMINT_INTEGRATION_TESTS") == "" { - t.Skipf("Can only be 
run as an integration test") - } - - // The goal of this test is to invoke the - // Rand* tests externally with no repeating results, booted up. - // Any repeated results indicate that the seed is the same or that - // perhaps we are using math/rand directly. - tmpDir, err := ioutil.TempDir("", "rng-tests") - if err != nil { - t.Fatalf("Creating tempDir: %v", err) - } - defer os.RemoveAll(tmpDir) - - outpath := filepath.Join(tmpDir, "main.go") - f, err := os.Create(outpath) - if err != nil { - t.Fatalf("Setting up %q err: %v", outpath, err) - } - f.Write([]byte(integrationTestProgram)) - if err := f.Close(); err != nil { - t.Fatalf("Closing: %v", err) - } - + buf := new(bytes.Buffer) outputs := make(map[string][]int) for i := 0; i < 100; i++ { - cmd := exec.Command("go", "run", outpath) - bOutput, err := cmd.CombinedOutput() - if err != nil { - t.Errorf("Run #%d: err: %v output: %s", i, err, bOutput) - continue - } - output := string(bOutput) + testThemAll(buf) + output := buf.String() + buf.Reset() runs, seen := outputs[output] if seen { t.Errorf("Run #%d's output was already seen in previous runs: %v", i, runs) @@ -80,38 +53,30 @@ func TestUniqueRng(t *testing.T) { } } -const integrationTestProgram = ` -package main - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tmlibs/common" -) - -func main() { - // Set math/rand's Seed so that any direct invocations - // of math/rand will reveal themselves. 
- rand.Seed(1) - perm := common.RandPerm(10) - blob, _ := json.Marshal(perm) - fmt.Printf("perm: %s\n", blob) - - fmt.Printf("randInt: %d\n", common.RandInt()) - fmt.Printf("randUint: %d\n", common.RandUint()) - fmt.Printf("randIntn: %d\n", common.RandIntn(97)) - fmt.Printf("randInt31: %d\n", common.RandInt31()) - fmt.Printf("randInt32: %d\n", common.RandInt32()) - fmt.Printf("randInt63: %d\n", common.RandInt63()) - fmt.Printf("randInt64: %d\n", common.RandInt64()) - fmt.Printf("randUint32: %d\n", common.RandUint32()) - fmt.Printf("randUint64: %d\n", common.RandUint64()) - fmt.Printf("randUint16Exp: %d\n", common.RandUint16Exp()) - fmt.Printf("randUint32Exp: %d\n", common.RandUint32Exp()) - fmt.Printf("randUint64Exp: %d\n", common.RandUint64Exp()) -}` +func testThemAll(out io.Writer) { + // Reset the internal PRNG + reset() + + // Set math/rand's Seed so that any direct invocations + // of math/rand will reveal themselves. + mrand.Seed(1) + perm := RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + + fmt.Fprintf(out, "randInt: %d\n", RandInt()) + fmt.Fprintf(out, "randUint: %d\n", RandUint()) + fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) + fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) + fmt.Fprintf(out, "randInt32: %d\n", RandInt32()) + fmt.Fprintf(out, "randInt63: %d\n", RandInt63()) + fmt.Fprintf(out, "randInt64: %d\n", RandInt64()) + fmt.Fprintf(out, "randUint32: %d\n", RandUint32()) + fmt.Fprintf(out, "randUint64: %d\n", RandUint64()) + fmt.Fprintf(out, "randUint16Exp: %d\n", RandUint16Exp()) + fmt.Fprintf(out, "randUint32Exp: %d\n", RandUint32Exp()) + fmt.Fprintf(out, "randUint64Exp: %d\n", RandUint64Exp()) +} func TestRngConcurrencySafety(t *testing.T) { var wg sync.WaitGroup @@ -120,9 +85,9 @@ func TestRngConcurrencySafety(t *testing.T) { go func() { defer wg.Done() - _ = common.RandUint64() - <-time.After(time.Millisecond * time.Duration(common.RandIntn(100))) - _ = common.RandPerm(3) + _ = RandUint64() + 
<-time.After(time.Millisecond * time.Duration(RandIntn(100))) + _ = RandPerm(3) }() } wg.Wait() From a7b20d4e46db417d2256dfe81d910834348e2dc5 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 15 Dec 2017 02:48:40 -0700 Subject: [PATCH 314/515] db: Simplify exists check, fix IsKeyInDomain signature, Iterator Close + *FSDB.HasKey now uses common.FileExists to test for file existence + IsKeyInDomain takes key as a []byte slice instead of as a string to avoid extraneous []byte<-->string conversions for start and end + Iterator.Close() instead of Iterator.Release() + withDB helper to encapsulate DB creation, deferred cleanups so that for loops can use opened DBs and discard them ASAP Addressing accepted changes from review with @jaekwon --- db/backend_test.go | 48 +++++++++++++++++++++++++--------------------- db/c_level_db.go | 4 ++-- db/fsdb.go | 11 +++-------- db/go_level_db.go | 4 ---- db/mem_db.go | 4 +--- db/types.go | 4 ++-- db/util.go | 7 +++---- 7 files changed, 37 insertions(+), 45 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 7ead549b0..00fece515 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -45,33 +45,37 @@ func TestBackendsGetSetDelete(t *testing.T) { } } +func withDB(t *testing.T, creator dbCreator, fn func(DB)) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + defer cleanupDBDir("", name) + assert.Nil(t, err) + fn(db) + db.Close() +} + func TestBackendsNilKeys(t *testing.T) { // test all backends for dbType, creator := range backends { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db, err := creator(name, "") - defer cleanupDBDir("", name) - assert.Nil(t, err) - - panicMsg := "expecting %s.%s to panic" - assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") - assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") - assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") - assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) 
}, panicMsg, dbType, "setsync") - assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") - assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") - - db.Close() + withDB(t, creator, func(db DB) { + panicMsg := "expecting %s.%s to panic" + assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") + assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") + assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") + assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") + assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") + assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") + }) } } func TestGoLevelDBBackendStr(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") - defer cleanupDBDir("", name) - - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db.go b/db/c_level_db.go index 8e2a9372d..961e4d090 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -109,7 +109,7 @@ func (db *CLevelDB) Close() { func (db *CLevelDB) Print() { itr := db.Iterator(BeginningKey(), EndingKey()) - defer itr.Release() + defer itr.Close() for ; itr.Valid(); itr.Next() { key := itr.Key() value := itr.Value() @@ -231,7 +231,7 @@ func (c *cLevelDBIterator) checkEndKey() []byte { return key } -func (c *cLevelDBIterator) Release() { +func (c *cLevelDBIterator) Close() { c.itr.Close() } diff --git a/db/fsdb.go b/db/fsdb.go index 85adae630..056cc3982 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -10,6 +10,7 @@ import ( "sync" "github.com/pkg/errors" + cmn 
"github.com/tendermint/tmlibs/common" ) const ( @@ -64,13 +65,7 @@ func (db *FSDB) Has(key []byte) bool { panicNilKey(key) path := db.nameToPath(key) - _, err := read(path) - if os.IsNotExist(err) { - return false - } else if err != nil { - panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) - } - return true + return cmn.FileExists(path) } func (db *FSDB) Set(key []byte, value []byte) { @@ -246,7 +241,7 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if IsKeyInDomain(n, start, end) { + if IsKeyInDomain([]byte(n), start, end) { paths = append(paths, n) } } diff --git a/db/go_level_db.go b/db/go_level_db.go index 0d24020e0..45cb04984 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -263,10 +263,6 @@ func (it *goLevelDBIterator) Close() { it.source.Release() } -func (it *goLevelDBIterator) Release() { - it.source.Release() -} - func (it *goLevelDBIterator) assertNoError() { if err := it.source.Error(); err != nil { panic(err) diff --git a/db/mem_db.go b/db/mem_db.go index d20d0e7ea..44254870a 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -157,7 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - if IsKeyInDomain(key, start, end) { + if IsKeyInDomain([]byte(key), start, end) { keys = append(keys, key) } } @@ -222,5 +222,3 @@ func (it *memDBIterator) Close() { it.db = nil it.keys = nil } - -func (it *memDBIterator) Release() {} diff --git a/db/types.go b/db/types.go index 8370ff2da..ee8d69cc1 100644 --- a/db/types.go +++ b/db/types.go @@ -68,7 +68,7 @@ func EndingKey() []byte { Usage: var itr Iterator = ... 
- defer itr.Release() + defer itr.Close() for ; itr.Valid(); itr.Next() { k, v := itr.Key(); itr.Value() @@ -108,7 +108,7 @@ type Iterator interface { Value() []byte // Release deallocates the given Iterator. - Release() + Close() } // For testing convenience. diff --git a/db/util.go b/db/util.go index 203ddcfaf..661d0a16f 100644 --- a/db/util.go +++ b/db/util.go @@ -2,7 +2,6 @@ package db import ( "bytes" - "strings" ) func IteratePrefix(db DB, prefix []byte) Iterator { @@ -39,8 +38,8 @@ func cpIncr(bz []byte) (ret []byte) { return EndingKey() } -func IsKeyInDomain(key string, start, end []byte) bool { - leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 +func IsKeyInDomain(key, start, end []byte) bool { + leftCondition := bytes.Equal(start, BeginningKey()) || bytes.Compare(key, start) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || bytes.Compare(key, end) < 0 return leftCondition && rightCondition } From 225eace3169c1d1b3c5f0a6786b9f060f2784d38 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 15 Dec 2017 15:14:48 -0500 Subject: [PATCH 315/515] dont run metalinter on circle ... 
--- circle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index 3dba976be..104cfa6f3 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && bash ./test.sh + - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" From 66b0e8fa2d3b8632bad1eb84076dfe70c84657bf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 15 Dec 2017 15:58:23 -0500 Subject: [PATCH 316/515] fix c level db iterator --- db/backend_test.go | 16 ++++++++-------- db/c_level_db.go | 34 ++++++++++++++++++++++------------ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 00fece515..3362fecf6 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -70,12 +70,12 @@ func TestBackendsNilKeys(t *testing.T) { } func TestGoLevelDBBackendStr(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") - defer cleanupDBDir("", name) - - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db.go b/db/c_level_db.go index 961e4d090..60198d84c 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -162,16 +162,7 @@ func (mBatch *cLevelDBBatch) Write() { func (db *CLevelDB) Iterator(start, end []byte) Iterator { itr := db.db.NewIterator(db.ro) - if len(start) > 0 { - itr.Seek(start) - } else { - itr.SeekToFirst() - } - return &cLevelDBIterator{ - itr: itr, - start: start, - end: end, - } + return 
newCLevelDBIterator(itr, start, end) } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -187,6 +178,21 @@ type cLevelDBIterator struct { invalid bool } +func newCLevelDBIterator(itr *levigo.Iterator, start, end []byte) *cLevelDBIterator { + + if len(start) > 0 { + itr.Seek(start) + } else { + itr.SeekToFirst() + } + + return &cLevelDBIterator{ + itr: itr, + start: start, + end: end, + } +} + func (c *cLevelDBIterator) Domain() ([]byte, []byte) { return c.start, c.end } @@ -223,12 +229,16 @@ func (c *cLevelDBIterator) Next() { } // levigo has no upper bound when iterating, so need to check ourselves -func (c *cLevelDBIterator) checkEndKey() []byte { +func (c *cLevelDBIterator) checkEndKey() { + if !c.itr.Valid() { + c.invalid = true + return + } + key := c.itr.Key() if c.end != nil && bytes.Compare(key, c.end) > 0 { c.invalid = true } - return key } func (c *cLevelDBIterator) Close() { From e3bffd8fbd9427fbfe0413ad9c813fe8b702f71f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 16 Dec 2017 00:03:40 -0500 Subject: [PATCH 317/515] readme --- README.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..d5a11c7b4 --- /dev/null +++ b/README.md @@ -0,0 +1,53 @@ +# TMLIBS + +This repo is a home for various small packages. + +## autofile + +Autofile is file access with automatic log rotation. A group of files is maintained and rotation happens +when the leading file gets too big. Provides a reader for reading from the file group. + +## cli + +CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. + +## clist + +Clist provides a linekd list that is safe for concurrent access by many readers. + +## common + +Common provides a hodgepodge of useful functions. 
+ +## db + +DB provides a database interface and a number of implementions, including ones using an in-memory map, the filesystem directory structure, +an implemention of LevelDB in Go, and the official LevelDB in C. + +## events + +Events is a synchronous PubSub package. + +## flowrate + +Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. + +## log + +Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. + +## logger + +Logger is DEPRECATED. It's a simple wrapper around `log15`. + +## merkle + +Merkle provides a simple static merkle tree and corresponding proofs. + +## process + +Process is a simple utility for spawning OS processes. + +## pubsub + +PubSub is an asynchronous PubSub package. From 8638961f02833def91f743cbccaa2cecdccffa74 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 15 Dec 2017 22:40:12 -0700 Subject: [PATCH 318/515] common: Rand* warnings about cryptographic unsafety Lesson articulated by @jaekwon on why we need 80 bits of entropy at least before we can think of cryptographic safety. math/rand's seed is a max of 64 bits so can never be cryptographically secure. Also added some benchmarks for RandBytes --- common/random.go | 29 +++++++++++++++++++++++++++-- common/random_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/common/random.go b/common/random.go index 37b8b2773..9df55ff81 100644 --- a/common/random.go +++ b/common/random.go @@ -35,6 +35,7 @@ func init() { } // Constructs an alphanumeric string of given length. +// It is not safe for cryptographic usage. func RandStr(length int) string { chars := []byte{} MAIN_LOOP: @@ -58,10 +59,12 @@ MAIN_LOOP: return string(chars) } +// It is not safe for cryptographic usage. func RandUint16() uint16 { return uint16(RandUint32() & (1<<16 - 1)) } +// It is not safe for cryptographic usage. 
func RandUint32() uint32 { prng.Lock() u32 := prng.Uint32() @@ -69,10 +72,12 @@ func RandUint32() uint32 { return u32 } +// It is not safe for cryptographic usage. func RandUint64() uint64 { return uint64(RandUint32())<<32 + uint64(RandUint32()) } +// It is not safe for cryptographic usage. func RandUint() uint { prng.Lock() i := prng.Int() @@ -80,18 +85,22 @@ func RandUint() uint { return uint(i) } +// It is not safe for cryptographic usage. func RandInt16() int16 { return int16(RandUint32() & (1<<16 - 1)) } +// It is not safe for cryptographic usage. func RandInt32() int32 { return int32(RandUint32()) } +// It is not safe for cryptographic usage. func RandInt64() int64 { return int64(RandUint64()) } +// It is not safe for cryptographic usage. func RandInt() int { prng.Lock() i := prng.Int() @@ -99,6 +108,7 @@ func RandInt() int { return i } +// It is not safe for cryptographic usage. func RandInt31() int32 { prng.Lock() i31 := prng.Int31() @@ -106,6 +116,7 @@ func RandInt31() int32 { return i31 } +// It is not safe for cryptographic usage. func RandInt63() int64 { prng.Lock() i63 := prng.Int63() @@ -114,6 +125,7 @@ func RandInt63() int64 { } // Distributed pseudo-exponentially to test for various cases +// It is not safe for cryptographic usage. func RandUint16Exp() uint16 { bits := RandUint32() % 16 if bits == 0 { @@ -125,6 +137,7 @@ func RandUint16Exp() uint16 { } // Distributed pseudo-exponentially to test for various cases +// It is not safe for cryptographic usage. func RandUint32Exp() uint32 { bits := RandUint32() % 32 if bits == 0 { @@ -136,6 +149,7 @@ func RandUint32Exp() uint32 { } // Distributed pseudo-exponentially to test for various cases +// It is not safe for cryptographic usage. func RandUint64Exp() uint64 { bits := RandUint32() % 64 if bits == 0 { @@ -146,6 +160,7 @@ func RandUint64Exp() uint64 { return n } +// It is not safe for cryptographic usage. 
func RandFloat32() float32 { prng.Lock() f32 := prng.Float32() @@ -153,17 +168,26 @@ func RandFloat32() float32 { return f32 } +// It is not safe for cryptographic usage. func RandTime() time.Time { return time.Unix(int64(RandUint64Exp()), 0) } // RandBytes returns n random bytes from the OS's source of entropy ie. via crypto/rand. +// It is not safe for cryptographic usage. func RandBytes(n int) []byte { - return cRandBytes(n) + // cRandBytes isn't guaranteed to be fast so instead + // use random bytes generated from the internal PRNG + bs := make([]byte, n) + for i := 0; i < len(bs); i++ { + bs[i] = byte(RandInt() & 0xFF) + } + return bs } // RandIntn returns, as an int, a non-negative pseudo-random number in [0, n). -// It panics if n <= 0 +// It panics if n <= 0. +// It is not safe for cryptographic usage. func RandIntn(n int) int { prng.Lock() i := prng.Intn(n) @@ -172,6 +196,7 @@ func RandIntn(n int) int { } // RandPerm returns a pseudo-random permutation of n integers in [0, n). +// It is not safe for cryptographic usage. 
func RandPerm(n int) []int { prng.Lock() perm := prng.Perm(n) diff --git a/common/random_test.go b/common/random_test.go index bed8e7650..216f2f8bc 100644 --- a/common/random_test.go +++ b/common/random_test.go @@ -92,3 +92,29 @@ func TestRngConcurrencySafety(t *testing.T) { } wg.Wait() } + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = RandBytes(n) + } + b.ReportAllocs() +} From aab2d70dd34ec8a1aa780f7562193110fe8cb809 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 17 Dec 2017 13:04:15 -0800 Subject: [PATCH 319/515] Sdk2 kvpair (#102) * Canonical KVPair in common * Simplify common/Bytes to just hex encode --- common/bytes.go | 53 ++++++++++++++++++++++++++ common/bytes_test.go | 68 ++++++++++++++++++++++++++++++++++ common/kvpair.go | 30 +++++++++++++++ glide.lock | 48 ++++++++++++------------ merkle/kvpairs.go | 48 ------------------------ merkle/simple_map.go | 78 ++++++++++++++++++++++++++++++++++----- merkle/simple_map_test.go | 12 +++--- merkle/simple_tree.go | 7 +++- 8 files changed, 254 insertions(+), 90 deletions(-) create mode 100644 common/bytes.go create mode 100644 common/bytes_test.go create mode 100644 common/kvpair.go delete mode 100644 merkle/kvpairs.go diff --git a/common/bytes.go b/common/bytes.go new file mode 100644 index 000000000..d9ede98df --- /dev/null +++ b/common/bytes.go @@ -0,0 +1,53 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of Bytes is to enable HEX-encoding for 
json/encoding. +type Bytes []byte + +// Marshal needed for protobuf compatibility +func (b Bytes) Marshal() ([]byte, error) { + return b, nil +} + +// Unmarshal needed for protobuf compatibility +func (b *Bytes) Unmarshal(data []byte) error { + *b = data + return nil +} + +// This is the point of Bytes. +func (b Bytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(b)) + jb := make([]byte, len(s)+2) + jb[0] = '"' + copy(jb[1:], []byte(s)) + jb[1] = '"' + return jb, nil +} + +// This is the point of Bytes. +func (b *Bytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bytes, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *b = bytes + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (b Bytes) Bytes() []byte { + return b +} + +func (b Bytes) String() string { + return strings.ToUpper(hex.EncodeToString(b)) +} diff --git a/common/bytes_test.go b/common/bytes_test.go new file mode 100644 index 000000000..0c0eacc33 --- /dev/null +++ b/common/bytes_test.go @@ -0,0 +1,68 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + assert := assert.New(t) + + b := []byte("hello world") + dataB := Bytes(b) + b2, err := dataB.Marshal() + assert.Nil(err) + assert.Equal(b, b2) + + var dataB2 Bytes + err = (&dataB2).Unmarshal(b) + assert.Nil(err) + assert.Equal(dataB, dataB2) +} + +// Test that the hex encoding works. 
+func TestJSONMarshal(t *testing.T) { + assert := assert.New(t) + + type TestStruct struct { + B1 []byte + B2 Bytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(ts2.B1, tc.input) + assert.Equal(ts2.B2, Bytes(tc.input)) + }) + } +} diff --git a/common/kvpair.go b/common/kvpair.go new file mode 100644 index 000000000..b9e45733f --- /dev/null +++ b/common/kvpair.go @@ -0,0 +1,30 @@ +package common + +import ( + "bytes" + "sort" +) + +type KVPair struct { + Key Bytes + Value Bytes +} + +type KVPairs []KVPair + +// Sorting +func (kvs KVPairs) Len() int { return len(kvs) } +func (kvs KVPairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KVPairs) Sort() { sort.Sort(kvs) } diff --git a/glide.lock b/glide.lock index b0b3ff3c7..e87782d21 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-08-11T20:28:34.550901198Z +updated: 2017-12-17T12:50:35.983353926-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: 
github.com/go-kit/kit - version: 0873e56b0faeae3a1d661b10d629135508ea5504 + version: e3b2152e0063c5f05efea89ecbe297852af2a92d subpackages: - log - log/level @@ -12,17 +12,17 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-playground/locales - version: 1e5f1161c6416a5ff48840eb8724a394e48cc534 + version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 subpackages: - currency - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 7a2f19628aabfe68f0766b59e74d6315f8347d22 + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl - version: a4b07c25de5ff55ad3b8936cea69a79a3d95a855 + version: 23c074d0eceb2b8a5bfdbb271ab780cde70f05a8 subpackages: - hcl/ast - hcl/parser @@ -39,35 +39,33 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 51463bfca2576e06c62a8504b5c0f06d61312647 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mattn/go-colorable - version: ded68f7a9561c023e790de24279db7ebf473ea80 + version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe + version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure - version: cc8532a8e9a55ea36402aa21efdf403a60d34096 -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: 06020f85339e21b2478f756a78e295255ffa4d6a - name: github.com/pelletier/go-toml - version: 97253b98df84f9eef872866d079e74b8265150f1 + version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd - name: github.com/pkg/errors - version: c605e284fe17294bda444b34710735b29d1a9d90 + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: 
github.com/spf13/afero - version: 9be650865eab0c12963d8753212f4f9c66cdcf12 + version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 subpackages: - mem - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - version: db6b9a8b3f3f400c8ecb4a4d7d02245b8facad66 + version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 80fe0fb4eba54167e2ccae1c6c950e72abf61b73 + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb - version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 + version: adf24ef3f94bd13ec4163060b21a5678f22b429b subpackages: - leveldb - leveldb/cache @@ -82,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b53add0b622662731985485f3a19be7f684660b8 + version: b6fc872b42d41158a60307db4da051dd6f179415 subpackages: - data - data/base58 @@ -91,22 +89,22 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 5a033cc77e57eca05bdb50522851d29e03569cbe + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - ripemd160 - name: golang.org/x/sys - version: 9ccfe848b9db8435a24c424abbc07a921adf1df5 + version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 subpackages: - unix - name: golang.org/x/text - version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4 + version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 subpackages: - transform - unicode/norm - name: gopkg.in/go-playground/validator.v9 - version: d529ee1b0f30352444f507cc6cdac96bfd12decc + version: 61caf9d3038e1af346dbf5c2e16f6678e1548364 - name: gopkg.in/yaml.v2 - version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 
testImports: - name: github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 diff --git a/merkle/kvpairs.go b/merkle/kvpairs.go deleted file mode 100644 index 3d67049f2..000000000 --- a/merkle/kvpairs.go +++ /dev/null @@ -1,48 +0,0 @@ -package merkle - -import ( - "sort" - - wire "github.com/tendermint/go-wire" - "golang.org/x/crypto/ripemd160" -) - -// NOTE: Behavior is undefined with dup keys. -type KVPair struct { - Key string - Value interface{} // Can be Hashable or not. -} - -func (kv KVPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteString(kv.Key, hasher, n, err) - if kvH, ok := kv.Value.(Hashable); ok { - wire.WriteByteSlice(kvH.Hash(), hasher, n, err) - } else { - wire.WriteBinary(kv.Value, hasher, n, err) - } - if *err != nil { - panic(*err) - } - return hasher.Sum(nil) -} - -type KVPairs []KVPair - -func (kvps KVPairs) Len() int { return len(kvps) } -func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } -func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } -func (kvps KVPairs) Sort() { sort.Sort(kvps) } - -func MakeSortedKVPairs(m map[string]interface{}) []Hashable { - kvPairs := make([]KVPair, 0, len(m)) - for k, v := range m { - kvPairs = append(kvPairs, KVPair{k, v}) - } - KVPairs(kvPairs).Sort() - kvPairsH := make([]Hashable, 0, len(kvPairs)) - for _, kvp := range kvPairs { - kvPairsH = append(kvPairsH, kvp) - } - return kvPairsH -} diff --git a/merkle/simple_map.go b/merkle/simple_map.go index 43dce990f..003c7cd42 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -1,26 +1,86 @@ package merkle +import ( + "github.com/tendermint/go-wire" + cmn "github.com/tendermint/tmlibs/common" + "golang.org/x/crypto/ripemd160" +) + type SimpleMap struct { - kvz KVPairs + kvs cmn.KVPairs + sorted bool } func NewSimpleMap() *SimpleMap { return &SimpleMap{ - kvz: nil, + kvs: nil, + sorted: false, } } -func (sm *SimpleMap) Set(k string, o 
interface{}) { - sm.kvz = append(sm.kvz, KVPair{Key: k, Value: o}) +func (sm *SimpleMap) Set(key string, value interface{}) { + sm.sorted = false + + // Is value Hashable? + var vBytes []byte + if hashable, ok := value.(Hashable); ok { + vBytes = hashable.Hash() + } else { + vBytes = wire.BinaryBytes(value) + } + + sm.kvs = append(sm.kvs, cmn.KVPair{ + Key: []byte(key), + Value: vBytes, + }) } // Merkle root hash of items sorted by key. // NOTE: Behavior is undefined when key is duplicate. func (sm *SimpleMap) Hash() []byte { - sm.kvz.Sort() - kvPairsH := make([]Hashable, 0, len(sm.kvz)) - for _, kvp := range sm.kvz { - kvPairsH = append(kvPairsH, kvp) + sm.Sort() + return hashKVPairs(sm.kvs) +} + +func (sm *SimpleMap) Sort() { + if sm.sorted { + return + } + sm.kvs.Sort() + sm.sorted = true +} + +// Returns a copy of sorted KVPairs. +// CONTRACT: The returned slice must not be mutated. +func (sm *SimpleMap) KVPairs() cmn.KVPairs { + sm.Sort() + kvs := make(cmn.KVPairs, len(sm.kvs)) + copy(kvs, sm.kvs) + return kvs +} + +//---------------------------------------- + +// A local extension to KVPair that can be hashed. 
+type kvPair cmn.KVPair + +func (kv kvPair) Hash() []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteByteSlice(kv.Key, hasher, n, err) + if *err != nil { + panic(*err) + } + wire.WriteByteSlice(kv.Value, hasher, n, err) + if *err != nil { + panic(*err) + } + return hasher.Sum(nil) +} + +func hashKVPairs(kvs cmn.KVPairs) []byte { + kvsH := make([]Hashable, 0, len(kvs)) + for _, kvp := range kvs { + kvsH = append(kvsH, kvPair(kvp)) } - return SimpleHashFromHashables(kvPairsH) + return SimpleHashFromHashables(kvsH) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 5eb218274..8ba7ce66b 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -11,37 +11,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", "value1") - assert.Equal(t, "376bf717ebe3659a34f68edb833dfdcf4a2d3c10", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "3bb53f017d2f5b4f144692aa829a5c245ac2b123", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value2") - assert.Equal(t, "72fd3a7224674377952214cb10ef21753ec803eb", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "14a68db29e3f930ffaafeff5e07c17a439384f39", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") - assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") - assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") 
db.Set("key2", "value2") db.Set("key3", "value3") - assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") db.Set("key3", "value3") - assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index d64082b43..3a82f4edc 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -88,6 +88,9 @@ func SimpleHashFromHashables(items []Hashable) []byte { // Convenience for SimpleHashFromHashes. func SimpleHashFromMap(m map[string]interface{}) []byte { - kpPairsH := MakeSortedKVPairs(m) - return SimpleHashFromHashables(kpPairsH) + sm := NewSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + return sm.Hash() } From 4ce8448d7fcf92b040046f894474ce2f7e779b67 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 17 Dec 2017 13:11:28 -0800 Subject: [PATCH 320/515] Nil keys are OK, deprecate BeginningKey/EndingKey (#101) * Nil keys are OK, deprecate BeginningKey/EndingKey --- db/backend_test.go | 95 ++++++++++++++++++++++---- db/c_level_db.go | 132 ++++++++++++++++++++---------------- db/common_test.go | 34 ++++++++-- db/fsdb.go | 51 +++++++------- db/go_level_db.go | 163 ++++++++++++++++++++++----------------------- db/mem_db.go | 124 ++++++++++++++++------------------ db/mem_db_test.go | 48 ------------- db/types.go | 45 +++++++------ db/util.go | 29 ++++++-- db/util_test.go | 11 ++- 10 files changed, 400 insertions(+), 332 deletions(-) delete mode 100644 db/mem_db_test.go diff --git a/db/backend_test.go b/db/backend_test.go index 3362fecf6..e103843dc 100644 --- a/db/backend_test.go 
+++ b/db/backend_test.go @@ -21,6 +21,13 @@ func testBackendGetSetDelete(t *testing.T, backend string) { defer dir.Close() db := NewDB("testdb", backend, dirname) + // A nonexistent key should return nil, even if the key is empty. + require.Nil(t, db.Get([]byte(""))) + + // A nonexistent key should return nil, even if the key is nil. + require.Nil(t, db.Get(nil)) + + // A nonexistent key should return nil. key := []byte("abc") require.Nil(t, db.Get(key)) @@ -55,27 +62,89 @@ func withDB(t *testing.T, creator dbCreator, fn func(DB)) { } func TestBackendsNilKeys(t *testing.T) { - // test all backends + // test all backends. + // nil keys are treated as the empty key for most operations. for dbType, creator := range backends { withDB(t, creator, func(db DB) { - panicMsg := "expecting %s.%s to panic" - assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") - assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") - assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") - assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") - assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") - assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") + t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + + expect := func(key, value []byte) { + if len(key) == 0 { // nil or empty + assert.Equal(t, db.Get(nil), db.Get([]byte(""))) + assert.Equal(t, db.Has(nil), db.Has([]byte(""))) + } + assert.Equal(t, db.Get(key), value) + assert.Equal(t, db.Has(key), value != nil) + } + + // Not set + expect(nil, nil) + + // Set nil value + db.Set(nil, nil) + expect(nil, []byte("")) + + // Set empty value + db.Set(nil, []byte("")) + expect(nil, []byte("")) + + // Set nil, Delete nil + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set nil, Delete empty + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + 
db.Delete([]byte("")) + expect(nil, nil) + + // Set empty, Delete nil + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set empty, Delete empty + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // SetSync nil, DeleteSync nil + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync nil, DeleteSync empty + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + + // SetSync empty, DeleteSync nil + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync empty, DeleteSync empty + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + }) }) } } func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") + db := NewDB(name, GoLevelDBBackendStr, "") defer cleanupDBDir("", name) - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + _, ok := db.(*GoLevelDB) + assert.True(t, ok) } diff --git a/db/c_level_db.go b/db/c_level_db.go index 60198d84c..c9f8d419b 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -51,7 +51,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { } func (db *CLevelDB) Get(key []byte) []byte { - panicNilKey(key) + key = nonNilBytes(key) res, err := db.db.Get(db.ro, key) if err != nil { panic(err) @@ -60,12 +60,12 @@ func (db *CLevelDB) Get(key []byte) []byte { } func (db *CLevelDB) Has(key []byte) bool { - panicNilKey(key) - panic("not implemented yet") + return db.Get(key) != nil } func (db *CLevelDB) Set(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(db.wo, key, value) if err != nil { 
panic(err) @@ -73,7 +73,8 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } func (db *CLevelDB) SetSync(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(db.woSync, key, value) if err != nil { panic(err) @@ -81,7 +82,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } func (db *CLevelDB) Delete(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(db.wo, key) if err != nil { panic(err) @@ -89,7 +90,7 @@ func (db *CLevelDB) Delete(key []byte) { } func (db *CLevelDB) DeleteSync(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(db.woSync, key) if err != nil { panic(err) @@ -108,7 +109,7 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) defer itr.Close() for ; itr.Valid(); itr.Next() { key := itr.Key() @@ -159,94 +160,107 @@ func (mBatch *cLevelDBBatch) Write() { //---------------------------------------- // Iterator +// NOTE This is almost identical to db/go_level_db.Iterator +// Before creating a third version, refactor. 
func (db *CLevelDB) Iterator(start, end []byte) Iterator { itr := db.db.NewIterator(db.ro) - return newCLevelDBIterator(itr, start, end) + return newCLevelDBIterator(itr, start, end, false) } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } var _ Iterator = (*cLevelDBIterator)(nil) type cLevelDBIterator struct { - itr *levigo.Iterator + source *levigo.Iterator start, end []byte - invalid bool + isReverse bool + isInvalid bool } -func newCLevelDBIterator(itr *levigo.Iterator, start, end []byte) *cLevelDBIterator { - - if len(start) > 0 { - itr.Seek(start) +func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { + if isReverse { + panic("not implemented yet") // XXX + } + if start != nil { + source.Seek(start) } else { - itr.SeekToFirst() + source.SeekToFirst() } - return &cLevelDBIterator{ - itr: itr, - start: start, - end: end, + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, } } -func (c *cLevelDBIterator) Domain() ([]byte, []byte) { - return c.start, c.end +func (itr *cLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (c *cLevelDBIterator) Valid() bool { - c.assertNoError() - if c.invalid { +func (itr *cLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { return false } - c.invalid = !c.itr.Valid() - return !c.invalid -} -func (c *cLevelDBIterator) Key() []byte { - if !c.Valid() { - panic("cLevelDBIterator Key() called when invalid") + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false } - return c.itr.Key() -} -func (c *cLevelDBIterator) Value() []byte { - if !c.Valid() { - panic("cLevelDBIterator Value() called when invalid") + // If key is end or past it, invalid. 
+ var end = itr.end + var key = itr.source.Key() + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false } - return c.itr.Value() + + // Valid + return true } -func (c *cLevelDBIterator) Next() { - if !c.Valid() { - panic("cLevelDBIterator Next() called when invalid") - } - c.itr.Next() - c.checkEndKey() // if we've exceeded the range, we're now invalid +func (itr *cLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() } -// levigo has no upper bound when iterating, so need to check ourselves -func (c *cLevelDBIterator) checkEndKey() { - if !c.itr.Valid() { - c.invalid = true - return - } +func (itr *cLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} - key := c.itr.Key() - if c.end != nil && bytes.Compare(key, c.end) > 0 { - c.invalid = true - } +func (itr *cLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + itr.source.Next() } -func (c *cLevelDBIterator) Close() { - c.itr.Close() +func (itr *cLevelDBIterator) Close() { + itr.source.Close() } -func (c *cLevelDBIterator) assertNoError() { - if err := c.itr.GetError(); err != nil { +func (itr *cLevelDBIterator) assertNoError() { + if err := itr.source.GetError(); err != nil { panic(err) } } + +func (itr cLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("cLevelDBIterator is invalid") + } +} diff --git a/db/common_test.go b/db/common_test.go index 6b3009795..2a5d01818 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -57,7 +57,7 @@ func TestDBIteratorSingleKey(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkValid(t, itr, true) checkNext(t, itr, false) @@ -78,7 +78,7 @@ func TestDBIteratorTwoKeys(t *testing.T) { db.SetSync(bz("2"), bz("value_1")) { // Fail by 
calling Next too much - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkValid(t, itr, true) checkNext(t, itr, true) @@ -96,11 +96,35 @@ func TestDBIteratorTwoKeys(t *testing.T) { } } +func TestDBIteratorMany(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + assert.Equal(t, db.Get(itr.Key()), itr.Value()) + } + }) + } +} + func TestDBIteratorEmpty(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkInvalid(t, itr) }) @@ -111,7 +135,7 @@ func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator(bz("1"), EndingKey()) + itr := db.Iterator(bz("1"), nil) checkInvalid(t, itr) }) @@ -123,7 +147,7 @@ func TestDBIteratorNonemptyBeginAfter(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(bz("2"), EndingKey()) + itr := db.Iterator(bz("2"), nil) checkInvalid(t, itr) }) diff --git a/db/fsdb.go b/db/fsdb.go index 056cc3982..45c3231f6 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -47,7 +47,7 @@ func NewFSDB(dir string) *FSDB { func (db *FSDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) value, err := read(path) @@ -62,7 +62,7 @@ func (db *FSDB) Get(key []byte) []byte { func (db *FSDB) Has(key 
[]byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) return cmn.FileExists(path) @@ -71,7 +71,6 @@ func (db *FSDB) Has(key []byte) bool { func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.SetNoLock(key, value) } @@ -79,17 +78,14 @@ func (db *FSDB) Set(key []byte, value []byte) { func (db *FSDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) SetNoLock(key []byte, value []byte) { - panicNilKey(key) - if value == nil { - value = []byte{} - } + key = escapeKey(key) + value = nonNilBytes(value) path := db.nameToPath(key) err := write(path, value) if err != nil { @@ -100,7 +96,6 @@ func (db *FSDB) SetNoLock(key []byte, value []byte) { func (db *FSDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.DeleteNoLock(key) } @@ -108,14 +103,13 @@ func (db *FSDB) Delete(key []byte) { func (db *FSDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter. 
func (db *FSDB) DeleteNoLock(key []byte) { - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) err := remove(path) if os.IsNotExist(err) { @@ -157,8 +151,6 @@ func (db *FSDB) Mutex() *sync.Mutex { } func (db *FSDB) Iterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() @@ -169,13 +161,11 @@ func (db *FSDB) Iterator(start, end []byte) Iterator { panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } sort.Strings(keys) - it.keys = keys - return it + return newMemDBIterator(db, keys, start, end) } func (db *FSDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } func (db *FSDB) nameToPath(name []byte) string { @@ -221,8 +211,7 @@ func remove(path string) error { return os.Remove(path) } -// List files of a path. -// Paths will NOT include dir as the prefix. +// List keys in a directory, stripping of escape sequences and dir portions. // CONTRACT: returns os errors directly without wrapping. func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) @@ -235,15 +224,31 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, err } - var paths []string + var keys []string for _, name := range names { n, err := url.PathUnescape(name) if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if IsKeyInDomain([]byte(n), start, end) { - paths = append(paths, n) + key := unescapeKey([]byte(n)) + if IsKeyInDomain(key, start, end, false) { + keys = append(keys, string(key)) } } - return paths, nil + return keys, nil +} + +// To support empty or nil keys, while the file system doesn't allow empty +// filenames. 
+func escapeKey(key []byte) []byte { + return []byte("k_" + string(key)) +} +func unescapeKey(escKey []byte) []byte { + if len(escKey) < 2 { + panic(fmt.Sprintf("Invalid esc key: %x", escKey)) + } + if string(escKey[:2]) != "k_" { + panic(fmt.Sprintf("Invalid esc key: %x", escKey)) + } + return escKey[2:] } diff --git a/db/go_level_db.go b/db/go_level_db.go index 45cb04984..bf2b3bf76 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -1,6 +1,7 @@ package db import ( + "bytes" "fmt" "path/filepath" @@ -8,7 +9,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" . "github.com/tendermint/tmlibs/common" ) @@ -40,33 +40,25 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { } func (db *GoLevelDB) Get(key []byte) []byte { - panicNilKey(key) + key = nonNilBytes(key) res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { return nil } else { - PanicCrisis(err) + panic(err) } } return res } func (db *GoLevelDB) Has(key []byte) bool { - panicNilKey(key) - _, err := db.db.Get(key, nil) - if err != nil { - if err == errors.ErrNotFound { - return false - } else { - PanicCrisis(err) - } - } - return true + return db.Get(key) != nil } func (db *GoLevelDB) Set(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) @@ -74,7 +66,8 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } func (db *GoLevelDB) SetSync(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -82,7 +75,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } func (db *GoLevelDB) Delete(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := 
db.db.Delete(key, nil) if err != nil { PanicCrisis(err) @@ -90,7 +83,7 @@ func (db *GoLevelDB) Delete(key []byte) { } func (db *GoLevelDB) DeleteSync(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -169,102 +162,104 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator +// NOTE This is almost identical to db/c_level_db.Iterator +// Before creating a third version, refactor. + +type goLevelDBIterator struct { + source iterator.Iterator + start []byte + end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*goLevelDBIterator)(nil) -// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -func goLevelDBIterRange(start, end []byte) *util.Range { - // XXX: what if start == nil ? - if len(start) == 0 { - start = nil +func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { + if isReverse { + panic("not implemented yet") // XXX } - return &util.Range{ - Start: start, - Limit: end, + source.Seek(start) + return &goLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, } } func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itrRange := goLevelDBIterRange(start, end) - itr := db.db.NewIterator(itrRange, nil) - itr.Seek(start) // if we don't call this the itr is never valid (?!) 
- return &goLevelDBIterator{ - source: itr, - start: start, - end: end, - } + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } -var _ Iterator = (*goLevelDBIterator)(nil) - -type goLevelDBIterator struct { - source iterator.Iterator - invalid bool - start, end []byte +func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (it *goLevelDBIterator) Domain() ([]byte, []byte) { - return it.start, it.end -} +func (itr *goLevelDBIterator) Valid() bool { -// Key returns a copy of the current key. -func (it *goLevelDBIterator) Key() []byte { - if !it.Valid() { - panic("goLevelDBIterator Key() called when invalid") + // Once invalid, forever invalid. + if itr.isInvalid { + return false } - key := it.source.Key() - k := make([]byte, len(key)) - copy(k, key) - return k -} + // Panic on DB error. No way to recover. + itr.assertNoError() -// Value returns a copy of the current value. -func (it *goLevelDBIterator) Value() []byte { - if !it.Valid() { - panic("goLevelDBIterator Value() called when invalid") + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false } - val := it.source.Value() - v := make([]byte, len(val)) - copy(v, val) - - return v -} -func (it *goLevelDBIterator) Valid() bool { - it.assertNoError() - if it.invalid { + // If key is end or past it, invalid. 
+ var end = itr.end + var key = itr.source.Key() + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true return false } - it.invalid = !it.source.Valid() - return !it.invalid + + // Valid + return true } -func (it *goLevelDBIterator) Next() { - if !it.Valid() { - panic("goLevelDBIterator Next() called when invalid") - } - it.source.Next() +func (itr *goLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() } -func (it *goLevelDBIterator) Prev() { - if !it.Valid() { - panic("goLevelDBIterator Prev() called when invalid") - } - it.source.Prev() +func (itr *goLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} + +func (itr *goLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + itr.source.Next() } -func (it *goLevelDBIterator) Close() { - it.source.Release() +func (itr *goLevelDBIterator) Close() { + itr.source.Release() } -func (it *goLevelDBIterator) assertNoError() { - if err := it.source.Error(); err != nil { +func (itr *goLevelDBIterator) assertNoError() { + if err := itr.source.Error(); err != nil { panic(err) } } + +func (itr goLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("goLevelDBIterator is invalid") + } +} diff --git a/db/mem_db.go b/db/mem_db.go index 44254870a..e9d9174dc 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -29,14 +29,16 @@ func NewMemDB() *MemDB { func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = nonNilBytes(key) + return db.db[string(key)] } func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = nonNilBytes(key) + _, ok := db.db[string(key)] return ok } @@ -44,43 +46,43 @@ func (db *MemDB) Has(key []byte) bool { func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value 
[]byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter func (db *MemDB) SetNoLock(key []byte, value []byte) { - if value == nil { - value = []byte{} - } - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) + db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) - delete(db.db, string(key)) + + db.DeleteNoLock(key) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) - delete(db.db, string(key)) + + db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter func (db *MemDB) DeleteNoLock(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) + delete(db.db, string(key)) } @@ -125,100 +127,92 @@ func (db *MemDB) Mutex() *sync.Mutex { //---------------------------------------- func (db *MemDB) Iterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - it.keys = db.getSortedKeys(start, end) - return it + keys := db.getSortedKeys(start, end, false) + return newMemDBIterator(db, keys, start, end) } func (db *MemDB) ReverseIterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. 
- it.keys = db.getSortedKeys(end, start) - // reverse the order - l := len(it.keys) - 1 - for i, v := range it.keys { - it.keys[i] = it.keys[l-i] - it.keys[l-i] = v - } - return nil + keys := db.getSortedKeys(end, start, true) + return newMemDBIterator(db, keys, start, end) } -func (db *MemDB) getSortedKeys(start, end []byte) []string { +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { keys := []string{} for key, _ := range db.db { - if IsKeyInDomain([]byte(key), start, end) { + if IsKeyInDomain([]byte(key), start, end, false) { keys = append(keys, key) } } sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + keys[i] = keys[nkeys-i-1] + } + } return keys } var _ Iterator = (*memDBIterator)(nil) +// We need a copy of all of the keys. +// Not the best, but probably not a bottleneck depending. type memDBIterator struct { - cur int - keys []string - db DB - start, end []byte + db DB + cur int + keys []string + start []byte + end []byte } -func newMemDBIterator(db DB, start, end []byte) *memDBIterator { +// Keys is expected to be in reverse order for reverse iterators. 
+func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { return &memDBIterator{ db: db, + cur: 0, + keys: keys, start: start, end: end, } } -func (it *memDBIterator) Domain() ([]byte, []byte) { - return it.start, it.end +func (itr *memDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (it *memDBIterator) Valid() bool { - return 0 <= it.cur && it.cur < len(it.keys) +func (itr *memDBIterator) Valid() bool { + return 0 <= itr.cur && itr.cur < len(itr.keys) } -func (it *memDBIterator) Next() { - if !it.Valid() { - panic("memDBIterator Next() called when invalid") - } - it.cur++ +func (itr *memDBIterator) Next() { + itr.assertIsValid() + itr.cur++ } -func (it *memDBIterator) Prev() { - if !it.Valid() { - panic("memDBIterator Next() called when invalid") - } - it.cur-- +func (itr *memDBIterator) Key() []byte { + itr.assertIsValid() + return []byte(itr.keys[itr.cur]) } -func (it *memDBIterator) Key() []byte { - if !it.Valid() { - panic("memDBIterator Key() called when invalid") - } - return []byte(it.keys[it.cur]) +func (itr *memDBIterator) Value() []byte { + itr.assertIsValid() + key := []byte(itr.keys[itr.cur]) + return itr.db.Get(key) } -func (it *memDBIterator) Value() []byte { - if !it.Valid() { - panic("memDBIterator Value() called when invalid") - } - return it.db.Get(it.Key()) +func (itr *memDBIterator) Close() { + itr.keys = nil + itr.db = nil } -func (it *memDBIterator) Close() { - it.db = nil - it.keys = nil +func (itr *memDBIterator) assertIsValid() { + if !itr.Valid() { + panic("memDBIterator is invalid") + } } diff --git a/db/mem_db_test.go b/db/mem_db_test.go deleted file mode 100644 index a08a3679b..000000000 --- a/db/mem_db_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMemDBIterator(t *testing.T) { - db := NewMemDB() - keys := make([][]byte, 100) - for i := 0; i < 100; i++ { - keys[i] 
= []byte{byte(i)} - } - - value := []byte{5} - for _, k := range keys { - db.Set(k, value) - } - - iter := db.Iterator(BeginningKey(), EndingKey()) - i := 0 - for ; iter.Valid(); iter.Next() { - assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") - i += 1 - } - assert.Equal(t, i, len(db.db), "iterator didnt cover whole db") -} - -func TestMemDBClose(t *testing.T) { - db := NewMemDB() - copyDB := func(orig map[string][]byte) map[string][]byte { - copy := make(map[string][]byte) - for k, v := range orig { - copy[k] = v - } - return copy - } - k, v := []byte("foo"), []byte("bar") - db.Set(k, v) - require.Equal(t, db.Get(k), v, "expecting a successful get") - copyBefore := copyDB(db.db) - db.Close() - require.Equal(t, db.Get(k), v, "Close is a noop, expecting a successful get") - copyAfter := copyDB(db.db) - require.Equal(t, copyBefore, copyAfter, "Close is a noop and shouldn't modify any internal data") -} diff --git a/db/types.go b/db/types.go index ee8d69cc1..6e5d2408d 100644 --- a/db/types.go +++ b/db/types.go @@ -2,31 +2,39 @@ package db type DB interface { - // Get returns nil iff key doesn't exist. Panics on nil key. + // Get returns nil iff key doesn't exist. + // A nil key is interpreted as an empty byteslice. Get([]byte) []byte - // Has checks if a key exists. Panics on nil key. + // Has checks if a key exists. + // A nil key is interpreted as an empty byteslice. Has(key []byte) bool - // Set sets the key. Panics on nil key. + // Set sets the key. + // A nil key is interpreted as an empty byteslice. Set([]byte, []byte) SetSync([]byte, []byte) - // Delete deletes the key. Panics on nil key. + // Delete deletes the key. + // A nil key is interpreted as an empty byteslice. Delete([]byte) DeleteSync([]byte) - // Iterator over a domain of keys in ascending order. End is exclusive. + // Iterate over a domain of keys in ascending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. 
+ // A nil start is interpreted as an empty byteslice. + // If end is nil, iterates up to the last item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. Iterator(start, end []byte) Iterator - // Iterator over a domain of keys in descending order. End is exclusive. + // Iterate over a domain of keys in descending order. End is exclusive. // Start must be greater than end, or the Iterator is invalid. + // If start is nil, iterates from the last/greatest item (inclusive). + // If end is nil, iterates up to the first/least item (iclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. ReverseIterator(start, end []byte) Iterator - // Releases the connection. + // Closes the connection. Close() // Creates a batch for atomic updates. @@ -54,16 +62,6 @@ type SetDeleter interface { //---------------------------------------- -// BeginningKey is the smallest key. -func BeginningKey() []byte { - return []byte{} -} - -// EndingKey is the largest key. -func EndingKey() []byte { - return nil -} - /* Usage: @@ -107,7 +105,7 @@ type Iterator interface { // If Valid returns false, this method will panic. Value() []byte - // Release deallocates the given Iterator. + // Close releases the Iterator. Close() } @@ -116,9 +114,12 @@ func bz(s string) []byte { return []byte(s) } -// All DB funcs should panic on nil key. -func panicNilKey(key []byte) { - if key == nil { - panic("nil key") +// We defensively turn nil keys or values into []byte{} for +// most operations. 
+func nonNilBytes(bz []byte) []byte { + if bz == nil { + return []byte{} + } else { + return bz } } diff --git a/db/util.go b/db/util.go index 661d0a16f..b0ab7f6ad 100644 --- a/db/util.go +++ b/db/util.go @@ -7,8 +7,8 @@ import ( func IteratePrefix(db DB, prefix []byte) Iterator { var start, end []byte if len(prefix) == 0 { - start = BeginningKey() - end = EndingKey() + start = nil + end = nil } else { start = cp(prefix) end = cpIncr(prefix) @@ -35,11 +35,26 @@ func cpIncr(bz []byte) (ret []byte) { ret[i] = byte(0x00) } } - return EndingKey() + return nil } -func IsKeyInDomain(key, start, end []byte) bool { - leftCondition := bytes.Equal(start, BeginningKey()) || bytes.Compare(key, start) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || bytes.Compare(key, end) < 0 - return leftCondition && rightCondition +// See DB interface documentation for more information. +func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { + if !isReverse { + if bytes.Compare(key, start) < 0 { + return false + } + if end != nil && bytes.Compare(end, key) <= 0 { + return false + } + return true + } else { + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true + } } diff --git a/db/util_test.go b/db/util_test.go index b273f8d46..854448af3 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -// empty iterator for empty db +// Empty iterator for empty db. func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -17,7 +17,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { } } -// empty iterator for db populated after iterator created +// Empty iterator for db populated after iterator created. 
func TestPrefixIteratorNoMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -30,7 +30,7 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { } } -// empty iterator for prefix starting above db entry +// Empty iterator for prefix starting after db entry. func TestPrefixIteratorNoMatch2(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -38,13 +38,12 @@ func TestPrefixIteratorNoMatch2(t *testing.T) { db.SetSync(bz("3"), bz("value_3")) itr := IteratePrefix(db, []byte("4")) - // Once invalid... checkInvalid(t, itr) }) } } -// iterator with single val for db with single val, starting from that val +// Iterator with single val for db with single val, starting from that val. func TestPrefixIteratorMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -62,7 +61,7 @@ func TestPrefixIteratorMatch1(t *testing.T) { } } -// iterator with prefix iterates over everything with same prefix +// Iterator with prefix iterates over everything with same prefix. 
func TestPrefixIteratorMatches1N(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { From 70e30f74e60b2710c3c270178b0be2c4c7319722 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 19 Dec 2017 16:16:16 -0600 Subject: [PATCH 321/515] Revert "Refactor repeat timer" --- common/repeat_timer.go | 116 +++++++++++++++------------------- common/repeat_timer_test.go | 12 ++-- common/throttle_timer.go | 24 ++++--- common/throttle_timer_test.go | 2 - 4 files changed, 67 insertions(+), 87 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 23faf74ae..d7d9154d4 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -1,6 +1,7 @@ package common import ( + "sync" "time" ) @@ -10,93 +11,76 @@ It's good for keeping connections alive. A RepeatTimer must be Stop()'d or it will keep a goroutine alive. */ type RepeatTimer struct { - Name string - Ch <-chan time.Time - output chan<- time.Time - input chan repeatCommand + Ch chan time.Time - dur time.Duration - ticker *time.Ticker - stopped bool + mtx sync.Mutex + name string + ticker *time.Ticker + quit chan struct{} + wg *sync.WaitGroup + dur time.Duration } -type repeatCommand int8 - -const ( - Reset repeatCommand = iota - RQuit -) - func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - c := make(chan time.Time) var t = &RepeatTimer{ - Name: name, - Ch: c, - output: c, - input: make(chan repeatCommand), - - dur: dur, + Ch: make(chan time.Time), ticker: time.NewTicker(dur), + quit: make(chan struct{}), + wg: new(sync.WaitGroup), + name: name, + dur: dur, } - go t.run() + t.wg.Add(1) + go t.fireRoutine(t.ticker) return t } +func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { + for { + select { + case t_ := <-ticker.C: + t.Ch <- t_ + case <-t.quit: + // needed so we know when we can reset t.quit + t.wg.Done() + return + } + } +} + // Wait the duration again before firing. 
func (t *RepeatTimer) Reset() { - t.input <- Reset + t.Stop() + + t.mtx.Lock() // Lock + defer t.mtx.Unlock() + + t.ticker = time.NewTicker(t.dur) + t.quit = make(chan struct{}) + t.wg.Add(1) + go t.fireRoutine(t.ticker) } // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil RepeatTimers. func (t *RepeatTimer) Stop() bool { - // use t.stopped to gracefully handle many Stop() without blocking - if t == nil || t.stopped { + if t == nil { return false } - t.input <- RQuit - t.stopped = true - return true -} + t.mtx.Lock() // Lock + defer t.mtx.Unlock() -func (t *RepeatTimer) run() { - done := false - for !done { + exists := t.ticker != nil + if exists { + t.ticker.Stop() // does not close the channel select { - case cmd := <-t.input: - // stop goroutine if the input says so - // don't close channels, as closed channels mess up select reads - done = t.processInput(cmd) - case tick := <-t.ticker.C: - t.send(tick) + case <-t.Ch: + // read off channel if there's anything there + default: } + close(t.quit) + t.wg.Wait() // must wait for quit to close else we race Reset + t.ticker = nil } -} - -// send performs blocking send on t.Ch -func (t *RepeatTimer) send(tick time.Time) { - // XXX: possibly it is better to not block: - // https://golang.org/src/time/sleep.go#L132 - // select { - // case t.output <- tick: - // default: - // } - t.output <- tick -} - -// all modifications of the internal state of ThrottleTimer -// happen in this method. 
It is only called from the run goroutine -// so we avoid any race conditions -func (t *RepeatTimer) processInput(cmd repeatCommand) (shutdown bool) { - switch cmd { - case Reset: - t.ticker.Stop() - t.ticker = time.NewTicker(t.dur) - case RQuit: - t.ticker.Stop() - shutdown = true - default: - panic("unknown command!") - } - return shutdown + return exists } diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index db53aa614..87f34b950 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -10,7 +10,7 @@ import ( ) type rCounter struct { - input <-chan time.Time + input chan time.Time mtx sync.Mutex count int } @@ -39,11 +39,11 @@ func (c *rCounter) Read() { func TestRepeat(test *testing.T) { assert := asrt.New(test) - dur := time.Duration(100) * time.Millisecond + dur := time.Duration(50) * time.Millisecond short := time.Duration(20) * time.Millisecond // delay waits for cnt durations, an a little extra delay := func(cnt int) time.Duration { - return time.Duration(cnt)*dur + time.Duration(10)*time.Millisecond + return time.Duration(cnt)*dur + time.Duration(5)*time.Millisecond } t := NewRepeatTimer("bar", dur) @@ -70,9 +70,9 @@ func TestRepeat(test *testing.T) { // after a stop, nothing more is sent stopped := t.Stop() assert.True(stopped) - time.Sleep(delay(2)) + time.Sleep(delay(7)) assert.Equal(6, c.Count()) - // extra calls to stop don't block - t.Stop() + // close channel to stop counter + close(t.Ch) } diff --git a/common/throttle_timer.go b/common/throttle_timer.go index a5bd6ded8..ab2ad2e62 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -13,21 +13,20 @@ at most once every "dur". 
type ThrottleTimer struct { Name string Ch <-chan struct{} - input chan throttleCommand + input chan command output chan<- struct{} dur time.Duration - timer *time.Timer - isSet bool - stopped bool + timer *time.Timer + isSet bool } -type throttleCommand int8 +type command int32 const ( - Set throttleCommand = iota + Set command = iota Unset - TQuit + Quit ) // NewThrottleTimer creates a new ThrottleTimer. @@ -37,7 +36,7 @@ func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { Name: name, Ch: c, dur: dur, - input: make(chan throttleCommand), + input: make(chan command), output: c, timer: time.NewTimer(dur), } @@ -75,14 +74,14 @@ func (t *ThrottleTimer) trySend() { // all modifications of the internal state of ThrottleTimer // happen in this method. It is only called from the run goroutine // so we avoid any race conditions -func (t *ThrottleTimer) processInput(cmd throttleCommand) (shutdown bool) { +func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { switch cmd { case Set: if !t.isSet { t.isSet = true t.timer.Reset(t.dur) } - case TQuit: + case Quit: shutdown = true fallthrough case Unset: @@ -120,10 +119,9 @@ func (t *ThrottleTimer) Unset() { // For ease of stopping services before starting them, we ignore Stop on nil // ThrottleTimers. 
func (t *ThrottleTimer) Stop() bool { - if t == nil || t.stopped { + if t == nil { return false } - t.input <- TQuit - t.stopped = true + t.input <- Quit return true } diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index 94ec1b43c..a1b6606f5 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -95,6 +95,4 @@ func TestThrottle(test *testing.T) { stopped := t.Stop() assert.True(stopped) - // extra calls to stop don't block - t.Stop() } From e17e8e425f43890b207e5e316f5190d278e849c3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 19 Dec 2017 16:23:20 -0600 Subject: [PATCH 322/515] Revert "Refactor throttle timer" --- common/repeat_timer_test.go | 2 +- common/throttle_timer.go | 120 ++++++++++------------------------ common/throttle_timer_test.go | 24 +------ 3 files changed, 37 insertions(+), 109 deletions(-) diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 87f34b950..9f03f41df 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -43,7 +43,7 @@ func TestRepeat(test *testing.T) { short := time.Duration(20) * time.Millisecond // delay waits for cnt durations, an a little extra delay := func(cnt int) time.Duration { - return time.Duration(cnt)*dur + time.Duration(5)*time.Millisecond + return time.Duration(cnt)*dur + time.Millisecond } t := NewRepeatTimer("bar", dur) diff --git a/common/throttle_timer.go b/common/throttle_timer.go index ab2ad2e62..38ef4e9a3 100644 --- a/common/throttle_timer.go +++ b/common/throttle_timer.go @@ -1,6 +1,7 @@ package common import ( + "sync" "time" ) @@ -11,117 +12,64 @@ If a long continuous burst of .Set() calls happens, ThrottleTimer fires at most once every "dur". 
*/ type ThrottleTimer struct { - Name string - Ch <-chan struct{} - input chan command - output chan<- struct{} - dur time.Duration + Name string + Ch chan struct{} + quit chan struct{} + dur time.Duration + mtx sync.Mutex timer *time.Timer isSet bool } -type command int32 - -const ( - Set command = iota - Unset - Quit -) - -// NewThrottleTimer creates a new ThrottleTimer. func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - c := make(chan struct{}) - var t = &ThrottleTimer{ - Name: name, - Ch: c, - dur: dur, - input: make(chan command), - output: c, - timer: time.NewTimer(dur), - } + var ch = make(chan struct{}) + var quit = make(chan struct{}) + var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + t.mtx.Lock() + t.timer = time.AfterFunc(dur, t.fireRoutine) + t.mtx.Unlock() t.timer.Stop() - go t.run() return t } -func (t *ThrottleTimer) run() { - for { - select { - case cmd := <-t.input: - // stop goroutine if the input says so - // don't close channels, as closed channels mess up select reads - if t.processInput(cmd) { - return - } - case <-t.timer.C: - t.trySend() - } - } -} - -// trySend performs non-blocking send on t.Ch -func (t *ThrottleTimer) trySend() { +func (t *ThrottleTimer) fireRoutine() { + t.mtx.Lock() + defer t.mtx.Unlock() select { - case t.output <- struct{}{}: + case t.Ch <- struct{}{}: t.isSet = false + case <-t.quit: + // do nothing default: - // if we just want to drop, replace this with t.isSet = false t.timer.Reset(t.dur) } } -// all modifications of the internal state of ThrottleTimer -// happen in this method. 
It is only called from the run goroutine -// so we avoid any race conditions -func (t *ThrottleTimer) processInput(cmd command) (shutdown bool) { - switch cmd { - case Set: - if !t.isSet { - t.isSet = true - t.timer.Reset(t.dur) - } - case Quit: - shutdown = true - fallthrough - case Unset: - if t.isSet { - t.isSet = false - t.timer.Stop() - } - default: - panic("unknown command!") - } - return shutdown -} - func (t *ThrottleTimer) Set() { - t.input <- Set + t.mtx.Lock() + defer t.mtx.Unlock() + if !t.isSet { + t.isSet = true + t.timer.Reset(t.dur) + } } func (t *ThrottleTimer) Unset() { - t.input <- Unset + t.mtx.Lock() + defer t.mtx.Unlock() + t.isSet = false + t.timer.Stop() } -// Stop prevents the ThrottleTimer from firing. It always returns true. Stop does not -// close the channel, to prevent a read from the channel succeeding -// incorrectly. -// -// To prevent a timer created with NewThrottleTimer from firing after a call to -// Stop, check the return value and drain the channel. -// -// For example, assuming the program has not received from t.C already: -// -// if !t.Stop() { -// <-t.C -// } -// -// For ease of stopping services before starting them, we ignore Stop on nil -// ThrottleTimers. 
+// For ease of .Stop()'ing services before .Start()'ing them, +// we ignore .Stop()'s on nil ThrottleTimers func (t *ThrottleTimer) Stop() bool { if t == nil { return false } - t.input <- Quit - return true + close(t.quit) + t.mtx.Lock() + defer t.mtx.Unlock() + return t.timer.Stop() } diff --git a/common/throttle_timer_test.go b/common/throttle_timer_test.go index a1b6606f5..00f5abdec 100644 --- a/common/throttle_timer_test.go +++ b/common/throttle_timer_test.go @@ -10,7 +10,7 @@ import ( ) type thCounter struct { - input <-chan struct{} + input chan struct{} mtx sync.Mutex count int } @@ -31,9 +31,6 @@ func (c *thCounter) Count() int { // Read should run in a go-routine and // updates count by one every time a packet comes in func (c *thCounter) Read() { - // note, since this channel never closes, this will never end - // if thCounter was used in anything beyond trivial test cases. - // it would have to be smarter. for range c.input { c.Increment() } @@ -44,7 +41,6 @@ func TestThrottle(test *testing.T) { ms := 50 delay := time.Duration(ms) * time.Millisecond - shortwait := time.Duration(ms/2) * time.Millisecond longwait := time.Duration(2) * delay t := NewThrottleTimer("foo", delay) @@ -69,21 +65,6 @@ func TestThrottle(test *testing.T) { time.Sleep(longwait) assert.Equal(2, c.Count()) - // keep cancelling before it is ready - for i := 0; i < 10; i++ { - t.Set() - time.Sleep(shortwait) - t.Unset() - } - time.Sleep(longwait) - assert.Equal(2, c.Count()) - - // a few unsets do nothing... 
- for i := 0; i < 5; i++ { - t.Unset() - } - assert.Equal(2, c.Count()) - // send 12, over 2 delay sections, adds 3 short := time.Duration(ms/5) * time.Millisecond for i := 0; i < 13; i++ { @@ -93,6 +74,5 @@ func TestThrottle(test *testing.T) { time.Sleep(longwait) assert.Equal(5, c.Count()) - stopped := t.Stop() - assert.True(stopped) + close(t.Ch) } From ca56a274bda79ca6367eac339e837c8042385c1e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 19 Dec 2017 20:33:34 -0800 Subject: [PATCH 323/515] Nil bytes are OK for Get/Set etc And s/Release/Close/g --- db/backend_test.go | 15 ++++++++------- db/c_level_db.go | 18 +++++++++--------- db/mem_db.go | 4 ++-- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index e103843dc..0f4346f2e 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -21,27 +21,27 @@ func testBackendGetSetDelete(t *testing.T, backend string) { defer dir.Close() db := NewDB("testdb", backend, dirname) - // A nonexistent key should return nil, even if the key is empty. + // A nonexistent key should return nil, even if the key is empty require.Nil(t, db.Get([]byte(""))) - // A nonexistent key should return nil, even if the key is nil. + // A nonexistent key should return nil, even if the key is nil require.Nil(t, db.Get(nil)) // A nonexistent key should return nil. key := []byte("abc") require.Nil(t, db.Get(key)) - // Set empty ("") + // Set empty value. db.Set(key, []byte("")) require.NotNil(t, db.Get(key)) require.Empty(t, db.Get(key)) - // Set empty (nil) + // Set nil value. db.Set(key, nil) require.NotNil(t, db.Get(key)) require.Empty(t, db.Get(key)) - // Delete + // Delete. db.Delete(key) require.Nil(t, db.Get(key)) } @@ -62,12 +62,13 @@ func withDB(t *testing.T, creator dbCreator, fn func(DB)) { } func TestBackendsNilKeys(t *testing.T) { - // test all backends. - // nil keys are treated as the empty key for most operations. + + // Test all backends. 
for dbType, creator := range backends { withDB(t, creator, func(db DB) { t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + // Nil keys are treated as the empty key for most operations. expect := func(key, value []byte) { if len(key) == 0 { // nil or empty assert.Equal(t, db.Get(nil), db.Get([]byte(""))) diff --git a/db/c_level_db.go b/db/c_level_db.go index c9f8d419b..7910628bf 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -199,12 +199,12 @@ func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse b } } -func (itr *cLevelDBIterator) Domain() ([]byte, []byte) { +func (itr cLevelDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } -func (itr *cLevelDBIterator) Valid() bool { - +func (itr cLevelDBIterator) Valid() bool { + // Once invalid, forever invalid. if itr.isInvalid { return false @@ -227,33 +227,33 @@ func (itr *cLevelDBIterator) Valid() bool { return false } - // Valid + // It's valid. return true } -func (itr *cLevelDBIterator) Key() []byte { +func (itr cLevelDBIterator) Key() []byte { itr.assertNoError() itr.assertIsValid() return itr.source.Key() } -func (itr *cLevelDBIterator) Value() []byte { +func (itr cLevelDBIterator) Value() []byte { itr.assertNoError() itr.assertIsValid() return itr.source.Value() } -func (itr *cLevelDBIterator) Next() { +func (itr cLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() itr.source.Next() } -func (itr *cLevelDBIterator) Close() { +func (itr cLevelDBIterator) Close() { itr.source.Close() } -func (itr *cLevelDBIterator) assertNoError() { +func (itr cLevelDBIterator) assertNoError() { if err := itr.source.GetError(); err != nil { panic(err) } diff --git a/db/mem_db.go b/db/mem_db.go index e9d9174dc..e2470d7f2 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -30,7 +30,7 @@ func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() key = nonNilBytes(key) - + return db.db[string(key)] } @@ -215,4 +215,4 @@ func (itr *memDBIterator) 
assertIsValid() { if !itr.Valid() { panic("memDBIterator is invalid") } -} +} \ No newline at end of file From b70ae4919befb6ae3e5cb40ae8174e122e771d08 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 19 Dec 2017 20:47:22 -0800 Subject: [PATCH 324/515] Update glide file --- glide.lock | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/glide.lock b/glide.lock index e87782d21..f541f98e3 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-12-17T12:50:35.983353926-08:00 +updated: 2017-12-19T20:38:52.947516911-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e3b2152e0063c5f05efea89ecbe297852af2a92d + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -18,7 +18,7 @@ imports: - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -39,19 +39,21 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mattn/go-colorable version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 
4e9e0ee19b60b13eb79915933f44d8ed5f268bdd + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -61,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: adf24ef3f94bd13ec4163060b21a5678f22b429b + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -80,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b6fc872b42d41158a60307db4da051dd6f179415 + version: 27be46e25124ddf775e23317a83647ce62a93f6b subpackages: - data - data/base58 @@ -89,22 +91,22 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/go-playground/validator.v9 - version: 61caf9d3038e1af346dbf5c2e16f6678e1548364 + version: 1304298bf10d085adec514b076772a79c9cadb6b - name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: 
github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 From a25ed5ba1b0124f82f77b722cf3225cf4b3f18f5 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 21 Dec 2017 10:02:25 -0500 Subject: [PATCH 325/515] cmn: fix race condition in prng --- common/random.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/random.go b/common/random.go index 9df55ff81..ca71b6143 100644 --- a/common/random.go +++ b/common/random.go @@ -40,7 +40,7 @@ func RandStr(length int) string { chars := []byte{} MAIN_LOOP: for { - val := prng.Int63() + val := RandInt63() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars From b0b740210c60b7fc789382ff3a709426eb71903d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 21 Dec 2017 11:15:17 -0500 Subject: [PATCH 326/515] cmn: fix repeate timer test with manual ticker --- common/repeat_timer.go | 86 +++++++++++++++++++++++++++++++++---- common/repeat_timer_test.go | 81 ++++++++++++++++------------------ 2 files changed, 114 insertions(+), 53 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index d7d9154d4..1500e95d1 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -5,6 +5,72 @@ import ( "time" ) +// Ticker is a basic ticker interface. +type Ticker interface { + Chan() <-chan time.Time + Stop() + Reset() +} + +// DefaultTicker wraps the stdlibs Ticker implementation. 
+type DefaultTicker struct { + t *time.Ticker + dur time.Duration +} + +// NewDefaultTicker returns a new DefaultTicker +func NewDefaultTicker(dur time.Duration) *DefaultTicker { + return &DefaultTicker{ + time.NewTicker(dur), + dur, + } +} + +// Implements Ticker +func (t *DefaultTicker) Chan() <-chan time.Time { + return t.t.C +} + +// Implements Ticker +func (t *DefaultTicker) Stop() { + t.t.Stop() + t.t = nil +} + +// Implements Ticker +func (t *DefaultTicker) Reset() { + t.t = time.NewTicker(t.dur) +} + +// ManualTicker wraps a channel that can be manually sent on +type ManualTicker struct { + ch chan time.Time +} + +// NewManualTicker returns a new ManualTicker +func NewManualTicker(ch chan time.Time) *ManualTicker { + return &ManualTicker{ + ch: ch, + } +} + +// Implements Ticker +func (t *ManualTicker) Chan() <-chan time.Time { + return t.ch +} + +// Implements Ticker +func (t *ManualTicker) Stop() { + // noop +} + +// Implements Ticker +func (t *ManualTicker) Reset() { + // noop +} + +//--------------------------------------------------------------------- + /* RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. It's good for keeping connections alive. @@ -15,30 +81,35 @@ type RepeatTimer struct { mtx sync.Mutex name string - ticker *time.Ticker + ticker Ticker quit chan struct{} wg *sync.WaitGroup - dur time.Duration } +// NewRepeatTimer returns a RepeatTimer with the DefaultTicker. func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { + ticker := NewDefaultTicker(dur) + return NewRepeatTimerWithTicker(name, ticker) +} + +// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker. 
+func NewRepeatTimerWithTicker(name string, ticker Ticker) *RepeatTimer { var t = &RepeatTimer{ Ch: make(chan time.Time), - ticker: time.NewTicker(dur), + ticker: ticker, quit: make(chan struct{}), wg: new(sync.WaitGroup), name: name, - dur: dur, } t.wg.Add(1) go t.fireRoutine(t.ticker) return t } -func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { +func (t *RepeatTimer) fireRoutine(ticker Ticker) { for { select { - case t_ := <-ticker.C: + case t_ := <-ticker.Chan(): t.Ch <- t_ case <-t.quit: // needed so we know when we can reset t.quit @@ -55,7 +126,7 @@ func (t *RepeatTimer) Reset() { t.mtx.Lock() // Lock defer t.mtx.Unlock() - t.ticker = time.NewTicker(t.dur) + t.ticker.Reset() t.quit = make(chan struct{}) t.wg.Add(1) go t.fireRoutine(t.ticker) @@ -80,7 +151,6 @@ func (t *RepeatTimer) Stop() bool { } close(t.quit) t.wg.Wait() // must wait for quit to close else we race Reset - t.ticker = nil } return exists } diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 9f03f41df..98d991e9c 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -1,7 +1,6 @@ package common import ( - "sync" "testing" "time" @@ -9,69 +8,61 @@ import ( asrt "github.com/stretchr/testify/assert" ) -type rCounter struct { - input chan time.Time - mtx sync.Mutex - count int -} - -func (c *rCounter) Increment() { - c.mtx.Lock() - c.count++ - c.mtx.Unlock() -} - -func (c *rCounter) Count() int { - c.mtx.Lock() - val := c.count - c.mtx.Unlock() - return val -} - -// Read should run in a go-routine and -// updates count by one every time a packet comes in -func (c *rCounter) Read() { - for range c.input { - c.Increment() - } -} - +// NOTE: this only tests with the ManualTicker. +// How do you test a real-clock ticker properly? 
func TestRepeat(test *testing.T) { assert := asrt.New(test) - dur := time.Duration(50) * time.Millisecond - short := time.Duration(20) * time.Millisecond - // delay waits for cnt durations, an a little extra - delay := func(cnt int) time.Duration { - return time.Duration(cnt)*dur + time.Millisecond + ch := make(chan time.Time, 100) + // tick fires cnt times on ch + tick := func(cnt int) { + for i := 0; i < cnt; i++ { + ch <- time.Now() + } } - t := NewRepeatTimer("bar", dur) + tock := func(test *testing.T, t *RepeatTimer, cnt int) { + for i := 0; i < cnt; i++ { + after := time.After(time.Second * 2) + select { + case <-t.Ch: + case <-after: + test.Fatal("expected ticker to fire") + } + } + done := true + select { + case <-t.Ch: + done = false + default: + } + assert.True(done) + } + + ticker := NewManualTicker(ch) + t := NewRepeatTimerWithTicker("bar", ticker) // start at 0 - c := &rCounter{input: t.Ch} - go c.Read() - assert.Equal(0, c.Count()) + tock(test, t, 0) // wait for 4 periods - time.Sleep(delay(4)) - assert.Equal(4, c.Count()) + tick(4) + tock(test, t, 4) // keep reseting leads to no firing for i := 0; i < 20; i++ { - time.Sleep(short) + time.Sleep(time.Millisecond) t.Reset() } - assert.Equal(4, c.Count()) + tock(test, t, 0) // after this, it still works normal - time.Sleep(delay(2)) - assert.Equal(6, c.Count()) + tick(2) + tock(test, t, 2) // after a stop, nothing more is sent stopped := t.Stop() assert.True(stopped) - time.Sleep(delay(7)) - assert.Equal(6, c.Count()) + tock(test, t, 0) // close channel to stop counter close(t.Ch) From e2d7f1aa41dde5f29057dd08e64371a574b84c86 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 21 Dec 2017 14:21:15 -0500 Subject: [PATCH 327/515] cmn: fix race --- common/repeat_timer.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 1500e95d1..0bc4d87b4 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -102,14 +102,14 @@ 
func NewRepeatTimerWithTicker(name string, ticker Ticker) *RepeatTimer { name: name, } t.wg.Add(1) - go t.fireRoutine(t.ticker) + go t.fireRoutine(t.ticker.Chan()) return t } -func (t *RepeatTimer) fireRoutine(ticker Ticker) { +func (t *RepeatTimer) fireRoutine(ch <-chan time.Time) { for { select { - case t_ := <-ticker.Chan(): + case t_ := <-ch: t.Ch <- t_ case <-t.quit: // needed so we know when we can reset t.quit @@ -129,7 +129,7 @@ func (t *RepeatTimer) Reset() { t.ticker.Reset() t.quit = make(chan struct{}) t.wg.Add(1) - go t.fireRoutine(t.ticker) + go t.fireRoutine(t.ticker.Chan()) } // For ease of .Stop()'ing services before .Start()'ing them, From f2a8e95248ca42cf29885a44bc1362cf78d364fe Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 11:40:05 -0800 Subject: [PATCH 328/515] Add KI64Pair(s) --- common/kvpair.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/common/kvpair.go b/common/kvpair.go index b9e45733f..5faa534df 100644 --- a/common/kvpair.go +++ b/common/kvpair.go @@ -5,6 +5,9 @@ import ( "sort" ) +//---------------------------------------- +// KVPair + type KVPair struct { Key Bytes Value Bytes @@ -28,3 +31,30 @@ func (kvs KVPairs) Less(i, j int) bool { } func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } func (kvs KVPairs) Sort() { sort.Sort(kvs) } + +//---------------------------------------- +// KI64Pair + +type KI64Pair struct { + Key Bytes + Value int64 +} + +type KI64Pairs []KI64Pair + +// Sorting +func (kvs KI64Pairs) Len() int { return len(kvs) } +func (kvs KI64Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return kvs[i].Value < kvs[j].Value + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } From 797bcdd9e05f81a7e5c052384bc83ac71938753b Mon Sep 17 00:00:00 
2001 From: Jae Kwon Date: Mon, 25 Dec 2017 17:46:21 -0800 Subject: [PATCH 329/515] Remove common/http --- common/http.go | 153 --------------------------- common/http_test.go | 250 -------------------------------------------- glide.lock | 28 ++--- glide.yaml | 1 - 4 files changed, 10 insertions(+), 422 deletions(-) delete mode 100644 common/http.go delete mode 100644 common/http_test.go diff --git a/common/http.go b/common/http.go deleted file mode 100644 index 56b5b6c63..000000000 --- a/common/http.go +++ /dev/null @@ -1,153 +0,0 @@ -package common - -import ( - "encoding/json" - "io" - "net/http" - - "gopkg.in/go-playground/validator.v9" - - "github.com/pkg/errors" -) - -type ErrorResponse struct { - Success bool `json:"success,omitempty"` - - // Err is the error message if Success is false - Err string `json:"error,omitempty"` - - // Code is set if Success is false - Code int `json:"code,omitempty"` -} - -// ErrorWithCode makes an ErrorResponse with the -// provided err's Error() content, and status code. -// It panics if err is nil. -func ErrorWithCode(err error, code int) *ErrorResponse { - return &ErrorResponse{ - Err: err.Error(), - Code: code, - } -} - -// Ensure that ErrorResponse implements error -var _ error = (*ErrorResponse)(nil) - -func (er *ErrorResponse) Error() string { - return er.Err -} - -// Ensure that ErrorResponse implements httpCoder -var _ httpCoder = (*ErrorResponse)(nil) - -func (er *ErrorResponse) HTTPCode() int { - return er.Code -} - -var errNilBody = errors.Errorf("expecting a non-nil body") - -// FparseJSON unmarshals into save, the body of the provided reader. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. 
-func FparseJSON(r io.Reader, save interface{}) error { - if r == nil { - return errors.Wrap(errNilBody, "Reader") - } - - dec := json.NewDecoder(r) - if err := dec.Decode(save); err != nil { - return errors.Wrap(err, "Decode/Unmarshal") - } - return nil -} - -// ParseRequestJSON unmarshals into save, the body of the -// request. It closes the body of the request after parsing. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func ParseRequestJSON(r *http.Request, save interface{}) error { - if r == nil || r.Body == nil { - return errNilBody - } - defer r.Body.Close() - - return FparseJSON(r.Body, save) -} - -// ParseRequestAndValidateJSON unmarshals into save, the body of the -// request and invokes a validator on the saved content. To ensure -// validation, make sure to set tags "validate" on your struct as -// per https://godoc.org/gopkg.in/go-playground/validator.v9. -// It closes the body of the request after parsing. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func ParseRequestAndValidateJSON(r *http.Request, save interface{}) error { - if r == nil || r.Body == nil { - return errNilBody - } - defer r.Body.Close() - - return FparseAndValidateJSON(r.Body, save) -} - -// FparseAndValidateJSON like FparseJSON unmarshals into save, -// the body of the provided reader. However, it invokes the validator -// to check the set validators on your struct fields as per -// per https://godoc.org/gopkg.in/go-playground/validator.v9. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. 
-func FparseAndValidateJSON(r io.Reader, save interface{}) error { - if err := FparseJSON(r, save); err != nil { - return err - } - return validate(save) -} - -var theValidator = validator.New() - -func validate(obj interface{}) error { - return errors.Wrap(theValidator.Struct(obj), "Validate") -} - -// WriteSuccess JSON marshals the content provided, to an HTTP -// response, setting the provided status code and setting header -// "Content-Type" to "application/json". -func WriteSuccess(w http.ResponseWriter, data interface{}) { - WriteCode(w, data, 200) -} - -// WriteCode JSON marshals content, to an HTTP response, -// setting the provided status code, and setting header -// "Content-Type" to "application/json". If JSON marshalling fails -// with an error, WriteCode instead writes out the error invoking -// WriteError. -func WriteCode(w http.ResponseWriter, out interface{}, code int) { - blob, err := json.MarshalIndent(out, "", " ") - if err != nil { - WriteError(w, err) - } else { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(blob) - } -} - -type httpCoder interface { - HTTPCode() int -} - -// WriteError is a convenience function to write out an -// error to an http.ResponseWriter, to send out an error -// that's structured as JSON i.e the form -// {"error": sss, "code": ddd} -// If err implements the interface HTTPCode() int, -// it will use that status code otherwise, it will -// set code to be http.StatusBadRequest -func WriteError(w http.ResponseWriter, err error) { - code := http.StatusBadRequest - if httpC, ok := err.(httpCoder); ok { - code = httpC.HTTPCode() - } - - WriteCode(w, ErrorWithCode(err, code), code) -} diff --git a/common/http_test.go b/common/http_test.go deleted file mode 100644 index 4272f6062..000000000 --- a/common/http_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package common_test - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - 
"strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tmlibs/common" -) - -func TestWriteSuccess(t *testing.T) { - w := httptest.NewRecorder() - common.WriteSuccess(w, "foo") - assert.Equal(t, w.Code, 200, "should get a 200") -} - -var blankErrResponse = new(common.ErrorResponse) - -func TestWriteError(t *testing.T) { - tests := [...]struct { - msg string - code int - }{ - 0: { - msg: "this is a message", - code: 419, - }, - } - - for i, tt := range tests { - w := httptest.NewRecorder() - msg := tt.msg - - // First check without a defined code, should send back a 400 - common.WriteError(w, errors.New(msg)) - assert.Equal(t, w.Code, http.StatusBadRequest, "#%d: should get a 400", i) - blob, err := ioutil.ReadAll(w.Body) - if err != nil { - assert.Fail(t, "expecting a successful ioutil.ReadAll", "#%d", i) - continue - } - - recv := new(common.ErrorResponse) - if err := json.Unmarshal(blob, recv); err != nil { - assert.Fail(t, "expecting a successful json.Unmarshal", "#%d", i) - continue - } - - assert.Equal(t, reflect.DeepEqual(recv, blankErrResponse), false, "expecting a non-blank error response") - - // Now test with an error that's .HTTPCode() int conforming - - // Reset w - w = httptest.NewRecorder() - - common.WriteError(w, common.ErrorWithCode(errors.New("foo"), tt.code)) - assert.Equal(t, w.Code, tt.code, "case #%d", i) - } -} - -type marshalFailer struct{} - -var errFooFailed = errors.New("foo failed here") - -func (mf *marshalFailer) MarshalJSON() ([]byte, error) { - return nil, errFooFailed -} - -func TestWriteCode(t *testing.T) { - codes := [...]int{ - 0: http.StatusOK, - 1: http.StatusBadRequest, - 2: http.StatusUnauthorized, - 3: http.StatusInternalServerError, - } - - for i, code := range codes { - w := httptest.NewRecorder() - common.WriteCode(w, "foo", code) - assert.Equal(t, w.Code, code, "#%d", i) - - // Then for the failed JSON marshaling - w = 
httptest.NewRecorder() - common.WriteCode(w, &marshalFailer{}, code) - wantCode := http.StatusBadRequest - assert.Equal(t, w.Code, wantCode, "#%d", i) - assert.True(t, strings.Contains(w.Body.String(), errFooFailed.Error()), - "#%d: expected %q in the error message", i, errFooFailed) - } -} - -type saver struct { - Foo int `json:"foo" validate:"min=10"` - Bar string `json:"bar"` -} - -type rcloser struct { - closeOnce sync.Once - body *bytes.Buffer - closeChan chan bool -} - -var errAlreadyClosed = errors.New("already closed") - -func (rc *rcloser) Close() error { - var err = errAlreadyClosed - rc.closeOnce.Do(func() { - err = nil - rc.closeChan <- true - close(rc.closeChan) - }) - return err -} - -func (rc *rcloser) Read(b []byte) (int, error) { - return rc.body.Read(b) -} - -var _ io.ReadCloser = (*rcloser)(nil) - -func makeReq(strBody string) (*http.Request, <-chan bool) { - closeChan := make(chan bool, 1) - buf := new(bytes.Buffer) - buf.Write([]byte(strBody)) - req := &http.Request{ - Header: make(http.Header), - Body: &rcloser{body: buf, closeChan: closeChan}, - } - return req, closeChan -} - -func TestParseRequestJSON(t *testing.T) { - tests := [...]struct { - body string - wantErr bool - useNil bool - }{ - 0: {wantErr: true, body: ``}, - 1: {body: `{}`}, - 2: {body: `{"foo": 2}`}, // Not that the validate tags don't matter here since we are just parsing - 3: {body: `{"foo": "abcd"}`, wantErr: true}, - 4: {useNil: true, wantErr: true}, - } - - for i, tt := range tests { - req, closeChan := makeReq(tt.body) - if tt.useNil { - req.Body = nil - } - sav := new(saver) - err := common.ParseRequestJSON(req, sav) - if tt.wantErr { - assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) - continue - } - assert.Equal(t, err, nil, "#%d: want nil error", i) - wasClosed := <-closeChan - assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) - } -} - -func TestFparseJSON(t *testing.T) { - r1 := strings.NewReader(`{"foo": 1}`) - sav := new(saver) - 
require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") - r2 := strings.NewReader(`{"bar": "blockchain"}`) - require.Equal(t, common.FparseJSON(r2, sav), nil, "expecting successful parsing") - require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 1, Bar: "blockchain"}), true, "should have parsed both") - - // Now with a nil body - require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") -} - -func TestFparseAndValidateJSON(t *testing.T) { - r1 := strings.NewReader(`{"foo": 1}`) - sav := new(saver) - require.NotEqual(t, common.FparseAndValidateJSON(r1, sav), nil, "expecting validation to fail") - r1 = strings.NewReader(`{"foo": 100}`) - require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") - r2 := strings.NewReader(`{"bar": "blockchain"}`) - require.Equal(t, common.FparseAndValidateJSON(r2, sav), nil, "expecting successful parsing") - require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 100, Bar: "blockchain"}), true, "should have parsed both") - - // Now with a nil body - require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") -} - -var blankSaver = new(saver) - -func TestParseAndValidateRequestJSON(t *testing.T) { - tests := [...]struct { - body string - wantErr bool - useNil bool - }{ - 0: {wantErr: true, body: ``}, - 1: {body: `{}`, wantErr: true}, // Here it should fail since Foo doesn't meet the minimum value - 2: {body: `{"foo": 2}`, wantErr: true}, // Here validation should fail - 3: {body: `{"foo": "abcd"}`, wantErr: true}, - 4: {useNil: true, wantErr: true}, - 5: {body: `{"foo": 100}`}, // Must succeed - } - - for i, tt := range tests { - req, closeChan := makeReq(tt.body) - if tt.useNil { - req.Body = nil - } - sav := new(saver) - err := common.ParseRequestAndValidateJSON(req, sav) - if tt.wantErr { - assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) - continue - } - - assert.Equal(t, err, nil, "#%d: want nil error", i) - assert.False(t, 
reflect.DeepEqual(blankSaver, sav), "#%d: expecting a set saver", i) - - wasClosed := <-closeChan - assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) - } -} - -func TestErrorWithCode(t *testing.T) { - tests := [...]struct { - code int - err error - }{ - 0: {code: 500, err: errors.New("funky")}, - 1: {code: 406, err: errors.New("purist")}, - } - - for i, tt := range tests { - errRes := common.ErrorWithCode(tt.err, tt.code) - assert.Equal(t, errRes.Error(), tt.err.Error(), "#%d: expecting the error values to be equal", i) - assert.Equal(t, errRes.Code, tt.code, "expecting the same status code", i) - assert.Equal(t, errRes.HTTPCode(), tt.code, "expecting the same status code", i) - } -} diff --git a/glide.lock b/glide.lock index f541f98e3..83c8551e0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,24 +1,18 @@ -hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-12-19T20:38:52.947516911-08:00 +hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 +updated: 2017-12-25T17:45:52.357002873-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: e3b2152e0063c5f05efea89ecbe297852af2a92d subpackages: - log - log/level - log/term - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-playground/locales - version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 - subpackages: - - currency -- name: github.com/go-playground/universal-translator - version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -51,7 +45,7 @@ imports: - name: github.com/pelletier/go-toml version: 
13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d + version: f15c970de5b76fac0b59abb32d62c17cc7bed265 - name: github.com/spf13/afero version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: @@ -63,11 +57,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: adf24ef3f94bd13ec4163060b21a5678f22b429b subpackages: - leveldb - leveldb/cache @@ -82,7 +76,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b + version: 5ab49b4c6ad674da6b81442911cf713ef0afb544 subpackages: - data - data/base58 @@ -91,7 +85,7 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - ripemd160 - name: golang.org/x/sys @@ -99,12 +93,10 @@ imports: subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 subpackages: - transform - unicode/norm -- name: gopkg.in/go-playground/validator.v9 - version: 1304298bf10d085adec514b076772a79c9cadb6b - name: gopkg.in/yaml.v2 version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: diff --git a/glide.yaml b/glide.yaml index 22825a273..d8bdd5872 100644 --- a/glide.yaml +++ b/glide.yaml @@ -23,7 +23,6 @@ import: - package: golang.org/x/crypto subpackages: - ripemd160 -- package: gopkg.in/go-playground/validator.v9 testImport: - package: github.com/stretchr/testify version: ^1.1.4 From 
2fd8f35b74e80382e276393b6edaa4464642a9df Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 21:12:14 -0800 Subject: [PATCH 330/515] Fix #112 by using RWMutex per element --- clist/clist.go | 222 +++++++++++++++++++++++++++++-------------------- 1 file changed, 130 insertions(+), 92 deletions(-) diff --git a/clist/clist.go b/clist/clist.go index 5295dd995..e8cf6b93c 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -11,36 +11,38 @@ to ensure garbage collection of removed elements. import ( "sync" - "sync/atomic" - "unsafe" ) // CElement is an element of a linked-list // Traversal from a CElement are goroutine-safe. type CElement struct { - prev unsafe.Pointer + mtx sync.RWMutex + prev *CElement prevWg *sync.WaitGroup - next unsafe.Pointer + next *CElement nextWg *sync.WaitGroup - removed uint32 - Value interface{} + removed bool + + Value interface{} // immutable } // Blocking implementation of Next(). // May return nil iff CElement was tail and got removed. func (e *CElement) NextWait() *CElement { for { - e.nextWg.Wait() - next := e.Next() - if next == nil { - if e.Removed() { - return nil - } else { - continue - } - } else { + e.mtx.RLock() + next := e.next + nextWg := e.nextWg + removed := e.removed + e.mtx.RUnlock() + + if next != nil || removed { return next } + + nextWg.Wait() + // e.next doesn't necessarily exist here. + // That's why we need to continue a for-loop. } } @@ -48,82 +50,113 @@ func (e *CElement) NextWait() *CElement { // May return nil iff CElement was head and got removed. func (e *CElement) PrevWait() *CElement { for { - e.prevWg.Wait() - prev := e.Prev() - if prev == nil { - if e.Removed() { - return nil - } else { - continue - } - } else { + e.mtx.RLock() + prev := e.prev + prevWg := e.prevWg + removed := e.removed + e.mtx.RUnlock() + + if prev != nil || removed { return prev } + + prevWg.Wait() } } // Nonblocking, may return nil if at the end. 
func (e *CElement) Next() *CElement { - return (*CElement)(atomic.LoadPointer(&e.next)) + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.next } // Nonblocking, may return nil if at the end. func (e *CElement) Prev() *CElement { - return (*CElement)(atomic.LoadPointer(&e.prev)) + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prev } func (e *CElement) Removed() bool { - return atomic.LoadUint32(&(e.removed)) > 0 + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.removed } func (e *CElement) DetachNext() { if !e.Removed() { panic("DetachNext() must be called after Remove(e)") } - atomic.StorePointer(&e.next, nil) + e.mtx.Lock() + defer e.mtx.Unlock() + + e.next = nil } func (e *CElement) DetachPrev() { if !e.Removed() { panic("DetachPrev() must be called after Remove(e)") } - atomic.StorePointer(&e.prev, nil) + e.mtx.Lock() + defer e.mtx.Unlock() + + e.prev = nil } -func (e *CElement) setNextAtomic(next *CElement) { - for { - oldNext := atomic.LoadPointer(&e.next) - if !atomic.CompareAndSwapPointer(&(e.next), oldNext, unsafe.Pointer(next)) { - continue - } - if next == nil && oldNext != nil { // We for-loop in NextWait() so race is ok - e.nextWg.Add(1) - } - if next != nil && oldNext == nil { - e.nextWg.Done() - } - return +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on nextWg. +func (e *CElement) SetNext(newNext *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldNext := e.next + e.next = newNext + if oldNext != nil && newNext == nil { + // See https://golang.org/pkg/sync/: + // + // If a WaitGroup is reused to wait for several independent sets of + // events, new Add calls must happen after all previous Wait calls have + // returned. + e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. 
+ } + if oldNext == nil && newNext != nil { + e.nextWg.Done() } } -func (e *CElement) setPrevAtomic(prev *CElement) { - for { - oldPrev := atomic.LoadPointer(&e.prev) - if !atomic.CompareAndSwapPointer(&(e.prev), oldPrev, unsafe.Pointer(prev)) { - continue - } - if prev == nil && oldPrev != nil { // We for-loop in PrevWait() so race is ok - e.prevWg.Add(1) - } - if prev != nil && oldPrev == nil { - e.prevWg.Done() - } - return +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on prevWg +func (e *CElement) SetPrev(newPrev *CElement) { + e.mtx.Lock() + defer e.mtx.Unlock() + + oldPrev := e.prev + e.prev = newPrev + if oldPrev != nil && newPrev == nil { + e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + } + if oldPrev == nil && newPrev != nil { + e.prevWg.Done() } } -func (e *CElement) setRemovedAtomic() { - atomic.StoreUint32(&(e.removed), 1) +func (e *CElement) SetRemoved() { + e.mtx.Lock() + defer e.mtx.Unlock() + + e.removed = true + + // This wakes up anyone waiting in either direction. + if e.prev == nil { + e.prevWg.Done() + } + if e.next == nil { + e.nextWg.Done() + } } //-------------------------------------------------------------------------------- @@ -132,7 +165,7 @@ func (e *CElement) setRemovedAtomic() { // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. 
type CList struct { - mtx sync.Mutex + mtx sync.RWMutex wg *sync.WaitGroup head *CElement // first element tail *CElement // last element @@ -142,6 +175,7 @@ type CList struct { func (l *CList) Init() *CList { l.mtx.Lock() defer l.mtx.Unlock() + l.wg = waitGroup1() l.head = nil l.tail = nil @@ -152,48 +186,55 @@ func (l *CList) Init() *CList { func New() *CList { return new(CList).Init() } func (l *CList) Len() int { - l.mtx.Lock() - defer l.mtx.Unlock() + l.mtx.RLock() + defer l.mtx.RUnlock() + return l.len } func (l *CList) Front() *CElement { - l.mtx.Lock() - defer l.mtx.Unlock() + l.mtx.RLock() + defer l.mtx.RUnlock() + return l.head } func (l *CList) FrontWait() *CElement { for { - l.mtx.Lock() + l.mtx.RLock() head := l.head wg := l.wg - l.mtx.Unlock() - if head == nil { - wg.Wait() - } else { + l.mtx.RUnlock() + + if head != nil { return head } + wg.Wait() + // l.head doesn't necessarily exist here. + // That's why we need to continue a for-loop. } } func (l *CList) Back() *CElement { - l.mtx.Lock() - defer l.mtx.Unlock() + l.mtx.RLock() + defer l.mtx.RUnlock() + return l.tail } func (l *CList) BackWait() *CElement { for { - l.mtx.Lock() + l.mtx.RLock() tail := l.tail wg := l.wg - l.mtx.Unlock() - if tail == nil { - wg.Wait() - } else { + l.mtx.RUnlock() + + if tail != nil { return tail } + wg.Wait() + // l.tail doesn't necessarily exist here. + // That's why we need to continue a for-loop. } } @@ -203,11 +244,12 @@ func (l *CList) PushBack(v interface{}) *CElement { // Construct a new element e := &CElement{ - prev: nil, - prevWg: waitGroup1(), - next: nil, - nextWg: waitGroup1(), - Value: v, + prev: nil, + prevWg: waitGroup1(), + next: nil, + nextWg: waitGroup1(), + removed: false, + Value: v, } // Release waiters on FrontWait/BackWait maybe @@ -221,9 +263,9 @@ func (l *CList) PushBack(v interface{}) *CElement { l.head = e l.tail = e } else { - l.tail.setNextAtomic(e) - e.setPrevAtomic(l.tail) - l.tail = e + e.SetPrev(l.tail) // We must init e first. 
+ l.tail.SetNext(e) // This will make e accessible. + l.tail = e // Update the list. } return e @@ -250,30 +292,26 @@ func (l *CList) Remove(e *CElement) interface{} { // If we're removing the only item, make CList FrontWait/BackWait wait. if l.len == 1 { - l.wg.Add(1) + l.wg = waitGroup1() // WaitGroups are difficult to re-use. } + + // Update l.len l.len -= 1 // Connect next/prev and set head/tail if prev == nil { l.head = next } else { - prev.setNextAtomic(next) + prev.SetNext(next) } if next == nil { l.tail = prev } else { - next.setPrevAtomic(prev) + next.SetPrev(prev) } // Set .Done() on e, otherwise waiters will wait forever. - e.setRemovedAtomic() - if prev == nil { - e.prevWg.Done() - } - if next == nil { - e.nextWg.Done() - } + e.SetRemoved() return e.Value } From 0f8ebd024db7f32ca0d94e7f3d13049ffcb70c09 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 22:28:15 -0800 Subject: [PATCH 331/515] Update clist.go Add more justification of synchrony primitives in documentation. --- clist/clist.go | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/clist/clist.go b/clist/clist.go index e8cf6b93c..02e31a509 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -1,20 +1,40 @@ package clist /* + The purpose of CList is to provide a goroutine-safe linked-list. This list can be traversed concurrently by any number of goroutines. However, removed CElements cannot be added back. NOTE: Not all methods of container/list are (yet) implemented. NOTE: Removed elements need to DetachPrev or DetachNext consistently to ensure garbage collection of removed elements. + */ import ( "sync" ) -// CElement is an element of a linked-list -// Traversal from a CElement are goroutine-safe. +/* + +CElement is an element of a linked-list +Traversal from a CElement are goroutine-safe. + +We can't avoid using WaitGroups or for-loops given the documentation +spec without re-implementing the primitives that already exist in +golang/sync. 
Notice that WaitGroup allows many go-routines to be +simultaneously released, which is what we want. Mutex doesn't do +this. RWMutex does this, but it's clumsy to use in the way that a +WaitGroup would be used -- and we'd end up having two RWMutex's for +prev/next each, which is doubly confusing. + +sync.Cond would be sort-of useful, but we don't need a write-lock in +the for-loop. Use sync.Cond when you need serial access to the +"condition". In our case our condition is if `next != nil || removed`, +and there's no reason to serialize that condition for goroutines +waiting on NextWait() (since it's just a read operation). + +*/ type CElement struct { mtx sync.RWMutex prev *CElement From 6ec8c1602f22f41fc320da05c3a80acebf2c23bd Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 22:41:40 -0800 Subject: [PATCH 332/515] Update Makefile --- Makefile | 81 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index a24306f32..29a3ac7db 100644 --- a/Makefile +++ b/Makefile @@ -2,38 +2,69 @@ GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter + github.com/alecthomas/gometalinter.v2 +GOTOOLS_CHECK = glide gometalinter.v2 -REPO:=github.com/tendermint/tmlibs +all: check get_vendor_deps build test install metalinter -all: test +check: check_tools -NOVENDOR = go list github.com/tendermint/tmlibs/... | grep -v /vendor/ -test: - go test -tags gcc `glide novendor` +######################################## +### Build + +build: + # Nothing to build! + +install: + # Nothing to install! 
+ + +######################################## +### Tools & dependencies + +check_tools: + @# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" + +get_tools: + @echo "--> Installing tools" + go get -u -v $(GOTOOLS) + @gometalinter.v2 --install + +update_tools: + @echo "--> Updating tools" + @go get -u $(GOTOOLS) -get_vendor_deps: ensure_tools +get_vendor_deps: @rm -rf vendor/ @echo "--> Running glide install" @glide install -ensure_tools: - go get $(GOTOOLS) -metalinter: ensure_tools - @gometalinter --install - gometalinter --vendor --deadline=600s --enable-all --disable=lll ./... +######################################## +### Testing -metalinter_test: ensure_tools - @gometalinter --install - gometalinter --vendor --deadline=600s --disable-all \ +test: + go test -tags gcc `glide novendor` + + +######################################## +### Formatting, linting, and vetting + +fmt: + @go fmt ./... + +metalinter: + @echo "==> Running linter" + gometalinter.v2 --vendor --deadline=600s --disable-all \ + --enable=maligned \ --enable=deadcode \ - --enable=gas \ --enable=goconst \ + --enable=goimports \ --enable=gosimple \ --enable=ineffassign \ - --enable=interfacer \ --enable=megacheck \ --enable=misspell \ --enable=staticcheck \ @@ -43,13 +74,23 @@ metalinter_test: ensure_tools --enable=unused \ --enable=varcheck \ --enable=vetshadow \ - --enable=vet \ ./... - #--enable=aligncheck \ + #--enable=gas \ #--enable=dupl \ #--enable=errcheck \ #--enable=gocyclo \ - #--enable=goimports \ #--enable=golint \ <== comments on anything exported #--enable=gotype \ + #--enable=interfacer \ #--enable=unparam \ + #--enable=vet \ + +metalinter_all: + protoc $(INCLUDE) --lint_out=. types/*.proto + gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... 
+ + +# To avoid unintended conflicts with file names, always add to .PHONY +# unless there is a reason not to. +# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html +.PHONY: check build check_tools get_tools update_tools get_vendor_deps test fmt metalinter metalinter_all From bf644b098496cd766e7ab540898b1d3e25d11e77 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Dec 2017 00:36:58 -0800 Subject: [PATCH 333/515] Do not shadow assert --- common/bytes_test.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/common/bytes_test.go b/common/bytes_test.go index 0c0eacc33..2ad0e692c 100644 --- a/common/bytes_test.go +++ b/common/bytes_test.go @@ -10,23 +10,20 @@ import ( // This is a trivial test for protobuf compatibility. func TestMarshal(t *testing.T) { - assert := assert.New(t) - b := []byte("hello world") dataB := Bytes(b) b2, err := dataB.Marshal() - assert.Nil(err) - assert.Equal(b, b2) + assert.Nil(t, err) + assert.Equal(t, b, b2) var dataB2 Bytes err = (&dataB2).Unmarshal(b) - assert.Nil(err) - assert.Equal(dataB, dataB2) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) } // Test that the hex encoding works. 
func TestJSONMarshal(t *testing.T) { - assert := assert.New(t) type TestStruct struct { B1 []byte @@ -51,7 +48,7 @@ func TestJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(string(jsonBytes), tc.expected) + assert.Equal(t, string(jsonBytes), tc.expected) // TODO do fuzz testing to ensure that unmarshal fails @@ -61,8 +58,8 @@ func TestJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(ts2.B1, tc.input) - assert.Equal(ts2.B2, Bytes(tc.input)) + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, Bytes(tc.input)) }) } } From b25df389db3c98f4b964bd39511c199f02d07715 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Dec 2017 04:40:35 -0800 Subject: [PATCH 334/515] Remove Bytes, just use []byte; Use protobuf for KVPair/KI64Pair --- Makefile | 32 +++++++++++--- common/bytes.go | 53 ----------------------- common/bytes_test.go | 65 ---------------------------- common/kvpair.go | 13 ++++-- common/types.pb.go | 101 +++++++++++++++++++++++++++++++++++++++++++ common/types.proto | 24 ++++++++++ 6 files changed, 161 insertions(+), 127 deletions(-) delete mode 100644 common/bytes.go delete mode 100644 common/bytes_test.go create mode 100644 common/types.pb.go create mode 100644 common/types.proto diff --git a/Makefile b/Makefile index 29a3ac7db..af60f7314 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,12 @@ -.PHONY: all test get_vendor_deps ensure_tools - GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter.v2 -GOTOOLS_CHECK = glide gometalinter.v2 + github.com/alecthomas/gometalinter.v2 \ + github.com/gogo/protobuf/protoc-gen-gogo \ + github.com/gogo/protobuf/gogoproto +GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo +INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf -all: check get_vendor_deps build test install metalinter +all: check get_vendor_deps protoc build test install metalinter check: check_tools @@ -13,6 +14,15 @@ check: check_tools ######################################## ### Build +protoc: + ## If you get the following error, + ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" + ## See https://stackoverflow.com/a/25518702 + protoc $(INCLUDE) --gogo_out=plugins=grpc:. common/*.proto + @echo "--> adding nolint declarations to protobuf generated files" + @awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new + @mv common/types.pb.go.new common/types.pb.go + build: # Nothing to build! @@ -33,6 +43,16 @@ get_tools: go get -u -v $(GOTOOLS) @gometalinter.v2 --install +get_protoc: + @# https://github.com/google/protobuf/releases + curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \ + cd protobuf-3.4.1 && \ + DIST_LANG=cpp ./configure && \ + make && \ + make install && \ + cd .. && \ + rm -rf protobuf-3.4.1 + update_tools: @echo "--> Updating tools" @go get -u $(GOTOOLS) @@ -93,4 +113,4 @@ metalinter_all: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. 
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build check_tools get_tools update_tools get_vendor_deps test fmt metalinter metalinter_all +.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all diff --git a/common/bytes.go b/common/bytes.go deleted file mode 100644 index d9ede98df..000000000 --- a/common/bytes.go +++ /dev/null @@ -1,53 +0,0 @@ -package common - -import ( - "encoding/hex" - "fmt" - "strings" -) - -// The main purpose of Bytes is to enable HEX-encoding for json/encoding. -type Bytes []byte - -// Marshal needed for protobuf compatibility -func (b Bytes) Marshal() ([]byte, error) { - return b, nil -} - -// Unmarshal needed for protobuf compatibility -func (b *Bytes) Unmarshal(data []byte) error { - *b = data - return nil -} - -// This is the point of Bytes. -func (b Bytes) MarshalJSON() ([]byte, error) { - s := strings.ToUpper(hex.EncodeToString(b)) - jb := make([]byte, len(s)+2) - jb[0] = '"' - copy(jb[1:], []byte(s)) - jb[1] = '"' - return jb, nil -} - -// This is the point of Bytes. -func (b *Bytes) UnmarshalJSON(data []byte) error { - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return fmt.Errorf("Invalid hex string: %s", data) - } - bytes, err := hex.DecodeString(string(data[1 : len(data)-1])) - if err != nil { - return err - } - *b = bytes - return nil -} - -// Allow it to fulfill various interfaces in light-client, etc... -func (b Bytes) Bytes() []byte { - return b -} - -func (b Bytes) String() string { - return strings.ToUpper(hex.EncodeToString(b)) -} diff --git a/common/bytes_test.go b/common/bytes_test.go deleted file mode 100644 index 2ad0e692c..000000000 --- a/common/bytes_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package common - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -// This is a trivial test for protobuf compatibility. 
-func TestMarshal(t *testing.T) { - b := []byte("hello world") - dataB := Bytes(b) - b2, err := dataB.Marshal() - assert.Nil(t, err) - assert.Equal(t, b, b2) - - var dataB2 Bytes - err = (&dataB2).Unmarshal(b) - assert.Nil(t, err) - assert.Equal(t, dataB, dataB2) -} - -// Test that the hex encoding works. -func TestJSONMarshal(t *testing.T) { - - type TestStruct struct { - B1 []byte - B2 Bytes - } - - cases := []struct { - input []byte - expected string - }{ - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { - ts := TestStruct{B1: tc.input, B2: tc.input} - - // Test that it marshals correctly to JSON. - jsonBytes, err := json.Marshal(ts) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, string(jsonBytes), tc.expected) - - // TODO do fuzz testing to ensure that unmarshal fails - - // Test that unmarshaling works correctly. - ts2 := TestStruct{} - err = json.Unmarshal(jsonBytes, &ts2) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, ts2.B1, tc.input) - assert.Equal(t, ts2.B2, Bytes(tc.input)) - }) - } -} diff --git a/common/kvpair.go b/common/kvpair.go index 5faa534df..54c3a58c0 100644 --- a/common/kvpair.go +++ b/common/kvpair.go @@ -8,10 +8,14 @@ import ( //---------------------------------------- // KVPair +/* +Defined in types.proto + type KVPair struct { - Key Bytes - Value Bytes + Key []byte + Value []byte } +*/ type KVPairs []KVPair @@ -35,10 +39,13 @@ func (kvs KVPairs) Sort() { sort.Sort(kvs) } //---------------------------------------- // KI64Pair +/* +Defined in types.proto type KI64Pair struct { - Key Bytes + Key []byte Value int64 } +*/ type KI64Pairs []KI64Pair diff --git a/common/types.pb.go b/common/types.pb.go new file mode 100644 index 000000000..047b7aee2 --- /dev/null +++ b/common/types.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: common/types.proto + +/* +Package common is a generated protocol buffer package. + +It is generated from these files: + common/types.proto + +It has these top-level messages: + KVPair + KI64Pair +*/ +//nolint: gas +package common + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Define these here for compatibility but use tmlibs/common.KVPair. +type KVPair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. 
+type KI64Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +func (m *KI64Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KI64Pair) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*KVPair)(nil), "common.KVPair") + proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") +} + +func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd, + 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, + 0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, + 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68, + 0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e, + 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, + 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99, + 0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00, +} diff --git a/common/types.proto b/common/types.proto new file mode 100644 index 000000000..94abcccc3 --- /dev/null +++ b/common/types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package common; + +// For more information on gogo.proto, see: +// 
https://github.com/gogo/protobuf/blob/master/extensions.md +// NOTE: Try really hard not to use custom types, +// it's often complicated, broken, nor not worth it. +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + +//---------------------------------------- +// Abstract types + +// Define these here for compatibility but use tmlibs/common.KVPair. +message KVPair { + bytes key = 1; + bytes value = 2; +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +message KI64Pair { + bytes key = 1; + int64 value = 2; +} From 93c05aa8c06ef38f2b15fcdd1d91eafefda2732d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Dec 2017 13:52:32 -0800 Subject: [PATCH 335/515] Add back on HexBytes --- common/bytes.go | 53 ++++++++++++++++++++++++++++++++++++ common/bytes_test.go | 65 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 common/bytes.go create mode 100644 common/bytes_test.go diff --git a/common/bytes.go b/common/bytes.go new file mode 100644 index 000000000..1ec880c25 --- /dev/null +++ b/common/bytes.go @@ -0,0 +1,53 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. +type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. +func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], []byte(s)) + jbz[1] = '"' + return jbz, nil +} + +// This is the point of Bytes. 
+func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *bz = bz2 + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} diff --git a/common/bytes_test.go b/common/bytes_test.go new file mode 100644 index 000000000..3e693b239 --- /dev/null +++ b/common/bytes_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. 
+ ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} From e47ce81422e436459dabf803676d3a3d6924699b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 28 Dec 2017 03:02:23 -0800 Subject: [PATCH 336/515] Comment fixes from Emmanuel --- clist/clist.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clist/clist.go b/clist/clist.go index 02e31a509..a52920f8c 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -18,7 +18,7 @@ import ( /* CElement is an element of a linked-list -Traversal from a CElement are goroutine-safe. +Traversal from a CElement is goroutine-safe. We can't avoid using WaitGroups or for-loops given the documentation spec without re-implementing the primitives that already exist in @@ -220,6 +220,7 @@ func (l *CList) Front() *CElement { } func (l *CList) FrontWait() *CElement { + // Loop until the head is non-nil else wait and try again for { l.mtx.RLock() head := l.head @@ -230,8 +231,7 @@ func (l *CList) FrontWait() *CElement { return head } wg.Wait() - // l.head doesn't necessarily exist here. - // That's why we need to continue a for-loop. + // NOTE: If you think l.head exists here, think harder. 
} } From b31397aff5f43216ba831338f7dcdee8a53cf433 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 28 Dec 2017 18:30:56 -0800 Subject: [PATCH 337/515] Fix GoLevelDB Iterator which needs to copy a temp []byte --- db/c_level_db.go | 15 +++++++++- db/db.go | 2 +- db/go_level_db.go | 49 ++++++++++++++++++++++++-------- db/mem_db.go | 71 ++++++++++++++++++++++++++++++++--------------- db/types.go | 20 +++++++++---- glide.lock | 24 ++++++++++------ 6 files changed, 129 insertions(+), 52 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 7910628bf..f1a5a3aef 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -50,6 +50,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { return database, nil } +// Implements DB. func (db *CLevelDB) Get(key []byte) []byte { key = nonNilBytes(key) res, err := db.db.Get(db.ro, key) @@ -59,10 +60,12 @@ func (db *CLevelDB) Get(key []byte) []byte { return res } +// Implements DB. func (db *CLevelDB) Has(key []byte) bool { return db.Get(key) != nil } +// Implements DB. func (db *CLevelDB) Set(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -72,6 +75,7 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } } +// Implements DB. func (db *CLevelDB) SetSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -81,6 +85,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } } +// Implements DB. func (db *CLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(db.wo, key) @@ -89,6 +94,7 @@ func (db *CLevelDB) Delete(key []byte) { } } +// Implements DB. func (db *CLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(db.woSync, key) @@ -101,6 +107,7 @@ func (db *CLevelDB) DB() *levigo.DB { return db.db } +// Implements DB. func (db *CLevelDB) Close() { db.db.Close() db.ro.Close() @@ -108,6 +115,7 @@ func (db *CLevelDB) Close() { db.woSync.Close() } +// Implements DB. 
func (db *CLevelDB) Print() { itr := db.Iterator(nil, nil) defer itr.Close() @@ -118,6 +126,7 @@ func (db *CLevelDB) Print() { } } +// Implements DB. func (db *CLevelDB) Stats() map[string]string { // TODO: Find the available properties for the C LevelDB implementation keys := []string{} @@ -133,6 +142,7 @@ func (db *CLevelDB) Stats() map[string]string { //---------------------------------------- // Batch +// Implements DB. func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() return &cLevelDBBatch{db, batch} @@ -143,14 +153,17 @@ type cLevelDBBatch struct { batch *levigo.WriteBatch } +// Implements Batch. func (mBatch *cLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } +// Implements Batch. func (mBatch *cLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } +// Implements Batch. func (mBatch *cLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) if err != nil { @@ -204,7 +217,7 @@ func (itr cLevelDBIterator) Domain() ([]byte, []byte) { } func (itr cLevelDBIterator) Valid() bool { - + // Once invalid, forever invalid. if itr.isInvalid { return false diff --git a/db/db.go b/db/db.go index b43b06554..25ff93ec5 100644 --- a/db/db.go +++ b/db/db.go @@ -2,7 +2,7 @@ package db import "fmt" -//----------------------------------------------------------------------------- +//---------------------------------------- // Main entry const ( diff --git a/db/go_level_db.go b/db/go_level_db.go index bf2b3bf76..7d60e060f 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -39,6 +39,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { return database, nil } +// Implements DB. func (db *GoLevelDB) Get(key []byte) []byte { key = nonNilBytes(key) res, err := db.db.Get(key, nil) @@ -52,10 +53,12 @@ func (db *GoLevelDB) Get(key []byte) []byte { return res } +// Implements DB. func (db *GoLevelDB) Has(key []byte) bool { return db.Get(key) != nil } +// Implements DB. 
func (db *GoLevelDB) Set(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -65,6 +68,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } } +// Implements DB. func (db *GoLevelDB) SetSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -74,6 +78,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } } +// Implements DB. func (db *GoLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, nil) @@ -82,6 +87,7 @@ func (db *GoLevelDB) Delete(key []byte) { } } +// Implements DB. func (db *GoLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) @@ -94,10 +100,12 @@ func (db *GoLevelDB) DB() *leveldb.DB { return db.db } +// Implements DB. func (db *GoLevelDB) Close() { db.db.Close() } +// Implements DB. func (db *GoLevelDB) Print() { str, _ := db.db.GetProperty("leveldb.stats") fmt.Printf("%v\n", str) @@ -110,6 +118,7 @@ func (db *GoLevelDB) Print() { } } +// Implements DB. func (db *GoLevelDB) Stats() map[string]string { keys := []string{ "leveldb.num-files-at-level{n}", @@ -135,6 +144,7 @@ func (db *GoLevelDB) Stats() map[string]string { //---------------------------------------- // Batch +// Implements DB. func (db *GoLevelDB) NewBatch() Batch { batch := new(leveldb.Batch) return &goLevelDBBatch{db, batch} @@ -145,18 +155,21 @@ type goLevelDBBatch struct { batch *leveldb.Batch } +// Implements Batch. func (mBatch *goLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } +// Implements Batch. func (mBatch *goLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } +// Implements Batch. func (mBatch *goLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.batch, nil) if err != nil { - PanicCrisis(err) + panic(err) } } @@ -165,6 +178,17 @@ func (mBatch *goLevelDBBatch) Write() { // NOTE This is almost identical to db/c_level_db.Iterator // Before creating a third version, refactor. 
+// Implements DB. +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) +} + +// Implements DB. +func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + panic("not implemented yet") // XXX +} + type goLevelDBIterator struct { source iterator.Iterator start []byte @@ -189,19 +213,12 @@ func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse } } -func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := db.db.NewIterator(nil, nil) - return newGoLevelDBIterator(itr, start, end, false) -} - -func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { - panic("not implemented yet") // XXX -} - +// Implements Iterator. func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } +// Implements Iterator. func (itr *goLevelDBIterator) Valid() bool { // Once invalid, forever invalid. @@ -230,24 +247,32 @@ func (itr *goLevelDBIterator) Valid() bool { return true } +// Implements Iterator. func (itr *goLevelDBIterator) Key() []byte { + // Key returns a copy of the current key. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 itr.assertNoError() itr.assertIsValid() - return itr.source.Key() + return cp(itr.source.Key()) } +// Implements Iterator. func (itr *goLevelDBIterator) Value() []byte { + // Value returns a copy of the current value. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 itr.assertNoError() itr.assertIsValid() - return itr.source.Value() + return cp(itr.source.Value()) } +// Implements Iterator. func (itr *goLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() itr.source.Next() } +// Implements Iterator. 
func (itr *goLevelDBIterator) Close() { itr.source.Release() } diff --git a/db/mem_db.go b/db/mem_db.go index e2470d7f2..1e3bee5a5 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -26,14 +26,16 @@ func NewMemDB() *MemDB { return database } +// Implements DB. func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() key = nonNilBytes(key) - + return db.db[string(key)] } +// Implements DB. func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() @@ -43,6 +45,7 @@ func (db *MemDB) Has(key []byte) bool { return ok } +// Implements DB. func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -50,6 +53,7 @@ func (db *MemDB) Set(key []byte, value []byte) { db.SetNoLock(key, value) } +// Implements DB. func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -57,7 +61,7 @@ func (db *MemDB) SetSync(key []byte, value []byte) { db.SetNoLock(key, value) } -// NOTE: Implements atomicSetDeleter +// Implements atomicSetDeleter. func (db *MemDB) SetNoLock(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -65,6 +69,7 @@ func (db *MemDB) SetNoLock(key []byte, value []byte) { db.db[string(key)] = value } +// Implements DB. func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -72,6 +77,7 @@ func (db *MemDB) Delete(key []byte) { db.DeleteNoLock(key) } +// Implements DB. func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -79,13 +85,14 @@ func (db *MemDB) DeleteSync(key []byte) { db.DeleteNoLock(key) } -// NOTE: Implements atomicSetDeleter +// Implements atomicSetDeleter. func (db *MemDB) DeleteNoLock(key []byte) { key = nonNilBytes(key) delete(db.db, string(key)) } +// Implements DB. 
func (db *MemDB) Close() { // Close is a noop since for an in-memory // database, we don't have a destination @@ -94,6 +101,7 @@ func (db *MemDB) Close() { // See the discussion in https://github.com/tendermint/tmlibs/pull/56 } +// Implements DB. func (db *MemDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() @@ -103,6 +111,7 @@ func (db *MemDB) Print() { } } +// Implements DB. func (db *MemDB) Stats() map[string]string { db.mtx.Lock() defer db.mtx.Unlock() @@ -113,6 +122,10 @@ func (db *MemDB) Stats() map[string]string { return stats } +//---------------------------------------- +// Batch + +// Implements DB. func (db *MemDB) NewBatch() Batch { db.mtx.Lock() defer db.mtx.Unlock() @@ -125,7 +138,9 @@ func (db *MemDB) Mutex() *sync.Mutex { } //---------------------------------------- +// Iterator +// Implements DB. func (db *MemDB) Iterator(start, end []byte) Iterator { db.mtx.Lock() defer db.mtx.Unlock() @@ -134,6 +149,7 @@ func (db *MemDB) Iterator(start, end []byte) Iterator { return newMemDBIterator(db, keys, start, end) } +// Implements DB. func (db *MemDB) ReverseIterator(start, end []byte) Iterator { db.mtx.Lock() defer db.mtx.Unlock() @@ -142,25 +158,6 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { return newMemDBIterator(db, keys, start, end) } -func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { - keys := []string{} - for key, _ := range db.db { - if IsKeyInDomain([]byte(key), start, end, false) { - keys = append(keys, key) - } - } - sort.Strings(keys) - if reverse { - nkeys := len(keys) - for i := 0; i < nkeys/2; i++ { - keys[i] = keys[nkeys-i-1] - } - } - return keys -} - -var _ Iterator = (*memDBIterator)(nil) - // We need a copy of all of the keys. // Not the best, but probably not a bottleneck depending. type memDBIterator struct { @@ -171,6 +168,8 @@ type memDBIterator struct { end []byte } +var _ Iterator = (*memDBIterator)(nil) + // Keys is expected to be in reverse order for reverse iterators. 
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { return &memDBIterator{ @@ -182,30 +181,36 @@ func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { } } +// Implements Iterator. func (itr *memDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } +// Implements Iterator. func (itr *memDBIterator) Valid() bool { return 0 <= itr.cur && itr.cur < len(itr.keys) } +// Implements Iterator. func (itr *memDBIterator) Next() { itr.assertIsValid() itr.cur++ } +// Implements Iterator. func (itr *memDBIterator) Key() []byte { itr.assertIsValid() return []byte(itr.keys[itr.cur]) } +// Implements Iterator. func (itr *memDBIterator) Value() []byte { itr.assertIsValid() key := []byte(itr.keys[itr.cur]) return itr.db.Get(key) } +// Implements Iterator. func (itr *memDBIterator) Close() { itr.keys = nil itr.db = nil @@ -215,4 +220,24 @@ func (itr *memDBIterator) assertIsValid() { if !itr.Valid() { panic("memDBIterator is invalid") } -} \ No newline at end of file +} + +//---------------------------------------- +// Misc. + +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { + keys := []string{} + for key, _ := range db.db { + if IsKeyInDomain([]byte(key), start, end, false) { + keys = append(keys, key) + } + } + sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + keys[i] = keys[nkeys-i-1] + } + } + return keys +} diff --git a/db/types.go b/db/types.go index 6e5d2408d..07858087a 100644 --- a/db/types.go +++ b/db/types.go @@ -4,19 +4,23 @@ type DB interface { // Get returns nil iff key doesn't exist. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte Get([]byte) []byte // Has checks if a key exists. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte Has(key []byte) bool // Set sets the key. // A nil key is interpreted as an empty byteslice. 
+ // CONTRACT: key, value readonly []byte Set([]byte, []byte) SetSync([]byte, []byte) // Delete deletes the key. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key readonly []byte Delete([]byte) DeleteSync([]byte) @@ -25,6 +29,7 @@ type DB interface { // A nil start is interpreted as an empty byteslice. // If end is nil, iterates up to the last item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte Iterator(start, end []byte) Iterator // Iterate over a domain of keys in descending order. End is exclusive. @@ -32,6 +37,7 @@ type DB interface { // If start is nil, iterates from the last/greatest item (inclusive). // If end is nil, iterates up to the first/least item (iclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte ReverseIterator(start, end []byte) Iterator // Closes the connection. @@ -56,11 +62,12 @@ type Batch interface { } type SetDeleter interface { - Set(key, value []byte) - Delete(key []byte) + Set(key, value []byte) // CONTRACT: key, value readonly []byte + Delete(key []byte) // CONTRACT: key readonly []byte } //---------------------------------------- +// Iterator /* Usage: @@ -83,6 +90,7 @@ type Iterator interface { // // The smallest key is the empty byte array []byte{} - see BeginningKey(). // The largest key is the nil byte array []byte(nil) - see EndingKey(). + // CONTRACT: start, end readonly []byte Domain() (start []byte, end []byte) // Valid returns whether the current position is valid. @@ -96,14 +104,14 @@ type Iterator interface { Next() // Key returns the key of the cursor. - // // If Valid returns false, this method will panic. - Key() []byte + // CONTRACT: key readonly []byte + Key() (key []byte) // Value returns the value of the cursor. - // // If Valid returns false, this method will panic. 
- Value() []byte + // CONTRACT: value readonly []byte + Value() (value []byte) // Close releases the Iterator. Close() diff --git a/glide.lock b/glide.lock index 83c8551e0..146a32a0f 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-25T17:45:52.357002873-08:00 +updated: 2017-12-28T18:27:21.247160207-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e3b2152e0063c5f05efea89ecbe297852af2a92d + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -12,7 +12,13 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf +- name: github.com/gogo/protobuf + version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 + subpackages: + - gogoproto + - proto + - protoc-gen-gogo/descriptor - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -45,7 +51,7 @@ imports: - name: github.com/pelletier/go-toml version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: f15c970de5b76fac0b59abb32d62c17cc7bed265 + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: @@ -57,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: adf24ef3f94bd13ec4163060b21a5678f22b429b + version: 
b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -76,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 5ab49b4c6ad674da6b81442911cf713ef0afb544 + version: 27be46e25124ddf775e23317a83647ce62a93f6b subpackages: - data - data/base58 @@ -85,7 +91,7 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys @@ -93,7 +99,7 @@ imports: subpackages: - unix - name: golang.org/x/text - version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm From 6b5d08f7daf180036d338d7d7d729861bb58eae5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 23 Dec 2017 04:18:50 -0800 Subject: [PATCH 338/515] RepeatTimer fix --- common/repeat_timer.go | 228 ++++++++++++++++++++++++------------ common/repeat_timer_test.go | 86 ++++++++------ 2 files changed, 203 insertions(+), 111 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 0bc4d87b4..2947a9166 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -5,152 +5,224 @@ import ( "time" ) +// Used by RepeatTimer the first time, +// and every time it's Reset() after Stop(). +type TickerMaker func(dur time.Duration) Ticker + // Ticker is a basic ticker interface. type Ticker interface { + + // Never changes, never closes. Chan() <-chan time.Time + + // Stopping a stopped Ticker will panic. Stop() - Reset() } -// DefaultTicker wraps the stdlibs Ticker implementation. 
-type DefaultTicker struct { - t *time.Ticker - dur time.Duration -} +//---------------------------------------- +// defaultTickerMaker -// NewDefaultTicker returns a new DefaultTicker -func NewDefaultTicker(dur time.Duration) *DefaultTicker { - return &DefaultTicker{ - time.NewTicker(dur), - dur, - } +func defaultTickerMaker(dur time.Duration) Ticker { + ticker := time.NewTicker(dur) + return (*defaultTicker)(ticker) } +type defaultTicker time.Ticker + // Implements Ticker -func (t *DefaultTicker) Chan() <-chan time.Time { - return t.t.C +func (t *defaultTicker) Chan() <-chan time.Time { + return t.C } // Implements Ticker -func (t *DefaultTicker) Stop() { - t.t.Stop() - t.t = nil +func (t *defaultTicker) Stop() { + t.Stop() } -// Implements Ticker -func (t *DefaultTicker) Reset() { - t.t = time.NewTicker(t.dur) +//---------------------------------------- +// LogicalTickerMaker + +// Construct a TickerMaker that always uses `ch`. +// It's useful for simulating a deterministic clock. +func NewLogicalTickerMaker(ch chan time.Time) TickerMaker { + return func(dur time.Duration) Ticker { + return newLogicalTicker(ch, dur) + } } -// ManualTicker wraps a channel that can be manually sent on -type ManualTicker struct { - ch chan time.Time +type logicalTicker struct { + source <-chan time.Time + ch chan time.Time + quit chan struct{} } -// NewManualTicker returns a new ManualTicker -func NewManualTicker(ch chan time.Time) *ManualTicker { - return &ManualTicker{ - ch: ch, +func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { + lt := &logicalTicker{ + source: source, + ch: make(chan time.Time), + quit: make(chan struct{}), } + go lt.fireRoutine(interval) + return lt } -// Implements Ticker -func (t *ManualTicker) Chan() <-chan time.Time { - return t.ch +// We clearly need a new goroutine, for logicalTicker may have been created +// from a goroutine separate from the source. 
+func (t *logicalTicker) fireRoutine(interval time.Duration) { + source := t.source + + // Init `lasttime` + lasttime := time.Time{} + select { + case lasttime = <-source: + case <-t.quit: + return + } + // Init `lasttime` end + + timeleft := interval + for { + select { + case newtime := <-source: + elapsed := newtime.Sub(lasttime) + timeleft -= elapsed + if timeleft <= 0 { + // Block for determinism until the ticker is stopped. + select { + case t.ch <- newtime: + case <-t.quit: + return + } + // Reset timeleft. + // Don't try to "catch up" by sending more. + // "Ticker adjusts the intervals or drops ticks to make up for + // slow receivers" - https://golang.org/pkg/time/#Ticker + timeleft = interval + } + case <-t.quit: + return // done + } + } } // Implements Ticker -func (t *ManualTicker) Stop() { - // noop +func (t *logicalTicker) Chan() <-chan time.Time { + return t.ch // immutable } // Implements Ticker -func (t *ManualTicker) Reset() { - // noop +func (t *logicalTicker) Stop() { + close(t.quit) // it *should* panic when stopped twice. } //--------------------------------------------------------------------- /* -RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. -It's good for keeping connections alive. -A RepeatTimer must be Stop()'d or it will keep a goroutine alive. + RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur` + period. (It's good for keeping connections alive.) + A RepeatTimer must be stopped, or it will keep a goroutine alive. */ type RepeatTimer struct { - Ch chan time.Time + name string + ch chan time.Time + tm TickerMaker mtx sync.Mutex - name string + dur time.Duration ticker Ticker quit chan struct{} - wg *sync.WaitGroup } -// NewRepeatTimer returns a RepeatTimer with the DefaultTicker. +// NewRepeatTimer returns a RepeatTimer with a defaultTicker. 
func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - ticker := NewDefaultTicker(dur) - return NewRepeatTimerWithTicker(name, ticker) + return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker) } -// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker. -func NewRepeatTimerWithTicker(name string, ticker Ticker) *RepeatTimer { +// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker +// maker. +func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer { var t = &RepeatTimer{ - Ch: make(chan time.Time), - ticker: ticker, - quit: make(chan struct{}), - wg: new(sync.WaitGroup), name: name, + ch: make(chan time.Time), + tm: tm, + dur: dur, + ticker: nil, + quit: nil, } - t.wg.Add(1) - go t.fireRoutine(t.ticker.Chan()) + t.reset() return t } -func (t *RepeatTimer) fireRoutine(ch <-chan time.Time) { +func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { for { select { case t_ := <-ch: - t.Ch <- t_ - case <-t.quit: - // needed so we know when we can reset t.quit - t.wg.Done() + t.ch <- t_ + case <-quit: // NOTE: `t.quit` races. return } } } +func (t *RepeatTimer) Chan() <-chan time.Time { + return t.ch +} + +func (t *RepeatTimer) Stop() { + t.mtx.Lock() + defer t.mtx.Unlock() + + t.stop() +} + // Wait the duration again before firing. func (t *RepeatTimer) Reset() { - t.Stop() - - t.mtx.Lock() // Lock + t.mtx.Lock() defer t.mtx.Unlock() - t.ticker.Reset() - t.quit = make(chan struct{}) - t.wg.Add(1) - go t.fireRoutine(t.ticker.Chan()) + t.reset() } -// For ease of .Stop()'ing services before .Start()'ing them, -// we ignore .Stop()'s on nil RepeatTimers. -func (t *RepeatTimer) Stop() bool { - if t == nil { - return false +//---------------------------------------- +// Misc. + +// CONTRACT: (non-constructor) caller should hold t.mtx. 
+func (t *RepeatTimer) reset() { + if t.ticker != nil { + t.stop() } - t.mtx.Lock() // Lock - defer t.mtx.Unlock() + t.ticker = t.tm(t.dur) + t.quit = make(chan struct{}) + go t.fireRoutine(t.ticker.Chan(), t.quit) +} + +// CONTRACT: caller should hold t.mtx. +func (t *RepeatTimer) stop() { + if t.ticker == nil { + /* + Similar to the case of closing channels twice: + https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk + Stopping a RepeatTimer twice implies that you do + not know whether you are done or not. + If you're calling stop on a stopped RepeatTimer, + you probably have race conditions. + */ + panic("Tried to stop a stopped RepeatTimer") + } + t.ticker.Stop() + t.ticker = nil + /* + XXX + From https://golang.org/pkg/time/#Ticker: + "Stop the ticker to release associated resources" + "After Stop, no more ticks will be sent" + So we shouldn't have to do the below. - exists := t.ticker != nil - if exists { - t.ticker.Stop() // does not close the channel select { - case <-t.Ch: + case <-t.ch: // read off channel if there's anything there default: } - close(t.quit) - t.wg.Wait() // must wait for quit to close else we race Reset - } - return exists + */ + close(t.quit) } diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 98d991e9c..f43cc7514 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -4,66 +4,86 @@ import ( "testing" "time" - // make govet noshadow happy... - asrt "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/assert" ) -// NOTE: this only tests with the ManualTicker. +// NOTE: this only tests with the LogicalTicker. // How do you test a real-clock ticker properly? -func TestRepeat(test *testing.T) { - assert := asrt.New(test) +func TestRepeat(t *testing.T) { ch := make(chan time.Time, 100) - // tick fires cnt times on ch + lt := time.Time{} // zero time is year 1 + + // tick fires `cnt` times for each second. 
tick := func(cnt int) { for i := 0; i < cnt; i++ { - ch <- time.Now() + lt = lt.Add(time.Second) + ch <- lt } } - tock := func(test *testing.T, t *RepeatTimer, cnt int) { + + // tock consumes Ticker.Chan() events `cnt` times. + tock := func(t *testing.T, rt *RepeatTimer, cnt int) { for i := 0; i < cnt; i++ { - after := time.After(time.Second * 2) + timeout := time.After(time.Second * 2) select { - case <-t.Ch: - case <-after: - test.Fatal("expected ticker to fire") + case _ = <-rt.Chan(): + case <-timeout: + panic("QWE") + t.Fatal("expected RepeatTimer to fire") } } done := true select { - case <-t.Ch: + case <-rt.Chan(): done = false default: } - assert.True(done) + assert.True(t, done) } - ticker := NewManualTicker(ch) - t := NewRepeatTimerWithTicker("bar", ticker) + tm := NewLogicalTickerMaker(ch) + dur := time.Duration(0) // dontcare + rt := NewRepeatTimerWithTickerMaker("bar", dur, tm) - // start at 0 - tock(test, t, 0) + // Start at 0. + tock(t, rt, 0) + tick(1) // init time - // wait for 4 periods - tick(4) - tock(test, t, 4) + tock(t, rt, 0) + tick(1) // wait 1 periods + tock(t, rt, 1) + tick(2) // wait 2 periods + tock(t, rt, 2) + tick(3) // wait 3 periods + tock(t, rt, 3) + tick(4) // wait 4 periods + tock(t, rt, 4) - // keep reseting leads to no firing + // Multiple resets leads to no firing. for i := 0; i < 20; i++ { time.Sleep(time.Millisecond) - t.Reset() + rt.Reset() } - tock(test, t, 0) - // after this, it still works normal - tick(2) - tock(test, t, 2) + // After this, it works as new. + tock(t, rt, 0) + tick(1) // init time + + tock(t, rt, 0) + tick(1) // wait 1 periods + tock(t, rt, 1) + tick(2) // wait 2 periods + tock(t, rt, 2) + tick(3) // wait 3 periods + tock(t, rt, 3) + tick(4) // wait 4 periods + tock(t, rt, 4) - // after a stop, nothing more is sent - stopped := t.Stop() - assert.True(stopped) - tock(test, t, 0) + // After a stop, nothing more is sent. 
+ rt.Stop() + tock(t, rt, 0) - // close channel to stop counter - close(t.Ch) + // Another stop panics. + assert.Panics(t, func() { rt.Stop() }) } From 76433d904059009050393ae31c569b7f2df72350 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 25 Dec 2017 10:13:37 -0500 Subject: [PATCH 339/515] little things --- common/repeat_timer.go | 10 +++++----- common/repeat_timer_test.go | 3 +-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 2947a9166..7c5291844 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -42,11 +42,11 @@ func (t *defaultTicker) Stop() { //---------------------------------------- // LogicalTickerMaker -// Construct a TickerMaker that always uses `ch`. +// Construct a TickerMaker that always uses `source`. // It's useful for simulating a deterministic clock. -func NewLogicalTickerMaker(ch chan time.Time) TickerMaker { +func NewLogicalTickerMaker(source chan time.Time) TickerMaker { return func(dur time.Duration) Ticker { - return newLogicalTicker(ch, dur) + return newLogicalTicker(source, dur) } } @@ -66,8 +66,8 @@ func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { return lt } -// We clearly need a new goroutine, for logicalTicker may have been created -// from a goroutine separate from the source. +// We need a goroutine to read times from t.source +// and fire on t.Chan() when `interval` has passed. 
func (t *logicalTicker) fireRoutine(interval time.Duration) { source := t.source diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index f43cc7514..44a1a0679 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -29,7 +29,6 @@ func TestRepeat(t *testing.T) { select { case _ = <-rt.Chan(): case <-timeout: - panic("QWE") t.Fatal("expected RepeatTimer to fire") } } @@ -43,7 +42,7 @@ func TestRepeat(t *testing.T) { } tm := NewLogicalTickerMaker(ch) - dur := time.Duration(0) // dontcare + dur := time.Duration(10 * time.Millisecond) // less than a second rt := NewRepeatTimerWithTickerMaker("bar", dur, tm) // Start at 0. From 558f8e77699286ffca1f59842f54160dd30d4794 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 25 Dec 2017 11:10:48 -0500 Subject: [PATCH 340/515] fix recursion --- common/repeat_timer.go | 3 ++- common/repeat_timer_test.go | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 7c5291844..96348bd19 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -36,7 +36,8 @@ func (t *defaultTicker) Chan() <-chan time.Time { // Implements Ticker func (t *defaultTicker) Stop() { - t.Stop() + tt := time.Ticker(*t) + tt.Stop() } //---------------------------------------- diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 44a1a0679..269316bd2 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -7,8 +7,12 @@ import ( "github.com/stretchr/testify/assert" ) -// NOTE: this only tests with the LogicalTicker. -// How do you test a real-clock ticker properly? 
+func TestDefaultTicker(t *testing.T) { + ticker := defaultTickerMaker(time.Millisecond * 10) + <-ticker.Chan() + ticker.Stop() +} + func TestRepeat(t *testing.T) { ch := make(chan time.Time, 100) From a171d906110ea86c3e9e79f3e0bd6c7c7640abc2 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 28 Dec 2017 17:37:21 -0800 Subject: [PATCH 341/515] Fix possibly incorrect usage of conversion --- common/repeat_timer.go | 3 +-- common/repeat_timer_test.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 96348bd19..2e6cb81c8 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -36,8 +36,7 @@ func (t *defaultTicker) Chan() <-chan time.Time { // Implements Ticker func (t *defaultTicker) Stop() { - tt := time.Ticker(*t) - tt.Stop() + ((*time.Ticker)(t)).Stop() } //---------------------------------------- diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 269316bd2..da1687073 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -31,7 +31,7 @@ func TestRepeat(t *testing.T) { for i := 0; i < cnt; i++ { timeout := time.After(time.Second * 2) select { - case _ = <-rt.Chan(): + case <-rt.Chan(): case <-timeout: t.Fatal("expected RepeatTimer to fire") } From 71f13cc071258fbcfe3fb3a3438d1a9f0ee0f4e0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 10:42:02 -0500 Subject: [PATCH 342/515] drop metalinter --- test.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test.sh b/test.sh index 02bdaae86..b3978d3fe 100755 --- a/test.sh +++ b/test.sh @@ -2,14 +2,14 @@ set -e # run the linter -make metalinter_test +# make metalinter_test # run the unit tests with coverage echo "" > coverage.txt for d in $(go list ./... 
| grep -v vendor); do - go test -race -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi done From 92c17f3f251d51878dc866a42dc57dc09df88ac8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 10:49:49 -0500 Subject: [PATCH 343/515] give test more time --- common/repeat_timer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index da1687073..5a3a4c0a6 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -29,11 +29,11 @@ func TestRepeat(t *testing.T) { // tock consumes Ticker.Chan() events `cnt` times. tock := func(t *testing.T, rt *RepeatTimer, cnt int) { for i := 0; i < cnt; i++ { - timeout := time.After(time.Second * 2) + timeout := time.After(time.Second * 10) select { case <-rt.Chan(): case <-timeout: - t.Fatal("expected RepeatTimer to fire") + panic("expected RepeatTimer to fire") } } done := true From 35e6f11ad445cf4cb19fefadba0517a86f00b1fc Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 11:01:37 -0500 Subject: [PATCH 344/515] changelog and version --- CHANGELOG.md | 13 +++++++++++++ version/version.go | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b679b839d..fe2c2fe94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 0.6.0 (December 29, 2017) + +BREAKING: + - [cli] remove --root + - [pubsub] add String() method to Query interface + +IMPROVEMENTS: + - [common] use a thread-safe and well seeded non-crypto rng + +BUG FIXES + - [clist] fix misuse of wait group + - [common] introduce Ticker interface and logicalTicker for better testing of timers + ## 0.5.0 (December 5, 2017) BREAKING: diff --git 
a/version/version.go b/version/version.go index 45222da79..6cc887286 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.5.0" +const Version = "0.6.0" From 9f72e25b23f3a3120af5e48b5b7520c34b88775f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 16 Dec 2017 00:03:40 -0500 Subject: [PATCH 345/515] readme --- README.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..d5a11c7b4 --- /dev/null +++ b/README.md @@ -0,0 +1,53 @@ +# TMLIBS + +This repo is a home for various small packages. + +## autofile + +Autofile is file access with automatic log rotation. A group of files is maintained and rotation happens +when the leading file gets too big. Provides a reader for reading from the file group. + +## cli + +CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. + +## clist + +Clist provides a linekd list that is safe for concurrent access by many readers. + +## common + +Common provides a hodgepodge of useful functions. + +## db + +DB provides a database interface and a number of implementions, including ones using an in-memory map, the filesystem directory structure, +an implemention of LevelDB in Go, and the official LevelDB in C. + +## events + +Events is a synchronous PubSub package. + +## flowrate + +Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. + +## log + +Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. + +## logger + +Logger is DEPRECATED. It's a simple wrapper around `log15`. + +## merkle + +Merkle provides a simple static merkle tree and corresponding proofs. + +## process + +Process is a simple utility for spawning OS processes. 
+ +## pubsub + +PubSub is an asynchronous PubSub package. From a84bc2f5b26094bbd15dfefe46a2ac932fc9d557 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 29 Dec 2017 15:23:07 +0000 Subject: [PATCH 346/515] logger is deprecated, removed; closes #115 --- README.md | 4 --- logger/log.go | 78 --------------------------------------------------- 2 files changed, 82 deletions(-) delete mode 100644 logger/log.go diff --git a/README.md b/README.md index d5a11c7b4..9ea618dbd 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,6 @@ Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. -## logger - -Logger is DEPRECATED. It's a simple wrapper around `log15`. - ## merkle Merkle provides a simple static merkle tree and corresponding proofs. diff --git a/logger/log.go b/logger/log.go deleted file mode 100644 index 2f4faef6b..000000000 --- a/logger/log.go +++ /dev/null @@ -1,78 +0,0 @@ -// DEPRECATED! Use newer log package. -package logger - -import ( - "os" - - "github.com/tendermint/log15" - . "github.com/tendermint/tmlibs/common" -) - -var mainHandler log15.Handler -var bypassHandler log15.Handler - -func init() { - resetWithLogLevel("debug") -} - -func SetLogLevel(logLevel string) { - resetWithLogLevel(logLevel) -} - -func resetWithLogLevel(logLevel string) { - // main handler - //handlers := []log15.Handler{} - mainHandler = log15.LvlFilterHandler( - getLevel(logLevel), - log15.StreamHandler(os.Stdout, log15.TerminalFormat()), - ) - //handlers = append(handlers, mainHandler) - - // bypass handler for not filtering on global logLevel. - bypassHandler = log15.StreamHandler(os.Stdout, log15.TerminalFormat()) - //handlers = append(handlers, bypassHandler) - - // By setting handlers on the root, we handle events from all loggers. - log15.Root().SetHandler(mainHandler) -} - -// See go-wire/log for an example of usage. 
-func MainHandler() log15.Handler { - return mainHandler -} - -func New(ctx ...interface{}) log15.Logger { - return NewMain(ctx...) -} - -func BypassHandler() log15.Handler { - return bypassHandler -} - -func NewMain(ctx ...interface{}) log15.Logger { - return log15.Root().New(ctx...) -} - -func NewBypass(ctx ...interface{}) log15.Logger { - bypass := log15.New(ctx...) - bypass.SetHandler(bypassHandler) - return bypass -} - -func getLevel(lvlString string) log15.Lvl { - lvl, err := log15.LvlFromString(lvlString) - if err != nil { - Exit(Fmt("Invalid log level %v: %v", lvlString, err)) - } - return lvl -} - -//---------------------------------------- -// Exported from log15 - -var LvlFilterHandler = log15.LvlFilterHandler -var LvlDebug = log15.LvlDebug -var LvlInfo = log15.LvlInfo -var LvlNotice = log15.LvlNotice -var LvlWarn = log15.LvlWarn -var LvlError = log15.LvlError From 7fe3d5dac2786aae214fbd5b1df45bc9e4db9e18 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:48:36 -0500 Subject: [PATCH 347/515] metalinter --- Makefile | 2 +- db/stats.go | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) delete mode 100644 db/stats.go diff --git a/Makefile b/Makefile index f1ee1004e..dfdd6bef5 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,6 @@ fmt: metalinter: @echo "==> Running linter" gometalinter.v2 --vendor --deadline=600s --disable-all \ - --enable=maligned \ --enable=deadcode \ --enable=goconst \ --enable=goimports \ @@ -94,6 +93,7 @@ metalinter: --enable=vetshadow \ ./... 
+ #--enable=maligned \ #--enable=gas \ #--enable=aligncheck \ #--enable=dupl \ diff --git a/db/stats.go b/db/stats.go deleted file mode 100644 index ef4b0dd0f..000000000 --- a/db/stats.go +++ /dev/null @@ -1,7 +0,0 @@ -package db - -func mergeStats(src, dest map[string]string, prefix string) { - for key, value := range src { - dest[prefix+key] = value - } -} From 1838db28803edbc8c77cefc814652f32bbae39d7 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:53:56 -0500 Subject: [PATCH 348/515] circle --- circle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index 104cfa6f3..390ffb039 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh + - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" From 1460540acd267c37f3d58ebe18cdaf4baec15f7f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:59:43 -0500 Subject: [PATCH 349/515] metalinter is for another time --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index dfdd6bef5..e15356c2c 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,9 @@ GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter.v2 \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/gogo/protobuf/gogoproto + # github.com/alecthomas/gometalinter.v2 \ + GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf @@ -40,7 +41,7 @@ check_tools: get_tools: @echo "--> Installing tools" go get -u -v $(GOTOOLS) - @gometalinter.v2 --install + # @gometalinter.v2 --install get_protoc: @# https://github.com/google/protobuf/releases From 2bb538b150f197a04a0b969a27e9ea24d35edbc1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 11:05:53 -0500 Subject: [PATCH 350/515] cmn: fix HexBytes.MarshalJSON --- common/bytes.go | 2 +- common/bytes_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/bytes.go b/common/bytes.go index 1ec880c25..ba81bbe97 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -26,7 +26,7 @@ func (bz HexBytes) MarshalJSON() ([]byte, error) { jbz := make([]byte, len(s)+2) jbz[0] = '"' copy(jbz[1:], []byte(s)) - jbz[1] = '"' + jbz[len(jbz)-1] = '"' return jbz, nil } diff --git a/common/bytes_test.go b/common/bytes_test.go index 3e693b239..9e11988f2 100644 --- a/common/bytes_test.go +++ b/common/bytes_test.go @@ -35,8 +35,8 @@ func TestJSONMarshal(t *testing.T) { expected string }{ {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, } for i, tc := range cases { From 84afef20f5d960b033c9c8d84710331e6cacec70 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Sun, 28 Jan 2018 10:35:34 -0700 Subject: [PATCH 351/515] common: fix BitArray.Update to avoid nil dereference Update previously only checked that the receiver was non-nil but didn't check that the input parameter to update "o" was non-nil causing a nil dereference in cases such as https://github.com/tendermint/tendermint/blob/fe632ea32a89c3d9804bbd6e3ce9391b1d5a0993/consensus/reactor.go#L306 Fixes https://github.com/tendermint/tendermint/issues/1169 --- common/bit_array.go | 2 +- common/bit_array_test.go | 23 +++++++++++++++++++++++ 2 files changed, 24 
insertions(+), 1 deletion(-) diff --git a/common/bit_array.go b/common/bit_array.go index 848763b48..68201bad6 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -306,7 +306,7 @@ func (bA *BitArray) Bytes() []byte { // so if necessary, caller must copy or lock o prior to calling Update. // If bA is nil, does nothing. func (bA *BitArray) Update(o *BitArray) { - if bA == nil { + if bA == nil || o == nil { return } bA.mtx.Lock() diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 1c72882c7..9a787e441 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -164,3 +164,26 @@ func TestEmptyFull(t *testing.T) { } } } + +func TestUpdateNeverPanics(t *testing.T) { + newRandBitArray := func(n int) *BitArray { + ba, _ := randBitArray(n) + return ba + } + pairs := []struct { + a, b *BitArray + }{ + {nil, nil}, + {newRandBitArray(10), newRandBitArray(12)}, + {newRandBitArray(0), NewBitArray(10)}, + {nil, NewBitArray(10)}, + {nil, newRandBitArray(64)}, + {newRandBitArray(63), newRandBitArray(64)}, + } + + for _, pair := range pairs { + a, b := pair.a, pair.b + a.Update(b) + b.Update(a) + } +} From 85be26c675b05a2a75c856f7c22b446d8df1c944 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Sun, 28 Jan 2018 22:02:46 -0700 Subject: [PATCH 352/515] common: BitArray: feedback from @adrianbrink to simplify tests --- common/bit_array_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 9a787e441..e4ac8bf6f 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -175,10 +175,9 @@ func TestUpdateNeverPanics(t *testing.T) { }{ {nil, nil}, {newRandBitArray(10), newRandBitArray(12)}, - {newRandBitArray(0), NewBitArray(10)}, + {newRandBitArray(23), newRandBitArray(23)}, + {newRandBitArray(37), nil}, {nil, NewBitArray(10)}, - {nil, newRandBitArray(64)}, - {newRandBitArray(63), newRandBitArray(64)}, } for _, pair := range pairs { From 
bcd8d403dcef53c1fabf2521362c467459fabafc Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:27:34 -0800 Subject: [PATCH 353/515] Remove encoding from common cli --- cli/setup.go | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 295477598..2dcadb407 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -8,9 +8,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" - - data "github.com/tendermint/go-wire/data" - "github.com/tendermint/go-wire/data/base58" ) const ( @@ -42,7 +39,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") - cmd.PersistentPreRunE = concatCobraCmdFuncs(setEncoding, validateOutput, cmd.PersistentPreRunE) + cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE) return PrepareBaseCmd(cmd, envPrefix, defaultHome) } @@ -147,23 +144,6 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { return nil } -// setEncoding reads the encoding flag -func setEncoding(cmd *cobra.Command, args []string) error { - // validate and set encoding - enc := viper.GetString("encoding") - switch enc { - case "hex": - data.Encoder = data.HexEncoder - case "b64": - data.Encoder = data.B64Encoder - case "btc": - data.Encoder = base58.BTCEncoder - default: - return errors.Errorf("Unsupported encoding: %s", enc) - } - return nil -} - func validateOutput(cmd *cobra.Command, args []string) error { // validate output format output := viper.GetString(OutputFlag) From 4e2a275a67614a250f70adba2340cd463b24f06d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:30:47 -0800 Subject: [PATCH 354/515] Update to use tmlibs sdk2 --- 
glide.lock | 62 +++++++++++++++++++++++------------------------------- glide.yaml | 4 +--- 2 files changed, 27 insertions(+), 39 deletions(-) diff --git a/glide.lock b/glide.lock index aaf7c07e2..5576d090a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,14 @@ -hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-28T18:27:21.247160207-08:00 +hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 +updated: 2018-01-14T13:29:55.282854028-08:00 imports: +- name: github.com/davecgh/go-spew + version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 + subpackages: + - spew - name: github.com/fsnotify/fsnotify - version: 4da3e2cfbabc9f751898f250b49f2439785783a1 + version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 953e747656a7bbb5e1f998608b460458958b70cc subpackages: - log - log/level @@ -12,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -26,6 +30,7 @@ imports: subpackages: - hcl/ast - hcl/parser + - hcl/printer - hcl/scanner - hcl/strconv - hcl/token @@ -39,21 +44,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a -- name: github.com/mattn/go-colorable - version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 -- name: github.com/mattn/go-isatty - version: a5cdd64afdee435007ee3e9f6ed4684af949d568 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure - version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: 
c37440a7cf42ac63b919c752ca73a85067e05992 + version: b4575eea38cca1123ec2dc90c26529b5c5acfcff - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 - name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d + version: e881fd58d78e04cf6d0de1217f8707c8cc2249bc - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 57afd63c68602b63ed976de00dd066ccb3c319db subpackages: - mem - name: github.com/spf13/cast @@ -61,13 +60,13 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 12bd96e66386c1960ab0f74ced1362f66f552f7b + version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5 - name: github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: 34011bf325bce385408353a30b101fe5e923eb6e subpackages: - leveldb - leveldb/cache @@ -82,40 +81,31 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b - subpackages: - - data - - data/base58 + version: b93ebdd4f306833936c243561ec30af3455dc764 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - subpackages: - - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 13931e22f9e72ea58bb73048bc752b48c6d4d4ac subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 810d7000345868fc619eb81f46307107118f4ae1 subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + 
version: e19ae1496984b1c655b8044a65c0300a3c878dd3 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew - name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + version: 792786c7400a136282c1664665ae0a8db921c6c2 subpackages: - difflib - name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f + version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index 2df880175..a28bd39ec 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,9 +16,7 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire - subpackages: - - data - - data/base58 + version: sdk2 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From 6637c202bf7d5256caf8acb65070937cfd2e75a0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:40:18 -0800 Subject: [PATCH 355/515] Revert "Update to use tmlibs sdk2" This reverts commit ae58af0be534a5c344896461b97a6490d428deb4. Breaks the tests. 
--- glide.lock | 62 +++++++++++++++++++++++++++++++----------------------- glide.yaml | 4 +++- 2 files changed, 39 insertions(+), 27 deletions(-) diff --git a/glide.lock b/glide.lock index 5576d090a..aaf7c07e2 100644 --- a/glide.lock +++ b/glide.lock @@ -1,14 +1,10 @@ -hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-14T13:29:55.282854028-08:00 +hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 +updated: 2017-12-28T18:27:21.247160207-08:00 imports: -- name: github.com/davecgh/go-spew - version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 - subpackages: - - spew - name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 + version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 953e747656a7bbb5e1f998608b460458958b70cc + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -16,7 +12,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -30,7 +26,6 @@ imports: subpackages: - hcl/ast - hcl/parser - - hcl/printer - hcl/scanner - hcl/strconv - hcl/token @@ -44,15 +39,21 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a +- name: github.com/mattn/go-colorable + version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 +- name: github.com/mattn/go-isatty + version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure - version: b4575eea38cca1123ec2dc90c26529b5c5acfcff + version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: 
github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: e881fd58d78e04cf6d0de1217f8707c8cc2249bc + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 57afd63c68602b63ed976de00dd066ccb3c319db + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -60,13 +61,13 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: 34011bf325bce385408353a30b101fe5e923eb6e + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -81,31 +82,40 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b93ebdd4f306833936c243561ec30af3455dc764 + version: 27be46e25124ddf775e23317a83647ce62a93f6b + subpackages: + - data + - data/base58 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b + subpackages: + - term - name: golang.org/x/crypto - version: 13931e22f9e72ea58bb73048bc752b48c6d4d4ac + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 810d7000345868fc619eb81f46307107118f4ae1 + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: e19ae1496984b1c655b8044a65c0300a3c878dd3 + version: 
c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: d670f9405373e636a5a2765eea47fac0c9bc91a4 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew - name: github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c + version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index a28bd39ec..2df880175 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,7 +16,9 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire - version: sdk2 + subpackages: + - data + - data/base58 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From cfbb9338bdad8f5b369b0e403eb428712860f1bb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 14 Jan 2018 17:35:57 -0500 Subject: [PATCH 356/515] use go-wire sdk2 --- glide.lock | 46 ++++++++++++++++----------------------- glide.yaml | 1 + merkle/simple_map.go | 16 +++++++------- merkle/simple_map_test.go | 12 +++++----- merkle/simple_tree.go | 18 +++++++-------- 5 files changed, 42 insertions(+), 51 deletions(-) diff --git a/glide.lock b/glide.lock index aaf7c07e2..cd11b7588 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,14 @@ -hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-28T18:27:21.247160207-08:00 +hash: c2db6960e66e1f56fbce88caec470cbde14701763efb4a26d2f3fabd2f979a96 +updated: 2018-01-14T17:26:45.597677436-05:00 imports: +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew - name: github.com/fsnotify/fsnotify version: 
4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 953e747656a7bbb5e1f998608b460458958b70cc subpackages: - log - log/level @@ -12,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -39,21 +43,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a -- name: github.com/mattn/go-colorable - version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 -- name: github.com/mattn/go-isatty - version: a5cdd64afdee435007ee3e9f6ed4684af949d568 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 57afd63c68602b63ed976de00dd066ccb3c319db subpackages: - mem - name: github.com/spf13/cast @@ -63,11 +61,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: 
github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: 34011bf325bce385408353a30b101fe5e923eb6e subpackages: - leveldb - leveldb/cache @@ -82,34 +80,28 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b + version: b93ebdd4f306833936c243561ec30af3455dc764 subpackages: - data - data/base58 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - subpackages: - - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 95a4943f35d008beabde8c11e5075a1b714e6419 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 83801418e1b59fb1880e363299581ee543af32ca subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + version: e19ae1496984b1c655b8044a65c0300a3c878dd3 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 testImports: -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index 2df880175..e295781cb 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,6 +16,7 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire + version: sdk2 subpackages: - data - data/base58 diff --git a/merkle/simple_map.go b/merkle/simple_map.go index 003c7cd42..f637d30a7 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -26,7 +26,7 @@ func (sm *SimpleMap) Set(key string, value interface{}) { if hashable, ok := value.(Hashable); ok { vBytes = hashable.Hash() } else { - vBytes = wire.BinaryBytes(value) + vBytes, _ = wire.MarshalBinary(value) } sm.kvs = append(sm.kvs, 
cmn.KVPair{ @@ -65,14 +65,14 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs { type kvPair cmn.KVPair func (kv kvPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteByteSlice(kv.Key, hasher, n, err) - if *err != nil { - panic(*err) + hasher := ripemd160.New() + err := wire.EncodeByteSlice(hasher, kv.Key) + if err != nil { + panic(err) } - wire.WriteByteSlice(kv.Value, hasher, n, err) - if *err != nil { - panic(*err) + err = wire.EncodeByteSlice(hasher, kv.Value) + if err != nil { + panic(err) } return hasher.Sum(nil) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 8ba7ce66b..946858550 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -11,37 +11,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", "value1") - assert.Equal(t, "3bb53f017d2f5b4f144692aa829a5c245ac2b123", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "d7df3e1d47fe38b51f8d897a88828026807a86b6", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value2") - assert.Equal(t, "14a68db29e3f930ffaafeff5e07c17a439384f39", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "db415336c9be129ac38259b935a49d8e9c248c88", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") - assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") - assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", 
"value1") db.Set("key2", "value2") db.Set("key3", "value3") - assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") db.Set("key3", "value3") - assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 3a82f4edc..86b0bf26c 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -28,17 +28,14 @@ import ( "golang.org/x/crypto/ripemd160" "github.com/tendermint/go-wire" - . "github.com/tendermint/tmlibs/common" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { - var n int - var err error var hasher = ripemd160.New() - wire.WriteByteSlice(left, hasher, &n, &err) - wire.WriteByteSlice(right, hasher, &n, &err) + err := wire.EncodeByteSlice(hasher, left) + err = wire.EncodeByteSlice(hasher, right) if err != nil { - PanicCrisis(err) + panic(err) } return hasher.Sum(nil) } @@ -68,11 +65,12 @@ func SimpleHashFromBinaries(items []interface{}) []byte { // General Convenience func SimpleHashFromBinary(item interface{}) []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteBinary(item, hasher, n, err) - if *err != nil { - PanicCrisis(err) + hasher := ripemd160.New() + bz, err := wire.MarshalBinary(item) + if err != nil { + panic(err) } + hasher.Write(bz) return hasher.Sum(nil) } From ff230682d1a058e2707600c4ba45a7fa6d6b39f5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 21:20:53 -0800 Subject: [PATCH 357/515] Fix logical time (#122) Should fix a nondeterministic bug so... 
--- Makefile | 4 ++ common/repeat_timer.go | 6 +- common/repeat_timer_test.go | 114 ++++++++++++++++++++---------------- 3 files changed, 69 insertions(+), 55 deletions(-) diff --git a/Makefile b/Makefile index e15356c2c..ae2c71610 100644 --- a/Makefile +++ b/Makefile @@ -62,12 +62,16 @@ get_vendor_deps: @echo "--> Running glide install" @glide install + ######################################## ### Testing test: go test -tags gcc `glide novendor` +test100: + @for i in {1..100}; do make test; done + ######################################## ### Formatting, linting, and vetting diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 2e6cb81c8..cb227199e 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -80,13 +80,11 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) { } // Init `lasttime` end - timeleft := interval for { select { case newtime := <-source: elapsed := newtime.Sub(lasttime) - timeleft -= elapsed - if timeleft <= 0 { + if interval <= elapsed { // Block for determinism until the ticker is stopped. select { case t.ch <- newtime: @@ -97,7 +95,7 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) { // Don't try to "catch up" by sending more. // "Ticker adjusts the intervals or drops ticks to make up for // slow receivers" - https://golang.org/pkg/time/#Ticker - timeleft = interval + lasttime = newtime } case <-t.quit: return // done diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 5a3a4c0a6..5598922c5 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -1,6 +1,7 @@ package common import ( + "sync" "testing" "time" @@ -13,29 +14,42 @@ func TestDefaultTicker(t *testing.T) { ticker.Stop() } -func TestRepeat(t *testing.T) { +func TestRepeatTimer(t *testing.T) { ch := make(chan time.Time, 100) - lt := time.Time{} // zero time is year 1 + mtx := new(sync.Mutex) - // tick fires `cnt` times for each second. 
- tick := func(cnt int) { - for i := 0; i < cnt; i++ { - lt = lt.Add(time.Second) - ch <- lt - } + // tick() fires from start to end + // (exclusive) in milliseconds with incr. + // It locks on mtx, so subsequent calls + // run in series. + tick := func(startMs, endMs, incrMs time.Duration) { + mtx.Lock() + go func() { + for tMs := startMs; tMs < endMs; tMs += incrMs { + lt := time.Time{} + lt = lt.Add(tMs * time.Millisecond) + ch <- lt + } + mtx.Unlock() + }() } - // tock consumes Ticker.Chan() events `cnt` times. - tock := func(t *testing.T, rt *RepeatTimer, cnt int) { - for i := 0; i < cnt; i++ { - timeout := time.After(time.Second * 10) - select { - case <-rt.Chan(): - case <-timeout: - panic("expected RepeatTimer to fire") - } + // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". + tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { + + // Check against timesMs. + for _, timeMs := range timesMs { + tyme := <-rt.Chan() + sinceMs := tyme.Sub(time.Time{}) / time.Millisecond + assert.Equal(t, timeMs, int64(sinceMs)) } + + // TODO detect number of running + // goroutines to ensure that + // no other times will fire. + // See https://github.com/tendermint/tmlibs/issues/120. + time.Sleep(time.Millisecond * 100) done := true select { case <-rt.Chan(): @@ -46,46 +60,44 @@ func TestRepeat(t *testing.T) { } tm := NewLogicalTickerMaker(ch) - dur := time.Duration(10 * time.Millisecond) // less than a second - rt := NewRepeatTimerWithTickerMaker("bar", dur, tm) - - // Start at 0. - tock(t, rt, 0) - tick(1) // init time + rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) - tock(t, rt, 0) - tick(1) // wait 1 periods - tock(t, rt, 1) - tick(2) // wait 2 periods - tock(t, rt, 2) - tick(3) // wait 3 periods - tock(t, rt, 3) - tick(4) // wait 4 periods - tock(t, rt, 4) - - // Multiple resets leads to no firing. 
- for i := 0; i < 20; i++ { - time.Sleep(time.Millisecond) - rt.Reset() - } + /* NOTE: Useful for debugging deadlocks... + go func() { + time.Sleep(time.Second * 3) + trace := make([]byte, 102400) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) + }() + */ - // After this, it works as new. - tock(t, rt, 0) - tick(1) // init time + tick(0, 1000, 10) + tock(t, rt, []int64{}) + tick(1000, 2000, 10) + tock(t, rt, []int64{1000}) + tick(2005, 5000, 10) + tock(t, rt, []int64{2005, 3005, 4005}) + tick(5001, 5999, 1) + // Read 5005 instead of 5001 because + // it's 1 second greater than 4005. + tock(t, rt, []int64{5005}) + tick(6000, 7005, 1) + tock(t, rt, []int64{6005}) + tick(7033, 8032, 1) + tock(t, rt, []int64{7033}) - tock(t, rt, 0) - tick(1) // wait 1 periods - tock(t, rt, 1) - tick(2) // wait 2 periods - tock(t, rt, 2) - tick(3) // wait 3 periods - tock(t, rt, 3) - tick(4) // wait 4 periods - tock(t, rt, 4) + // After a reset, nothing happens + // until two ticks are received. + rt.Reset() + tock(t, rt, []int64{}) + tick(8040, 8041, 1) + tock(t, rt, []int64{}) + tick(9555, 9556, 1) + tock(t, rt, []int64{9555}) // After a stop, nothing more is sent. rt.Stop() - tock(t, rt, 0) + tock(t, rt, []int64{}) // Another stop panics. 
assert.Panics(t, func() { rt.Stop() }) From 7ef6d4b8132dc8e9bc035b6fa9311934c67d5f87 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 21:25:57 -0800 Subject: [PATCH 358/515] Glide update --- glide.lock | 35 +++++++++++++++++------------------ glide.yaml | 3 --- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/glide.lock b/glide.lock index cd11b7588..e23eb217a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: c2db6960e66e1f56fbce88caec470cbde14701763efb4a26d2f3fabd2f979a96 -updated: 2018-01-14T17:26:45.597677436-05:00 +hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 +updated: 2018-01-14T21:24:21.241420637-08:00 imports: - name: github.com/davecgh/go-spew version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 @@ -8,7 +8,7 @@ imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 953e747656a7bbb5e1f998608b460458958b70cc + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -16,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -43,15 +43,17 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: 
github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 57afd63c68602b63ed976de00dd066ccb3c319db + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -61,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: 34011bf325bce385408353a30b101fe5e923eb6e + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -81,33 +83,30 @@ imports: - leveldb/util - name: github.com/tendermint/go-wire version: b93ebdd4f306833936c243561ec30af3455dc764 - subpackages: - - data - - data/base58 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - name: golang.org/x/crypto - version: 95a4943f35d008beabde8c11e5075a1b714e6419 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 83801418e1b59fb1880e363299581ee543af32ca + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: e19ae1496984b1c655b8044a65c0300a3c878dd3 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f + version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c subpackages: - assert - require diff --git 
a/glide.yaml b/glide.yaml index e295781cb..a28bd39ec 100644 --- a/glide.yaml +++ b/glide.yaml @@ -17,9 +17,6 @@ import: - leveldb/opt - package: github.com/tendermint/go-wire version: sdk2 - subpackages: - - data - - data/base58 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From 580c3db8f974ec771fd4d7b64b2bbf690002bc75 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 25 Jan 2018 20:05:23 -0800 Subject: [PATCH 359/515] Hashable -> Hasher; SimpleMap upgrade; No "SimpleHashFromBinary" (#128) * Update SimpleMap to hash both keys and values for benefit; Hashable is Hasher; Don't assume go-wire --- glide.lock | 6 +++--- merkle/simple_map.go | 29 +++++++++++++------------- merkle/simple_map_test.go | 42 ++++++++++++++++++++++---------------- merkle/simple_proof.go | 10 ++++----- merkle/simple_tree.go | 26 ++++++++++------------- merkle/simple_tree_test.go | 6 +++--- merkle/types.go | 2 +- 7 files changed, 61 insertions(+), 60 deletions(-) diff --git a/glide.lock b/glide.lock index e23eb217a..8ed27e0b0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-14T21:24:21.241420637-08:00 +updated: 2018-01-21T03:46:56.821595635-08:00 imports: - name: github.com/davecgh/go-spew version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 @@ -65,7 +65,7 @@ imports: - name: github.com/spf13/pflag version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: @@ -82,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b93ebdd4f306833936c243561ec30af3455dc764 + version: 0cce10e82786f2d501827fbe158747dbc4ceeb43 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - name: 
golang.org/x/crypto diff --git a/merkle/simple_map.go b/merkle/simple_map.go index f637d30a7..b09b71d54 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -18,25 +18,25 @@ func NewSimpleMap() *SimpleMap { } } -func (sm *SimpleMap) Set(key string, value interface{}) { +func (sm *SimpleMap) Set(key string, value Hasher) { sm.sorted = false - // Is value Hashable? - var vBytes []byte - if hashable, ok := value.(Hashable); ok { - vBytes = hashable.Hash() - } else { - vBytes, _ = wire.MarshalBinary(value) - } + // Hash the key to blind it... why not? + khash := SimpleHashFromBytes([]byte(key)) + + // And the value is hashed too, so you can + // check for equality with a cached value (say) + // and make a determination to fetch or not. + vhash := value.Hash() sm.kvs = append(sm.kvs, cmn.KVPair{ - Key: []byte(key), - Value: vBytes, + Key: khash, + Value: vhash, }) } -// Merkle root hash of items sorted by key. -// NOTE: Behavior is undefined when key is duplicate. +// Merkle root hash of items sorted by key +// (UNSTABLE: and by value too if duplicate key). func (sm *SimpleMap) Hash() []byte { sm.Sort() return hashKVPairs(sm.kvs) @@ -51,7 +51,6 @@ func (sm *SimpleMap) Sort() { } // Returns a copy of sorted KVPairs. -// CONTRACT: The returned slice must not be mutated. 
func (sm *SimpleMap) KVPairs() cmn.KVPairs { sm.Sort() kvs := make(cmn.KVPairs, len(sm.kvs)) @@ -78,9 +77,9 @@ func (kv kvPair) Hash() []byte { } func hashKVPairs(kvs cmn.KVPairs) []byte { - kvsH := make([]Hashable, 0, len(kvs)) + kvsH := make([]Hasher, 0, len(kvs)) for _, kvp := range kvs { kvsH = append(kvsH, kvPair(kvp)) } - return SimpleHashFromHashables(kvsH) + return SimpleHashFromHashers(kvsH) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 946858550..61210132b 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -7,41 +7,47 @@ import ( "github.com/stretchr/testify/assert" ) +type strHasher string + +func (str strHasher) Hash() []byte { + return SimpleHashFromBytes([]byte(str)) +} + func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() - db.Set("key1", "value1") - assert.Equal(t, "d7df3e1d47fe38b51f8d897a88828026807a86b6", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + assert.Equal(t, "19618304d1ad2635c4238bce87f72331b22a11a1", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value2") - assert.Equal(t, "db415336c9be129ac38259b935a49d8e9c248c88", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value2")) + assert.Equal(t, "51cb96d3d41e1714def72eb4bacc211de9ddf284", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value1") - db.Set("key2", "value2") - assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + assert.Equal(t, "58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key2", "value2") // NOTE: out of order - db.Set("key1", "value1") - assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't 
match") + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + assert.Equal(t, "58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value1") - db.Set("key2", "value2") - db.Set("key3", "value3") - assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key2", "value2") // NOTE: out of order - db.Set("key1", "value1") - db.Set("key3", "value3") - assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index f75568fd9..83f89e598 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -10,8 +10,8 @@ type SimpleProof struct { } // proofs[0] is the proof for items[0]. -func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashables(items) +func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashers(items) rootHash = rootSPN.Hash proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { @@ -109,7 +109,7 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte { // trails[0].Hash is the leaf hash for items[0]. // trails[i].Parent.Parent....Parent == root for all i. 
-func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { +func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { // Recursive impl. switch len(items) { case 0: @@ -118,8 +118,8 @@ func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *Sim trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} return []*SimpleProofNode{trail}, trail default: - lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) + lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) root := &SimpleProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 86b0bf26c..182f2fdaa 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -54,28 +54,25 @@ func SimpleHashFromHashes(hashes [][]byte) []byte { } } -// Convenience for SimpleHashFromHashes. -func SimpleHashFromBinaries(items []interface{}) []byte { - hashes := make([][]byte, len(items)) - for i, item := range items { - hashes[i] = SimpleHashFromBinary(item) +// NOTE: Do not implement this, use SimpleHashFromByteslices instead. +// type Byteser interface { Bytes() []byte } +// func SimpleHashFromBytesers(items []Byteser) []byte { ... } + +func SimpleHashFromByteslices(bzs [][]byte) []byte { + hashes := make([][]byte, len(bzs)) + for i, bz := range bzs { + hashes[i] = SimpleHashFromBytes(bz) } return SimpleHashFromHashes(hashes) } -// General Convenience -func SimpleHashFromBinary(item interface{}) []byte { +func SimpleHashFromBytes(bz []byte) []byte { hasher := ripemd160.New() - bz, err := wire.MarshalBinary(item) - if err != nil { - panic(err) - } hasher.Write(bz) return hasher.Sum(nil) } -// Convenience for SimpleHashFromHashes. 
-func SimpleHashFromHashables(items []Hashable) []byte { +func SimpleHashFromHashers(items []Hasher) []byte { hashes := make([][]byte, len(items)) for i, item := range items { hash := item.Hash() @@ -84,8 +81,7 @@ func SimpleHashFromHashables(items []Hashable) []byte { return SimpleHashFromHashes(hashes) } -// Convenience for SimpleHashFromHashes. -func SimpleHashFromMap(m map[string]interface{}) []byte { +func SimpleHashFromMap(m map[string]Hasher) []byte { sm := NewSimpleMap() for k, v := range m { sm.Set(k, v) diff --git a/merkle/simple_tree_test.go b/merkle/simple_tree_test.go index 6299fa33b..26f35c807 100644 --- a/merkle/simple_tree_test.go +++ b/merkle/simple_tree_test.go @@ -19,14 +19,14 @@ func TestSimpleProof(t *testing.T) { total := 100 - items := make([]Hashable, total) + items := make([]Hasher, total) for i := 0; i < total; i++ { items[i] = testItem(RandBytes(32)) } - rootHash := SimpleHashFromHashables(items) + rootHash := SimpleHashFromHashers(items) - rootHash2, proofs := SimpleProofsFromHashables(items) + rootHash2, proofs := SimpleProofsFromHashers(items) if !bytes.Equal(rootHash, rootHash2) { t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) diff --git a/merkle/types.go b/merkle/types.go index 93541eda5..1a6d75e0c 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -18,6 +18,6 @@ type Tree interface { IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) } -type Hashable interface { +type Hasher interface { Hash() []byte } From c75298e3594a0d5f0c88112fbc0543edb6212d5b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 21 Jan 2018 18:47:18 -0800 Subject: [PATCH 360/515] Update SimpleMap to hash both keys and values for benefit; Hashable is Hasher; Don't assume go-wire --- merkle/types.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/merkle/types.go b/merkle/types.go index 1a6d75e0c..3881f3793 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -21,3 +21,7 @@ 
type Tree interface { type Hasher interface { Hash() []byte } + +type Byteser interface { + Bytes() []byte +} From b95cac5f4fbedd187a0503cc18452fd4092413e8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 24 Jan 2018 15:13:39 -0800 Subject: [PATCH 361/515] Remove unnecessary Byteser interface --- merkle/types.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/merkle/types.go b/merkle/types.go index 3881f3793..1a6d75e0c 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -21,7 +21,3 @@ type Tree interface { type Hasher interface { Hash() []byte } - -type Byteser interface { - Bytes() []byte -} From f6dbe9ba054200d9d0753ac0407d1864f90a2b8d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 24 Jan 2018 15:55:31 -0800 Subject: [PATCH 362/515] Refactor string -> dbBackendType --- db/backend_test.go | 6 +++--- db/c_level_db.go | 4 ++-- db/c_level_db_test.go | 4 ++-- db/common_test.go | 2 +- db/db.go | 18 ++++++++++-------- db/fsdb.go | 2 +- db/go_level_db.go | 4 ++-- db/mem_db.go | 2 +- 8 files changed, 22 insertions(+), 20 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 0f4346f2e..0227eb547 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend string) { +func testBackendGetSetDelete(t *testing.T, backend dbBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() @@ -141,9 +141,9 @@ func TestBackendsNilKeys(t *testing.T) { } } -func TestGoLevelDBBackendStr(t *testing.T) { +func TestGoLevelDBBackend(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, GoLevelDBBackendStr, "") + db := NewDB(name, GoLevelDBBackend, "") defer cleanupDBDir("", name) _, ok := db.(*GoLevelDB) diff --git a/db/c_level_db.go b/db/c_level_db.go index f1a5a3aef..a59137883 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ 
-14,8 +14,8 @@ func init() { dbCreator := func(name string, dir string) (DB, error) { return NewCLevelDB(name, dir) } - registerDBCreator(LevelDBBackendStr, dbCreator, true) - registerDBCreator(CLevelDBBackendStr, dbCreator, false) + registerDBCreator(LevelDBBackend, dbCreator, true) + registerDBCreator(CLevelDBBackend, dbCreator, false) } var _ DB = (*CLevelDB)(nil) diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 89993fbac..34bb72273 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -86,9 +86,9 @@ func bytes2Int64(buf []byte) int64 { } */ -func TestCLevelDBBackendStr(t *testing.T) { +func TestCLevelDBBackend(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") + db := NewDB(name, LevelDBBackend, "") defer cleanupDBDir("", name) _, ok := db.(*CLevelDB) diff --git a/db/common_test.go b/db/common_test.go index 2a5d01818..1a529949f 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend string) (db DB) { +func newTempDB(t *testing.T, backend dbBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index 25ff93ec5..1428c9c42 100644 --- a/db/db.go +++ b/db/db.go @@ -5,19 +5,21 @@ import "fmt" //---------------------------------------- // Main entry +type dbBackendType string + const ( - LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc - CLevelDBBackendStr = "cleveldb" - GoLevelDBBackendStr = "goleveldb" - MemDBBackendStr = "memdb" - FSDBBackendStr = "fsdb" // using the filesystem naively + LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend dbBackendType = "cleveldb" + GoLevelDBBackend dbBackendType = "goleveldb" + 
MemDBBackend dbBackendType = "memDB" + FSDBBackend dbBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[string]dbCreator{} +var backends = map[dbBackendType]dbCreator{} -func registerDBCreator(backend string, creator dbCreator, force bool) { +func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if !force && ok { return @@ -25,7 +27,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend string, dir string) DB { +func NewDB(name string, backend dbBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) diff --git a/db/fsdb.go b/db/fsdb.go index 45c3231f6..578c1785a 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -19,7 +19,7 @@ const ( ) func init() { - registerDBCreator(FSDBBackendStr, func(name string, dir string) (DB, error) { + registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) { dbPath := filepath.Join(dir, name+".db") return NewFSDB(dbPath), nil }, false) diff --git a/db/go_level_db.go b/db/go_level_db.go index 7d60e060f..9fed329bf 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -17,8 +17,8 @@ func init() { dbCreator := func(name string, dir string) (DB, error) { return NewGoLevelDB(name, dir) } - registerDBCreator(LevelDBBackendStr, dbCreator, false) - registerDBCreator(GoLevelDBBackendStr, dbCreator, false) + registerDBCreator(LevelDBBackend, dbCreator, false) + registerDBCreator(GoLevelDBBackend, dbCreator, false) } var _ DB = (*GoLevelDB)(nil) diff --git a/db/mem_db.go b/db/mem_db.go index 1e3bee5a5..f2c484fa7 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -7,7 +7,7 @@ import ( ) func init() { - registerDBCreator(MemDBBackendStr, func(name string, dir string) (DB, error) { + registerDBCreator(MemDBBackend, func(name string, dir 
string) (DB, error) { return NewMemDB(), nil }, false) } From 9ccfe161ad47c9471796107d9a9a68322caf5960 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 2 Feb 2018 13:51:39 +0400 Subject: [PATCH 363/515] lowercase memDB type key --- db/db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index 1428c9c42..ced0ec9ca 100644 --- a/db/db.go +++ b/db/db.go @@ -11,7 +11,7 @@ const ( LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc CLevelDBBackend dbBackendType = "cleveldb" GoLevelDBBackend dbBackendType = "goleveldb" - MemDBBackend dbBackendType = "memDB" + MemDBBackend dbBackendType = "memdb" FSDBBackend dbBackendType = "fsdb" // using the filesystem naively ) From 2e765462234e7749806cd00fa326acc25d704448 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 2 Feb 2018 18:09:48 +0100 Subject: [PATCH 364/515] Clean up glide.yaml --- glide.lock | 34 +++++++++++++++------------------- glide.yaml | 15 +++++++++++++-- 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/glide.lock b/glide.lock index 8ed27e0b0..875f9837b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,14 +1,14 @@ -hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-21T03:46:56.821595635-08:00 +hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 +updated: 2018-02-02T18:08:31.85309+01:00 imports: - name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + version: 346938d642f2ec3594ed81d874461961cd0faa76 subpackages: - spew - name: github.com/fsnotify/fsnotify - version: 4da3e2cfbabc9f751898f250b49f2439785783a1 + version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 4dc7be5d2d12881735283bcab7352178e190fc71 subpackages: - log - log/level @@ -18,7 +18,7 @@ imports: - name: github.com/go-stack/stack version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - 
name: github.com/gogo/protobuf - version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 + version: 1adfc126b41513cc696b209667c8656ea7aac67c subpackages: - gogoproto - proto @@ -43,17 +43,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure - version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: b4575eea38cca1123ec2dc90c26529b5c5acfcff - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: acdc4509485b587f5e675510c4f2c63e90ff68a8 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: bb8f1927f2a9d3ab41c9340aa034f6b803f4359c subpackages: - mem - name: github.com/spf13/cast @@ -61,7 +59,7 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 12bd96e66386c1960ab0f74ced1362f66f552f7b + version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 - name: github.com/spf13/pflag version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper @@ -82,15 +80,13 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 0cce10e82786f2d501827fbe158747dbc4ceeb43 -- name: github.com/tendermint/log15 - version: f91285dece9f4875421b481da3e613d83d44f29b + version: e723d95ac2838b7ae9919ada25004859236c32ff - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 37707fdb30a5b38865cfb95e5aab41707daec7fd subpackages: - unix - name: 
golang.org/x/text @@ -99,14 +95,14 @@ imports: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c + version: 12b6f73e6084dad08a7c6e575284b177ecafbc71 subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index a28bd39ec..42d43e4be 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,28 +1,39 @@ package: github.com/tendermint/tmlibs import: - package: github.com/go-kit/kit + version: ^0.6.0 subpackages: - log - log/level - log/term - package: github.com/go-logfmt/logfmt + version: ^0.3.0 +- package: github.com/gogo/protobuf + version: ^1.0.0 + subpackages: + - gogoproto + - proto - package: github.com/jmhodges/levigo - package: github.com/pkg/errors + version: ^0.8.0 - package: github.com/spf13/cobra + version: ^0.0.1 - package: github.com/spf13/viper + version: ^1.0.0 - package: github.com/syndtr/goleveldb subpackages: - leveldb - leveldb/errors + - leveldb/iterator - leveldb/opt - package: github.com/tendermint/go-wire - version: sdk2 -- package: github.com/tendermint/log15 + version: develop - package: golang.org/x/crypto subpackages: - ripemd160 testImport: - package: github.com/stretchr/testify + version: ^1.2.1 subpackages: - assert - require From cbc63518e589d6b0069f9750127fa83dd6ea5ee3 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 2 Feb 2018 18:50:24 +0100 Subject: [PATCH 365/515] Export DbBackendType in order to fix IAVL tests --- db/backend_test.go | 2 +- db/common_test.go | 2 +- db/db.go | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 0227eb547..9e73a1f66 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func 
cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend dbBackendType) { +func testBackendGetSetDelete(t *testing.T, backend DbBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() diff --git a/db/common_test.go b/db/common_test.go index 1a529949f..4209b44d7 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend dbBackendType) (db DB) { +func newTempDB(t *testing.T, backend DbBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index ced0ec9ca..ac19f6b4b 100644 --- a/db/db.go +++ b/db/db.go @@ -5,21 +5,21 @@ import "fmt" //---------------------------------------- // Main entry -type dbBackendType string +type DbBackendType string const ( - LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc - CLevelDBBackend dbBackendType = "cleveldb" - GoLevelDBBackend dbBackendType = "goleveldb" - MemDBBackend dbBackendType = "memdb" - FSDBBackend dbBackendType = "fsdb" // using the filesystem naively + LevelDBBackend DbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DbBackendType = "cleveldb" + GoLevelDBBackend DbBackendType = "goleveldb" + MemDBBackend DbBackendType = "memdb" + FSDBBackend DbBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[dbBackendType]dbCreator{} +var backends = map[DbBackendType]dbCreator{} -func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { +func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if 
!force && ok { return @@ -27,7 +27,7 @@ func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend dbBackendType, dir string) DB { +func NewDB(name string, backend DbBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) From 1b5176003a7733baed745dd9b9c153a0893ad46a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 2 Feb 2018 22:31:17 -0500 Subject: [PATCH 366/515] DbBackend -> DBBackend --- db/backend_test.go | 2 +- db/common_test.go | 2 +- db/db.go | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 9e73a1f66..80fbbb140 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend DbBackendType) { +func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() diff --git a/db/common_test.go b/db/common_test.go index 4209b44d7..1b0f00416 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend DbBackendType) (db DB) { +func newTempDB(t *testing.T, backend DBBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index ac19f6b4b..869937660 100644 --- a/db/db.go +++ b/db/db.go @@ -5,21 +5,21 @@ import "fmt" //---------------------------------------- // Main entry -type DbBackendType string +type DBBackendType string const ( - LevelDBBackend DbBackendType = "leveldb" // legacy, 
defaults to goleveldb unless +gcc - CLevelDBBackend DbBackendType = "cleveldb" - GoLevelDBBackend DbBackendType = "goleveldb" - MemDBBackend DbBackendType = "memdb" - FSDBBackend DbBackendType = "fsdb" // using the filesystem naively + LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DBBackendType = "cleveldb" + GoLevelDBBackend DBBackendType = "goleveldb" + MemDBBackend DBBackendType = "memdb" + FSDBBackend DBBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[DbBackendType]dbCreator{} +var backends = map[DBBackendType]dbCreator{} -func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { +func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if !force && ok { return @@ -27,7 +27,7 @@ func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend DbBackendType, dir string) DB { +func NewDB(name string, backend DBBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) From 690d6c60701758ab757d11ef674906f64e6b618d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Jan 2018 22:56:36 -0500 Subject: [PATCH 367/515] cli: WriteDemoConfig -> WriteConfigVals --- CHANGELOG.md | 6 ++++++ cli/helper.go | 16 +++++----------- cli/setup.go | 6 ++++-- cli/setup_test.go | 18 +++++++++++++++--- 4 files changed, 30 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe2c2fe94..42b8cdd61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.7.0 (TBD) + +BREAKING: + + - [cli] WriteDemoConfig -> WriteConfigValues + ## 0.6.0 (December 29, 2017) BREAKING: diff --git a/cli/helper.go b/cli/helper.go index 845c17dbf..878cf26e5 100644 --- a/cli/helper.go +++ 
b/cli/helper.go @@ -9,21 +9,15 @@ import ( "path/filepath" ) -// WriteDemoConfig writes a toml file with the given values. -// It returns the RootDir the config.toml file is stored in, -// or an error if writing was impossible -func WriteDemoConfig(vals map[string]string) (string, error) { - cdir, err := ioutil.TempDir("", "test-cli") - if err != nil { - return "", err - } +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func WriteConfigVals(dir string, vals map[string]string) error { data := "" for k, v := range vals { data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) } - cfile := filepath.Join(cdir, "config.toml") - err = ioutil.WriteFile(cfile, []byte(data), 0666) - return cdir, err + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) } // RunWithArgs executes the given command with the specified command line args diff --git a/cli/setup.go b/cli/setup.go index 2dcadb407..dc34abdf9 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -3,6 +3,7 @@ package cli import ( "fmt" "os" + "path/filepath" "strings" "github.com/pkg/errors" @@ -129,8 +130,9 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { homeDir := viper.GetString(HomeFlag) viper.Set(HomeFlag, homeDir) - viper.SetConfigName("config") // name of config file (without extension) - viper.AddConfigPath(homeDir) // search root directory + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(homeDir) // search root directory + viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { diff --git a/cli/setup_test.go b/cli/setup_test.go index e0fd75d8a..04209e493 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -2,6 +2,7 @@ package cli import ( "fmt" + "io/ioutil" "strconv" "strings" "testing" @@ -54,11 +55,20 @@ func TestSetupEnv(t *testing.T) { } } +func tempDir() string { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + panic(err) + } + return cdir +} + func TestSetupConfig(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. cval1 := "fubble" - conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) require.Nil(t, err) cases := []struct { @@ -116,10 +126,12 @@ func TestSetupUnmarshal(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "someone", "else" - conf1, err := WriteDemoConfig(map[string]string{"name": cval1}) + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"name": cval1}) require.Nil(t, err) // even with some ignored fields, should be no problem - conf2, err := WriteDemoConfig(map[string]string{"name": cval2, "foo": "bar"}) + conf2 := tempDir() + err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) require.Nil(t, err) // unused is not declared on a flag and remains from base From 1d7fc78ea171587e9e63da566d3da1b127bfd14c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 2 Feb 2018 23:49:14 -0500 Subject: [PATCH 368/515] update glide --- glide.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/glide.lock b/glide.lock index 875f9837b..4f3c395ce 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 -updated: 2018-02-02T18:08:31.85309+01:00 +updated: 2018-02-02T23:47:17.788237939-05:00 imports: - name: 
github.com/davecgh/go-spew version: 346938d642f2ec3594ed81d874461961cd0faa76 @@ -80,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: e723d95ac2838b7ae9919ada25004859236c32ff + version: dec83f641903b22f039da3974607859715d0377e - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: From d6d97889f21f5ff168de16191be0f9c937fef1f8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 3 Feb 2018 01:29:39 -0500 Subject: [PATCH 369/515] merkle: remove go-wire dep by copying EncodeByteSlice --- glide.lock | 14 ++++++-------- glide.yaml | 2 -- merkle/simple_map.go | 5 ++--- merkle/simple_tree.go | 6 ++---- merkle/types.go | 24 ++++++++++++++++++++++++ 5 files changed, 34 insertions(+), 17 deletions(-) diff --git a/glide.lock b/glide.lock index 4f3c395ce..10dec980b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,6 @@ -hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 -updated: 2018-02-02T23:47:17.788237939-05:00 +hash: 98752078f39da926f655268b3b143f713d64edd379fc9fcb1210d9d8aa7ab4e0 +updated: 2018-02-03T01:28:00.221548057-05:00 imports: -- name: github.com/davecgh/go-spew - version: 346938d642f2ec3594ed81d874461961cd0faa76 - subpackages: - - spew - name: github.com/fsnotify/fsnotify version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit @@ -79,8 +75,6 @@ imports: - leveldb/storage - leveldb/table - leveldb/util -- name: github.com/tendermint/go-wire - version: dec83f641903b22f039da3974607859715d0377e - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: @@ -97,6 +91,10 @@ imports: - name: gopkg.in/yaml.v2 version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: +- name: github.com/davecgh/go-spew + version: 346938d642f2ec3594ed81d874461961cd0faa76 + subpackages: + - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml 
b/glide.yaml index 42d43e4be..b12c72a16 100644 --- a/glide.yaml +++ b/glide.yaml @@ -26,8 +26,6 @@ import: - leveldb/errors - leveldb/iterator - leveldb/opt -- package: github.com/tendermint/go-wire - version: develop - package: golang.org/x/crypto subpackages: - ripemd160 diff --git a/merkle/simple_map.go b/merkle/simple_map.go index b09b71d54..b59e3b4b6 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -1,7 +1,6 @@ package merkle import ( - "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" "golang.org/x/crypto/ripemd160" ) @@ -65,11 +64,11 @@ type kvPair cmn.KVPair func (kv kvPair) Hash() []byte { hasher := ripemd160.New() - err := wire.EncodeByteSlice(hasher, kv.Key) + err := encodeByteSlice(hasher, kv.Key) if err != nil { panic(err) } - err = wire.EncodeByteSlice(hasher, kv.Value) + err = encodeByteSlice(hasher, kv.Value) if err != nil { panic(err) } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 182f2fdaa..a363ea8e8 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -26,14 +26,12 @@ package merkle import ( "golang.org/x/crypto/ripemd160" - - "github.com/tendermint/go-wire" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { var hasher = ripemd160.New() - err := wire.EncodeByteSlice(hasher, left) - err = wire.EncodeByteSlice(hasher, right) + err := encodeByteSlice(hasher, left) + err = encodeByteSlice(hasher, right) if err != nil { panic(err) } diff --git a/merkle/types.go b/merkle/types.go index 1a6d75e0c..e0fe35fa8 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -1,5 +1,10 @@ package merkle +import ( + "encoding/binary" + "io" +) + type Tree interface { Size() (size int) Height() (height int8) @@ -21,3 +26,22 @@ type Tree interface { type Hasher interface { Hash() []byte } + +//----------------------------------------------------------------------- +// NOTE: these are duplicated from go-wire so we dont need go-wire as a dep + +func encodeByteSlice(w io.Writer, bz 
[]byte) (err error) { + err = encodeVarint(w, int64(len(bz))) + if err != nil { + return + } + _, err = w.Write(bz) + return +} + +func encodeVarint(w io.Writer, i int64) (err error) { + var buf [10]byte + n := binary.PutVarint(buf[:], i) + _, err = w.Write(buf[0:n]) + return +} From 951333ecb0c82d1022bd2fc49da63977f7378eb2 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 2 Feb 2018 23:40:38 -0700 Subject: [PATCH 370/515] common: IsHex should be able to handle 0X prefixed strings IsHex should also successfully decode strings prefixed with 0X instead of only 0x strings. Also add tests generally for IsHex. --- common/string.go | 2 +- common/string_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/common/string.go b/common/string.go index 6924e6a5b..a6895eb25 100644 --- a/common/string.go +++ b/common/string.go @@ -29,7 +29,7 @@ func LeftPadString(s string, totalLength int) string { // IsHex returns true for non-empty hex-string prefixed with "0x" func IsHex(s string) bool { - if len(s) > 2 && s[:2] == "0x" { + if len(s) > 2 && strings.EqualFold(s[:2], "0x") { _, err := hex.DecodeString(s[2:]) return err == nil } diff --git a/common/string_test.go b/common/string_test.go index a82f1022b..b8a917c16 100644 --- a/common/string_test.go +++ b/common/string_test.go @@ -12,3 +12,21 @@ func TestStringInSlice(t *testing.T) { assert.True(t, StringInSlice("", []string{""})) assert.False(t, StringInSlice("", []string{})) } + +func TestIsHex(t *testing.T) { + notHex := []string{ + "", " ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a", + "0xf ", "0x f", "0xp", "0x-", + "0xf", "0XBED", "0xF", "0xbed", // Odd lengths + } + for _, v := range notHex { + assert.False(t, IsHex(v), "%q is not hex", v) + } + hex := []string{ + "0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED", + "0X12", "0X0A", + } + for _, v := range hex { + assert.True(t, IsHex(v), "%q is hex", v) + } +} From 91b41ddd59788ef800804b036f47eda73442b780 Mon Sep 
17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 31 Jan 2018 12:13:59 +0400 Subject: [PATCH 371/515] add waitCh as an alternative to waitGroup new methods: - [CList] WaitChan() - [CElement] NextWaitChan() - [CElement] PrevWaitChan() Refs https://github.com/tendermint/tendermint/pull/1173 --- CHANGELOG.md | 7 +++++ clist/clist.go | 75 +++++++++++++++++++++++++++++++++++---------- clist/clist_test.go | 73 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42b8cdd61..2c9466126 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,13 @@ BREAKING: - [cli] WriteDemoConfig -> WriteConfigValues +## 0.6.1 (TBD) + +IMPROVEMENTS: + - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan() + to CElement. These can be used instead of blocking *Wait() methods + if you need to be able to send quit signal and not block forever + ## 0.6.0 (December 29, 2017) BREAKING: diff --git a/clist/clist.go b/clist/clist.go index a52920f8c..28d771a28 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -36,12 +36,14 @@ waiting on NextWait() (since it's just a read operation). */ type CElement struct { - mtx sync.RWMutex - prev *CElement - prevWg *sync.WaitGroup - next *CElement - nextWg *sync.WaitGroup - removed bool + mtx sync.RWMutex + prev *CElement + prevWg *sync.WaitGroup + prevWaitCh chan struct{} + next *CElement + nextWg *sync.WaitGroup + nextWaitCh chan struct{} + removed bool Value interface{} // immutable } @@ -84,6 +86,24 @@ func (e *CElement) PrevWait() *CElement { } } +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) PrevWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prevWaitCh +} + +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. 
+func (e *CElement) NextWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.nextWaitCh +} + // Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { e.mtx.RLock() @@ -142,9 +162,11 @@ func (e *CElement) SetNext(newNext *CElement) { // events, new Add calls must happen after all previous Wait calls have // returned. e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { e.nextWg.Done() + close(e.nextWaitCh) } } @@ -158,9 +180,11 @@ func (e *CElement) SetPrev(newPrev *CElement) { e.prev = newPrev if oldPrev != nil && newPrev == nil { e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { e.prevWg.Done() + close(e.prevWaitCh) } } @@ -173,9 +197,11 @@ func (e *CElement) SetRemoved() { // This wakes up anyone waiting in either direction. if e.prev == nil { e.prevWg.Done() + close(e.prevWaitCh) } if e.next == nil { e.nextWg.Done() + close(e.nextWaitCh) } } @@ -185,11 +211,12 @@ func (e *CElement) SetRemoved() { // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. type CList struct { - mtx sync.RWMutex - wg *sync.WaitGroup - head *CElement // first element - tail *CElement // last element - len int // list length + mtx sync.RWMutex + wg *sync.WaitGroup + waitCh chan struct{} + head *CElement // first element + tail *CElement // last element + len int // list length } func (l *CList) Init() *CList { @@ -197,6 +224,7 @@ func (l *CList) Init() *CList { defer l.mtx.Unlock() l.wg = waitGroup1() + l.waitCh = make(chan struct{}) l.head = nil l.tail = nil l.len = 0 @@ -258,23 +286,35 @@ func (l *CList) BackWait() *CElement { } } +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. 
+func (l *CList) WaitChan() <-chan struct{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + return l.waitCh +} + func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() defer l.mtx.Unlock() // Construct a new element e := &CElement{ - prev: nil, - prevWg: waitGroup1(), - next: nil, - nextWg: waitGroup1(), - removed: false, - Value: v, + prev: nil, + prevWg: waitGroup1(), + prevWaitCh: make(chan struct{}), + next: nil, + nextWg: waitGroup1(), + nextWaitCh: make(chan struct{}), + removed: false, + Value: v, } // Release waiters on FrontWait/BackWait maybe if l.len == 0 { l.wg.Done() + close(l.waitCh) } l.len += 1 @@ -313,6 +353,7 @@ func (l *CList) Remove(e *CElement) interface{} { // If we're removing the only item, make CList FrontWait/BackWait wait. if l.len == 1 { l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.waitCh = make(chan struct{}) } // Update l.len diff --git a/clist/clist_test.go b/clist/clist_test.go index 9d5272de5..31f821653 100644 --- a/clist/clist_test.go +++ b/clist/clist_test.go @@ -218,3 +218,76 @@ func TestScanRightDeleteRandom(t *testing.T) { t.Fatal("Failed to remove all elements from CList") } } + +func TestWaitChan(t *testing.T) { + l := New() + ch := l.WaitChan() + + // 1) add one element to an empty list + go l.PushBack(1) + <-ch + + // 2) and remove it + el := l.Front() + v := l.Remove(el) + if v != 1 { + t.Fatal("where is 1 coming from?") + } + + // 3) test iterating forward and waiting for Next (NextWaitChan and Next) + el = l.PushBack(0) + + done := make(chan struct{}) + pushed := 0 + go func() { + for i := 1; i < 100; i++ { + l.PushBack(i) + pushed++ + time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) + } + close(done) + }() + + next := el + seen := 0 +FOR_LOOP: + for { + select { + case <-next.NextWaitChan(): + next = next.Next() + seen++ + if next == nil { + continue + } + case <-done: + break FOR_LOOP + case <-time.After(10 * time.Second): + t.Fatal("max execution time") + } + } + + if pushed != seen 
{ + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } + + // 4) test iterating backwards (PrevWaitChan and Prev) + prev := next + seen = 0 +FOR_LOOP2: + for { + select { + case <-prev.PrevWaitChan(): + prev = prev.Prev() + seen++ + if prev == nil { + t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") + } + case <-time.After(5 * time.Second): + break FOR_LOOP2 + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } +} From 763dc2139300927522e1fc5aa5a1c7f777f6175a Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Tue, 6 Feb 2018 01:12:19 -0800 Subject: [PATCH 372/515] common/BitArray: reduce fragility with methods Fixes https://github.com/tendermint/tmlibs/issues/145 Fixes https://github.com/tendermint/tmlibs/issues/146 The code in here has been fragile when it comes to nil but these edge cases were never tested, although they've showed up in the wild and were only noticed because the reporter actually read the logs otherwise we'd have never known. This changes covers some of these cases and adds some tests. --- common/bit_array.go | 15 +++++++++++---- common/bit_array_test.go | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/common/bit_array.go b/common/bit_array.go index 68201bad6..7cc84705e 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -99,8 +99,14 @@ func (bA *BitArray) copyBits(bits int) *BitArray { // Returns a BitArray of larger bits size. func (bA *BitArray) Or(o *BitArray) *BitArray { - if bA == nil { - o.Copy() + if bA == nil && o == nil { + return nil + } + if bA == nil && o != nil { + return o.Copy() + } + if o == nil { + return bA.Copy() } bA.mtx.Lock() defer bA.mtx.Unlock() @@ -113,7 +119,7 @@ func (bA *BitArray) Or(o *BitArray) *BitArray { // Returns a BitArray of smaller bit size. 
func (bA *BitArray) And(o *BitArray) *BitArray { - if bA == nil { + if bA == nil || o == nil { return nil } bA.mtx.Lock() @@ -143,7 +149,8 @@ func (bA *BitArray) Not() *BitArray { } func (bA *BitArray) Sub(o *BitArray) *BitArray { - if bA == nil { + if bA == nil || o == nil { + // TODO: Decide if we should do 1's complement here? return nil } bA.mtx.Lock() diff --git a/common/bit_array_test.go b/common/bit_array_test.go index e4ac8bf6f..94a312b7e 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -3,6 +3,8 @@ package common import ( "bytes" "testing" + + "github.com/stretchr/testify/require" ) func randBitArray(bits int) (*BitArray, []byte) { @@ -26,6 +28,11 @@ func TestAnd(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.And(bA2) + var bNil *BitArray + require.Equal(t, bNil.And(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.And(nil), (*BitArray)(nil)) + require.Equal(t, bNil.And(nil), (*BitArray)(nil)) + if bA3.Bits != 31 { t.Error("Expected min bits", bA3.Bits) } @@ -46,6 +53,11 @@ func TestOr(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.Or(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Or(bA1), bA1) + require.Equal(t, bA1.Or(nil), bA1) + require.Equal(t, bNil.Or(nil), (*BitArray)(nil)) + if bA3.Bits != 51 { t.Error("Expected max bits") } @@ -66,6 +78,11 @@ func TestSub1(t *testing.T) { bA2, _ := randBitArray(51) bA3 := bA1.Sub(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + if bA3.Bits != bA1.Bits { t.Error("Expected bA1 bits") } @@ -89,6 +106,11 @@ func TestSub2(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.Sub(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + if bA3.Bits != bA1.Bits { t.Error("Expected bA1 bits") } From 
52ce4c20f8bc9b6da5fc1274bcce27c0b9dd738a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 9 Feb 2018 13:31:32 +0400 Subject: [PATCH 373/515] Fix RepeatTimer memory leak (#137) fix RepeatTimer memory leak (Refs #137) * test case * drain channels on reset Leaking memory: ``` leaktest.go:144: leaktest: leaked goroutine: goroutine 116 [chan send]: github.com/tendermint/tmlibs/common.(*RepeatTimer).fireRoutine(0xc42006a410, 0xc4203403c0, 0xc42031b2c0) /go/src/github.com/tendermint/tmlibs/common/repeat_timer.go:160 +0x6e created by github.com/tendermint/tmlibs/common.(*RepeatTimer).reset /go/src/github.com/tendermint/tmlibs/common/repeat_timer.go:196 +0xe9 ``` The alternative solution could be draining channels on the client side. * add one more select instead of draining thanks to Jae --- common/repeat_timer.go | 7 +++++-- common/repeat_timer_test.go | 33 +++++++++++++++++++++++++++++++++ glide.lock | 2 ++ glide.yaml | 1 + 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index cb227199e..dba5fbadd 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -155,7 +155,11 @@ func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { for { select { case t_ := <-ch: - t.ch <- t_ + select { + case t.ch <- t_: + case <-quit: + return + } case <-quit: // NOTE: `t.quit` races. 
return } @@ -210,7 +214,6 @@ func (t *RepeatTimer) stop() { t.ticker.Stop() t.ticker = nil /* - XXX From https://golang.org/pkg/time/#Ticker: "Stop the ticker to release associated resources" "After Stop, no more ticks will be sent" diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 5598922c5..160f4394a 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -1,10 +1,12 @@ package common import ( + "math/rand" "sync" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" ) @@ -102,3 +104,34 @@ func TestRepeatTimer(t *testing.T) { // Another stop panics. assert.Panics(t, func() { rt.Stop() }) } + +func TestRepeatTimerReset(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.Check(t)() + + timer := NewRepeatTimer("test", 20*time.Millisecond) + defer timer.Stop() + + // test we don't receive tick before duration ms. + select { + case <-timer.Chan(): + t.Fatal("did not expect to receive tick") + default: + } + + timer.Reset() + + // test we receive tick after Reset is called + select { + case <-timer.Chan(): + // all good + case <-time.After(40 * time.Millisecond): + t.Fatal("expected to receive tick after reset") + } + + // just random calls + for i := 0; i < 100; i++ { + time.Sleep(time.Duration(rand.Intn(40)) * time.Millisecond) + timer.Reset() + } +} diff --git a/glide.lock b/glide.lock index 10dec980b..a0ada5a4a 100644 --- a/glide.lock +++ b/glide.lock @@ -95,6 +95,8 @@ testImports: version: 346938d642f2ec3594ed81d874461961cd0faa76 subpackages: - spew +- name: github.com/fortytw2/leaktest + version: 3b724c3d7b8729a35bf4e577f71653aec6e53513 - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index b12c72a16..cf3da346b 100644 --- a/glide.yaml +++ b/glide.yaml @@ -35,3 +35,4 @@ testImport: subpackages: - assert - require +- package: github.com/fortytw2/leaktest From 
a57340ffb53aefb0fca1fc610d18fcbcc61b126f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 12 Feb 2018 11:38:31 +0400 Subject: [PATCH 374/515] add Quit method to Service interface remove deprecated QuitService --- CHANGELOG.md | 2 ++ common/service.go | 54 ++++++++++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c9466126..374a272d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ BREAKING: - [cli] WriteDemoConfig -> WriteConfigValues + - [common] added Quit method to Service interface, which returns a channel + which is closed once a service is stopped ## 0.6.1 (TBD) diff --git a/common/service.go b/common/service.go index d70d16a80..2502d671c 100644 --- a/common/service.go +++ b/common/service.go @@ -35,9 +35,13 @@ type Service interface { // Return true if the service is running IsRunning() bool + // Quit returns a channel, which is closed once service is stopped. + Quit() <-chan struct{} + // String representation of the service String() string + // SetLogger sets a logger. SetLogger(log.Logger) } @@ -88,12 +92,13 @@ type BaseService struct { name string started uint32 // atomic stopped uint32 // atomic - Quit chan struct{} + quit chan struct{} // The "subclass" of BaseService impl Service } +// NewBaseService creates a new BaseService. func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { if logger == nil { logger = log.NewNopLogger() @@ -102,16 +107,19 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { return &BaseService{ Logger: logger, name: name, - Quit: make(chan struct{}), + quit: make(chan struct{}), impl: impl, } } +// SetLogger implements Service by setting a logger. func (bs *BaseService) SetLogger(l log.Logger) { bs.Logger = l } -// Implements Servce +// Start implements Service by calling OnStart (if defined). 
An error will be +// returned if the service is already running or stopped. Not to start the +// stopped service, you need to call Reset. func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { @@ -133,17 +141,18 @@ func (bs *BaseService) Start() error { } } -// Implements Service +// OnStart implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStart() func (bs *BaseService) OnStart() error { return nil } -// Implements Service +// Stop implements Service by calling OnStop (if defined) and closing quit +// channel. An error will be returned if the service is already stopped. func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) bs.impl.OnStop() - close(bs.Quit) + close(bs.quit) return nil } else { bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) @@ -151,12 +160,13 @@ func (bs *BaseService) Stop() error { } } -// Implements Service +// OnStop implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} -// Implements Service +// Reset implements Service by calling OnReset callback (if defined). An error +// will be returned if the service is running. func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) @@ -166,41 +176,33 @@ func (bs *BaseService) Reset() error { // whether or not we've started, we can reset atomic.CompareAndSwapUint32(&bs.started, 1, 0) - bs.Quit = make(chan struct{}) + bs.quit = make(chan struct{}) return bs.impl.OnReset() } -// Implements Service +// OnReset implements Service by panicking. 
func (bs *BaseService) OnReset() error { PanicSanity("The service cannot be reset") return nil } -// Implements Service +// IsRunning implements Service by returning true or false depending on the +// service's state. func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } +// Wait blocks until the service is stopped. func (bs *BaseService) Wait() { - <-bs.Quit + <-bs.quit } -// Implements Servce +// String implements Servce by returning a string representation of the service. func (bs *BaseService) String() string { return bs.name } -//---------------------------------------- - -type QuitService struct { - BaseService -} - -func NewQuitService(logger log.Logger, name string, impl Service) *QuitService { - if logger != nil { - logger.Info("QuitService is deprecated, use BaseService instead") - } - return &QuitService{ - BaseService: *NewBaseService(logger, name, impl), - } +// Quit Implements Service by returning a quit channel. 
+func (bs *BaseService) Quit() <-chan struct{} { + return bs.quit } From 737c30c19d43b12e132843d95f6250b216a9c215 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 12 Feb 2018 19:12:24 -0500 Subject: [PATCH 375/515] minor nit --- common/repeat_timer.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index dba5fbadd..5d049738d 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -20,15 +20,17 @@ type Ticker interface { } //---------------------------------------- -// defaultTickerMaker +// defaultTicker + +var _ Ticker = (*defaultTicker)(nil) + +type defaultTicker time.Ticker func defaultTickerMaker(dur time.Duration) Ticker { ticker := time.NewTicker(dur) return (*defaultTicker)(ticker) } -type defaultTicker time.Ticker - // Implements Ticker func (t *defaultTicker) Chan() <-chan time.Time { return t.C @@ -151,12 +153,13 @@ func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMake return t } +// receive ticks on ch, send out on t.ch func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { for { select { - case t_ := <-ch: + case tick := <-ch: select { - case t.ch <- t_: + case t.ch <- tick: case <-quit: return } From c6163bdab2d627855400284c90a9c95a53d8eb87 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 19 Feb 2018 17:05:36 -0500 Subject: [PATCH 376/515] version bump and changelog --- CHANGELOG.md | 25 +++++++++++++++++++++---- version/version.go | 2 +- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 374a272d4..89b841d4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,33 @@ BREAKING: + - [db] Major API upgrade. See `db/types.go`. + - [common] added `Quit() <-chan struct{}` to Service interface. + The returned channel is closed when service is stopped. 
+ - [common] Remove HTTP functions + - [common] Heap.Push takes an `int`, new Heap.PushComparable takes the comparable. + - [logger] Removed. Use `log` + - [merkle] Major API updade - uses cmn.KVPairs. - [cli] WriteDemoConfig -> WriteConfigValues - - [common] added Quit method to Service interface, which returns a channel - which is closed once a service is stopped + - [all] Remove go-wire dependency! -## 0.6.1 (TBD) +FEATURES: + + - [db] New FSDB that uses the filesystem directly + - [common] HexBytes + - [common] KVPair and KI64Pair (protobuf based key-value pair objects) IMPROVEMENTS: + - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan() - to CElement. These can be used instead of blocking *Wait() methods + to CElement. These can be used instead of blocking `*Wait()` methods if you need to be able to send quit signal and not block forever + - [common] IsHex handles 0x-prefix + +BUG FIXES: + + - [common] BitArray check for nil arguments + - [common] Fix memory leak in RepeatTimer ## 0.6.0 (December 29, 2017) diff --git a/version/version.go b/version/version.go index 6cc887286..2c0474fa8 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.6.0" +const Version = "0.7.0" From 1b9b5652a199ab0be2e781393fb275b66377309d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 20 Feb 2018 22:00:20 -0500 Subject: [PATCH 377/515] changelog date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89b841d4f..f3a305b20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 0.7.0 (TBD) +## 0.7.0 (February 20, 2018) BREAKING: From 26f2ab65f82cfc6873c312e8030104c47c05f10e Mon Sep 17 00:00:00 2001 From: Zaki Manian Date: Tue, 27 Feb 2018 04:01:49 -0800 Subject: [PATCH 378/515] Switch to dep from glide (#155) --- Gopkg.lock | 220 +++++++++++++++++++++++++++++++++++++++++++++ Gopkg.toml | 66 ++++++++++++++ Makefile | 10 +-- 
common/types.pb.go | 3 +- 4 files changed, 293 insertions(+), 6 deletions(-) create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..45b4d2887 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,220 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/fortytw2/leaktest" + packages = ["."] + revision = "3b724c3d7b8729a35bf4e577f71653aec6e53513" + +[[projects]] + name = "github.com/fsnotify/fsnotify" + packages = ["."] + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + name = "github.com/go-kit/kit" + packages = [ + "log", + "log/level", + "log/term" + ] + revision = "4dc7be5d2d12881735283bcab7352178e190fc71" + version = "v0.6.0" + +[[projects]] + name = "github.com/go-logfmt/logfmt" + packages = ["."] + revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" + version = "v0.3.0" + +[[projects]] + name = "github.com/go-stack/stack" + packages = ["."] + revision = "817915b46b97fd7bb80e8ab6b69f01a53ac3eebf" + version = "v1.6.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "proto", + "protoc-gen-gogo/descriptor" + ] + revision = "1adfc126b41513cc696b209667c8656ea7aac67c" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "553a641470496b2327abcac10b36396bd98e45c9" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token" + ] + revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8" + +[[projects]] + name = 
"github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/jmhodges/levigo" + packages = ["."] + revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" + +[[projects]] + branch = "master" + name = "github.com/kr/logfmt" + packages = ["."] + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + name = "github.com/magiconair/properties" + packages = ["."] + revision = "49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934" + +[[projects]] + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = "b4575eea38cca1123ec2dc90c26529b5c5acfcff" + +[[projects]] + name = "github.com/pelletier/go-toml" + packages = ["."] + revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8" + version = "v1.1.0" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/spf13/afero" + packages = [ + ".", + "mem" + ] + revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c" + version = "v1.0.2" + +[[projects]] + name = "github.com/spf13/cast" + packages = ["."] + revision = "acbeb36b902d72a7a4c18e8f3241075e7ab763e4" + version = "v1.1.0" + +[[projects]] + name = "github.com/spf13/cobra" + packages = ["."] + revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" + version = "v0.0.1" + +[[projects]] + branch = "master" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "97afa5e7ca8a08a383cb259e06636b5e2cc7897f" + +[[projects]] + name = "github.com/spf13/viper" + packages = ["."] + revision = 
"25b30aa063fc18e48662b86996252eabdcf2f0c7" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util" + ] + revision = "b89cc31ef7977104127d34c1bd31ebd1a9db2199" + +[[projects]] + name = "golang.org/x/crypto" + packages = ["ripemd160"] + revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686" + +[[projects]] + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm" + ] + revision = "c01e4764d870b77f8abe5096ee19ad20d80e8075" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" + version = "v2.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "c33ff784e40965e1cd0ec6232b43e379c6608cb41a9c5c707247742b68c906fb" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..ef3f055a8 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,66 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + branch = "master" + name = "github.com/fortytw2/leaktest" + +[[constraint]] + name = "github.com/go-kit/kit" + version = "0.6.0" + +[[constraint]] + name = "github.com/go-logfmt/logfmt" + version = "0.3.0" + +[[constraint]] + name = "github.com/gogo/protobuf" + version = "1.0.0" + +[[constraint]] + branch = "master" + name = "github.com/jmhodges/levigo" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.0" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "0.0.1" + +[[constraint]] + name = "github.com/spf13/viper" + version = "1.0.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.1" + +[prune] + go-tests = true + unused-packages = true diff --git a/Makefile b/Makefile index ae2c71610..9e181f9f9 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,10 @@ GOTOOLS = \ - github.com/Masterminds/glide \ + github.com/golang/dep/cmd/dep \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/gogo/protobuf/gogoproto # github.com/alecthomas/gometalinter.v2 \ -GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo +GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-gogo INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf all: check get_vendor_deps protoc build test install metalinter @@ -59,15 +59,15 @@ update_tools: get_vendor_deps: @rm -rf vendor/ - @echo "--> Running glide install" - @glide install + @echo "--> Running dep ensure" + @dep ensure ######################################## ### Testing test: - go test -tags gcc `glide novendor` + go test -tags gcc $(shell go list ./... | grep -v vendor) test100: @for i in {1..100}; do make test; done diff --git a/common/types.pb.go b/common/types.pb.go index 047b7aee2..c301d28c0 100644 --- a/common/types.pb.go +++ b/common/types.pb.go @@ -1,5 +1,6 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: common/types.proto +// DO NOT EDIT! /* Package common is a generated protocol buffer package. From 066fe82a927aef6f7f6431af78f0d5156cb2cdb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20Vad=C3=A9e?= Date: Tue, 6 Mar 2018 08:29:18 +0100 Subject: [PATCH 379/515] pubsub implements service.OnReset (#156) --- pubsub/pubsub.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 54a4b8aed..28e008ca6 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -209,6 +209,11 @@ func (s *Server) OnStart() error { return nil } +// OnReset implements Service.OnReset +func (s *Server) OnReset() error { + return nil +} + func (s *Server) loop(state state) { loop: for cmd := range s.cmds { From b1cc688a61c53e39b92ceb5df370e3c94b19da4c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 11 Mar 2018 22:46:31 -0700 Subject: [PATCH 380/515] encodeByteSlice uses uvarint for length instead of varint (#161) --- events/README.md | 2 +- events/events.go | 2 +- merkle/simple_map_test.go | 12 ++++++------ merkle/types.go | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/events/README.md b/events/README.md index 7a00d79dc..d7469515e 100644 --- a/events/README.md +++ 
b/events/README.md @@ -95,7 +95,7 @@ type EventCallback func(data EventData) type EventData interface { } ``` -Generic event data can be typed and registered with tendermint/go-wire +Generic event data can be typed and registered with tendermint/go-amino via concrete implementation of this interface diff --git a/events/events.go b/events/events.go index 12aa07813..3bc349306 100644 --- a/events/events.go +++ b/events/events.go @@ -9,7 +9,7 @@ import ( . "github.com/tendermint/tmlibs/common" ) -// Generic event data can be typed and registered with tendermint/go-wire +// Generic event data can be typed and registered with tendermint/go-amino // via concrete implementation of this interface type EventData interface { //AssertIsEventData() diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 61210132b..c9c871354 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -17,37 +17,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", strHasher("value1")) - assert.Equal(t, "19618304d1ad2635c4238bce87f72331b22a11a1", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "acdb4f121bc6f25041eb263ab463f1cd79236a32", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value2")) - assert.Equal(t, "51cb96d3d41e1714def72eb4bacc211de9ddf284", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "b8cbf5adee8c524e14f531da9b49adbbbd66fffa", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value1")) db.Set("key2", strHasher("value2")) - assert.Equal(t, "58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", strHasher("value2")) // NOTE: out of order db.Set("key1", strHasher("value1")) - assert.Equal(t, 
"58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value1")) db.Set("key2", strHasher("value2")) db.Set("key3", strHasher("value3")) - assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", strHasher("value2")) // NOTE: out of order db.Set("key1", strHasher("value1")) db.Set("key3", strHasher("value3")) - assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/types.go b/merkle/types.go index e0fe35fa8..a0c491a7e 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -28,10 +28,10 @@ type Hasher interface { } //----------------------------------------------------------------------- -// NOTE: these are duplicated from go-wire so we dont need go-wire as a dep +// NOTE: these are duplicated from go-amino so we dont need go-amino as a dep func encodeByteSlice(w io.Writer, bz []byte) (err error) { - err = encodeVarint(w, int64(len(bz))) + err = encodeUvarint(w, uint64(len(bz))) if err != nil { return } @@ -39,9 +39,9 @@ func encodeByteSlice(w io.Writer, bz []byte) (err error) { return } -func encodeVarint(w io.Writer, i int64) (err error) { +func encodeUvarint(w io.Writer, i uint64) (err error) { var buf [10]byte - n := binary.PutVarint(buf[:], i) + n := binary.PutUvarint(buf[:], i) _, err = w.Write(buf[0:n]) return } From d289c9286e816a37336289a75752752a751bc918 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 15 Mar 2018 09:43:23 -0700 Subject: [PATCH 381/515] Implement 
NewPrefixDB (#164) * encodeByteSlice uses uvarint for length instead of varint * Implemented NewPrefixDB * Fix flowrate test (#165) * Complete implementation and fix tests * Add tests for MemBatch Write[Sync] --- common/types.pb.go | 3 +- db/c_level_db.go | 8 ++ db/common_test.go | 182 +++++++++++++++++------------- db/db_test.go | 190 +++++++++++++++++++++++++++++++ db/go_level_db.go | 18 ++- db/mem_batch.go | 22 +++- db/mem_db.go | 22 ++-- db/prefix_db.go | 263 +++++++++++++++++++++++++++++++++++++++++++ db/prefix_db_test.go | 44 ++++++++ db/types.go | 4 +- db/util.go | 24 ++-- flowrate/io_test.go | 10 +- 12 files changed, 683 insertions(+), 107 deletions(-) create mode 100644 db/db_test.go create mode 100644 db/prefix_db.go create mode 100644 db/prefix_db_test.go diff --git a/common/types.pb.go b/common/types.pb.go index c301d28c0..047b7aee2 100644 --- a/common/types.pb.go +++ b/common/types.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: common/types.proto -// DO NOT EDIT! /* Package common is a generated protocol buffer package. diff --git a/db/c_level_db.go b/db/c_level_db.go index a59137883..e3e6c1d5d 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -171,6 +171,14 @@ func (mBatch *cLevelDBBatch) Write() { } } +// Implements Batch. +func (mBatch *cLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) + if err != nil { + panic(err) + } +} + //---------------------------------------- // Iterator // NOTE This is almost identical to db/go_level_db.Iterator diff --git a/db/common_test.go b/db/common_test.go index 1b0f00416..5afec28b3 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -9,6 +10,14 @@ import ( cmn "github.com/tendermint/tmlibs/common" ) +//---------------------------------------- +// Helper functions. 
+ +func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) { + valueGot := db.Get(key) + assert.Equal(t, valueWanted, valueGot) +} + func checkValid(t *testing.T, itr Iterator, expected bool) { valid := itr.Valid() require.Equal(t, expected, valid) @@ -46,110 +55,131 @@ func checkValuePanics(t *testing.T, itr Iterator) { } func newTempDB(t *testing.T, backend DBBackendType) (db DB) { - dir, dirname := cmn.Tempdir("test_go_iterator") + dir, dirname := cmn.Tempdir("db_common_test") db = NewDB("testdb", backend, dirname) dir.Close() return db } -func TestDBIteratorSingleKey(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(nil, nil) +//---------------------------------------- +// mockDB - checkValid(t, itr, true) - checkNext(t, itr, false) - checkValid(t, itr, false) - checkNextPanics(t, itr) +// NOTE: not actually goroutine safe. +// If you want something goroutine safe, maybe you just want a MemDB. +type mockDB struct { + mtx sync.Mutex + calls map[string]int +} - // Once invalid... 
- checkInvalid(t, itr) - }) +func newMockDB() *mockDB { + return &mockDB{ + calls: make(map[string]int), } } -func TestDBIteratorTwoKeys(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("1"), bz("value_1")) - db.SetSync(bz("2"), bz("value_1")) +func (mdb *mockDB) Mutex() *sync.Mutex { + return &(mdb.mtx) +} - { // Fail by calling Next too much - itr := db.Iterator(nil, nil) - checkValid(t, itr, true) +func (mdb *mockDB) Get([]byte) []byte { + mdb.calls["Get"] += 1 + return nil +} - checkNext(t, itr, true) - checkValid(t, itr, true) +func (mdb *mockDB) Has([]byte) bool { + mdb.calls["Has"] += 1 + return false +} - checkNext(t, itr, false) - checkValid(t, itr, false) +func (mdb *mockDB) Set([]byte, []byte) { + mdb.calls["Set"] += 1 +} - checkNextPanics(t, itr) +func (mdb *mockDB) SetSync([]byte, []byte) { + mdb.calls["SetSync"] += 1 +} - // Once invalid... - checkInvalid(t, itr) - } - }) - } +func (mdb *mockDB) SetNoLock([]byte, []byte) { + mdb.calls["SetNoLock"] += 1 } -func TestDBIteratorMany(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) +func (mdb *mockDB) SetNoLockSync([]byte, []byte) { + mdb.calls["SetNoLockSync"] += 1 +} - keys := make([][]byte, 100) - for i := 0; i < 100; i++ { - keys[i] = []byte{byte(i)} - } +func (mdb *mockDB) Delete([]byte, []byte) { + mdb.calls["Delete"] += 1 +} - value := []byte{5} - for _, k := range keys { - db.Set(k, value) - } +func (mdb *mockDB) DeleteSync([]byte, []byte) { + mdb.calls["DeleteSync"] += 1 +} - itr := db.Iterator(nil, nil) - defer itr.Close() - for ; itr.Valid(); itr.Next() { - assert.Equal(t, db.Get(itr.Key()), itr.Value()) - } - }) - } +func (mdb *mockDB) DeleteNoLock([]byte) { + mdb.calls["DeleteNoLock"] += 1 } -func TestDBIteratorEmpty(t *testing.T) { - for backend, _ := range backends { - 
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - itr := db.Iterator(nil, nil) +func (mdb *mockDB) DeleteNoLockSync([]byte) { + mdb.calls["DeleteNoLockSync"] += 1 +} - checkInvalid(t, itr) - }) - } +func (mdb *mockDB) Iterator(start, end []byte) Iterator { + mdb.calls["Iterator"] += 1 + return &mockIterator{} } -func TestDBIteratorEmptyBeginAfter(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - itr := db.Iterator(bz("1"), nil) +func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator { + mdb.calls["ReverseIterator"] += 1 + return &mockIterator{} +} - checkInvalid(t, itr) - }) - } +func (mdb *mockDB) Close() { + mdb.calls["Close"] += 1 } -func TestDBIteratorNonemptyBeginAfter(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(bz("2"), nil) +func (mdb *mockDB) NewBatch() Batch { + mdb.calls["NewBatch"] += 1 + return &memBatch{db: mdb} +} + +func (mdb *mockDB) Print() { + mdb.calls["Print"] += 1 + fmt.Sprintf("mockDB{%v}", mdb.Stats()) +} - checkInvalid(t, itr) - }) +func (mdb *mockDB) Stats() map[string]string { + mdb.calls["Stats"] += 1 + + res := make(map[string]string) + for key, count := range mdb.calls { + res[key] = fmt.Sprintf("%d", count) } + return res +} + +//---------------------------------------- +// mockIterator + +type mockIterator struct{} + +func (_ mockIterator) Domain() (start []byte, end []byte) { + return nil, nil +} + +func (_ mockIterator) Valid() bool { + return false +} + +func (_ mockIterator) Next() { +} + +func (_ mockIterator) Key() []byte { + return nil +} + +func (_ mockIterator) Value() []byte { + return nil +} + +func (_ mockIterator) Close() { } diff --git a/db/db_test.go b/db/db_test.go new file mode 100644 index 
000000000..8884cea2d --- /dev/null +++ b/db/db_test.go @@ -0,0 +1,190 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDBIteratorSingleKey(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(nil, nil) + + checkValid(t, itr, true) + checkNext(t, itr, false) + checkValid(t, itr, false) + checkNextPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorTwoKeys(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + db.SetSync(bz("2"), bz("value_1")) + + { // Fail by calling Next too much + itr := db.Iterator(nil, nil) + checkValid(t, itr, true) + + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkNext(t, itr, false) + checkValid(t, itr, false) + + checkNextPanics(t, itr) + + // Once invalid... 
+ checkInvalid(t, itr) + } + }) + } +} + +func TestDBIteratorMany(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + assert.Equal(t, db.Get(itr.Key()), itr.Value()) + } + }) + } +} + +func TestDBIteratorEmpty(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(nil, nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorEmptyBeginAfter(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator(bz("1"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator(bz("2"), nil) + + checkInvalid(t, itr) + }) + } +} + +func TestDBBatchWrite1(t *testing.T) { + mdb := newMockDB() + batch := mdb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWrite2(t *testing.T) { + mdb := newMockDB() + batch := 
mdb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.Write() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync1(t *testing.T) { + mdb := newMockDB() + batch := mdb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 2, mdb.calls["SetNoLock"]) + assert.Equal(t, 1, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) +} + +func TestDBBatchWriteSync2(t *testing.T) { + mdb := newMockDB() + batch := mdb.NewBatch() + + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.WriteSync() + + assert.Equal(t, 0, mdb.calls["Set"]) + assert.Equal(t, 0, mdb.calls["SetSync"]) + assert.Equal(t, 3, mdb.calls["SetNoLock"]) + assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) + assert.Equal(t, 0, mdb.calls["Delete"]) + assert.Equal(t, 0, mdb.calls["DeleteSync"]) + assert.Equal(t, 0, mdb.calls["DeleteNoLock"]) + assert.Equal(t, 1, mdb.calls["DeleteNoLockSync"]) +} diff --git a/db/go_level_db.go b/db/go_level_db.go index 9fed329bf..55ca36c39 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -110,10 +110,10 @@ func (db *GoLevelDB) Print() { str, _ := db.db.GetProperty("leveldb.stats") fmt.Printf("%v\n", str) - iter := db.db.NewIterator(nil, 
nil) - for iter.Next() { - key := iter.Key() - value := iter.Value() + itr := db.db.NewIterator(nil, nil) + for itr.Next() { + key := itr.Key() + value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) } } @@ -167,7 +167,15 @@ func (mBatch *goLevelDBBatch) Delete(key []byte) { // Implements Batch. func (mBatch *goLevelDBBatch) Write() { - err := mBatch.db.db.Write(mBatch.batch, nil) + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false}) + if err != nil { + panic(err) + } +} + +// Implements Batch. +func (mBatch *goLevelDBBatch) WriteSync() { + err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true}) if err != nil { panic(err) } diff --git a/db/mem_batch.go b/db/mem_batch.go index 7072d931a..756798ded 100644 --- a/db/mem_batch.go +++ b/db/mem_batch.go @@ -5,7 +5,9 @@ import "sync" type atomicSetDeleter interface { Mutex() *sync.Mutex SetNoLock(key, value []byte) + SetNoLockSync(key, value []byte) DeleteNoLock(key []byte) + DeleteNoLockSync(key []byte) } type memBatch struct { @@ -35,16 +37,34 @@ func (mBatch *memBatch) Delete(key []byte) { } func (mBatch *memBatch) Write() { + mBatch.write(false) +} + +func (mBatch *memBatch) WriteSync() { + mBatch.write(true) +} + +func (mBatch *memBatch) write(doSync bool) { mtx := mBatch.db.Mutex() mtx.Lock() defer mtx.Unlock() - for _, op := range mBatch.ops { + for i, op := range mBatch.ops { + if doSync && i == (len(mBatch.ops)-1) { + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLockSync(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLockSync(op.key) + } + break // we're done. + } switch op.opType { case opTypeSet: mBatch.db.SetNoLock(op.key, op.value) case opTypeDelete: mBatch.db.DeleteNoLock(op.key) } + } } diff --git a/db/mem_db.go b/db/mem_db.go index f2c484fa7..5439d6789 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -26,6 +26,11 @@ func NewMemDB() *MemDB { return database } +// Implements atomicSetDeleter. 
+func (db *MemDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + // Implements DB. func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() @@ -63,6 +68,11 @@ func (db *MemDB) SetSync(key []byte, value []byte) { // Implements atomicSetDeleter. func (db *MemDB) SetNoLock(key []byte, value []byte) { + db.SetNoLockSync(key, value) +} + +// Implements atomicSetDeleter. +func (db *MemDB) SetNoLockSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -87,6 +97,11 @@ func (db *MemDB) DeleteSync(key []byte) { // Implements atomicSetDeleter. func (db *MemDB) DeleteNoLock(key []byte) { + db.DeleteNoLockSync(key) +} + +// Implements atomicSetDeleter. +func (db *MemDB) DeleteNoLockSync(key []byte) { key = nonNilBytes(key) delete(db.db, string(key)) @@ -122,9 +137,6 @@ func (db *MemDB) Stats() map[string]string { return stats } -//---------------------------------------- -// Batch - // Implements DB. func (db *MemDB) NewBatch() Batch { db.mtx.Lock() @@ -133,10 +145,6 @@ func (db *MemDB) NewBatch() Batch { return &memBatch{db, nil} } -func (db *MemDB) Mutex() *sync.Mutex { - return &(db.mtx) -} - //---------------------------------------- // Iterator diff --git a/db/prefix_db.go b/db/prefix_db.go new file mode 100644 index 000000000..5947e7fce --- /dev/null +++ b/db/prefix_db.go @@ -0,0 +1,263 @@ +package db + +import ( + "bytes" + "fmt" + "sync" +) + +// IteratePrefix is a convenience function for iterating over a key domain +// restricted by prefix. +func IteratePrefix(db DB, prefix []byte) Iterator { + var start, end []byte + if len(prefix) == 0 { + start = nil + end = nil + } else { + start = cp(prefix) + end = cpIncr(prefix) + } + return db.Iterator(start, end) +} + +/* +TODO: Make test, maybe rename. +// Like IteratePrefix but the iterator strips the prefix from the keys. 
+func IteratePrefixStripped(db DB, prefix []byte) Iterator { + return newUnprefixIterator(prefix, IteratePrefix(db, prefix)) +} +*/ + +//---------------------------------------- +// prefixDB + +type prefixDB struct { + mtx sync.Mutex + prefix []byte + db DB +} + +// NewPrefixDB lets you namespace multiple DBs within a single DB. +func NewPrefixDB(db DB, prefix []byte) *prefixDB { + return &prefixDB{ + prefix: prefix, + db: db, + } +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) Mutex() *sync.Mutex { + return &(pdb.mtx) +} + +// Implements DB. +func (pdb *prefixDB) Get(key []byte) []byte { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.Get(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) Has(key []byte) bool { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.Has(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) Set(key []byte, value []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.Set(pdb.prefixed(key), value) +} + +// Implements DB. +func (pdb *prefixDB) SetSync(key []byte, value []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.SetSync(pdb.prefixed(key), value) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLock(key []byte, value []byte) { + pdb.db.Set(pdb.prefixed(key), value) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) { + pdb.db.SetSync(pdb.prefixed(key), value) +} + +// Implements DB. +func (pdb *prefixDB) Delete(key []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.Delete(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) DeleteSync(key []byte) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.DeleteSync(pdb.prefixed(key)) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) DeleteNoLock(key []byte) { + pdb.db.Delete(pdb.prefixed(key)) +} + +// Implements atomicSetDeleter. 
+func (pdb *prefixDB) DeleteNoLockSync(key []byte) { + pdb.db.DeleteSync(pdb.prefixed(key)) +} + +// Implements DB. +func (pdb *prefixDB) Iterator(start, end []byte) Iterator { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pstart := append([]byte(pdb.prefix), start...) + pend := []byte(nil) + if end != nil { + pend = append([]byte(pdb.prefix), end...) + } + return newUnprefixIterator( + pdb.prefix, + pdb.db.Iterator( + pstart, + pend, + ), + ) +} + +// Implements DB. +func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pstart := []byte(nil) + if start != nil { + pstart = append([]byte(pdb.prefix), start...) + } + pend := []byte(nil) + if end != nil { + pend = append([]byte(pdb.prefix), end...) + } + return newUnprefixIterator( + pdb.prefix, + pdb.db.ReverseIterator( + pstart, + pend, + ), + ) +} + +// Implements DB. +func (pdb *prefixDB) NewBatch() Batch { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return &memBatch{pdb, nil} +} + +// Implements DB. +func (pdb *prefixDB) Close() { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pdb.db.Close() +} + +// Implements DB. +func (pdb *prefixDB) Print() { + fmt.Printf("prefix: %X\n", pdb.prefix) + + itr := pdb.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } +} + +// Implements DB. +func (pdb *prefixDB) Stats() map[string]string { + stats := make(map[string]string) + stats["prefixdb.prefix.string"] = string(pdb.prefix) + stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) + source := pdb.db.Stats() + for key, value := range source { + stats["prefixdb.source."+key] = value + } + return stats +} + +func (pdb *prefixDB) prefixed(key []byte) []byte { + return append([]byte(pdb.prefix), key...) +} + +//---------------------------------------- + +// Strips prefix while iterating from Iterator. 
+type unprefixIterator struct { + prefix []byte + source Iterator +} + +func newUnprefixIterator(prefix []byte, source Iterator) unprefixIterator { + return unprefixIterator{ + prefix: prefix, + source: source, + } +} + +func (itr unprefixIterator) Domain() (start []byte, end []byte) { + start, end = itr.source.Domain() + if len(start) > 0 { + start = stripPrefix(start, itr.prefix) + } + if len(end) > 0 { + end = stripPrefix(end, itr.prefix) + } + return +} + +func (itr unprefixIterator) Valid() bool { + return itr.source.Valid() +} + +func (itr unprefixIterator) Next() { + itr.source.Next() +} + +func (itr unprefixIterator) Key() (key []byte) { + return stripPrefix(itr.source.Key(), itr.prefix) +} + +func (itr unprefixIterator) Value() (value []byte) { + return itr.source.Value() +} + +func (itr unprefixIterator) Close() { + itr.source.Close() +} + +//---------------------------------------- + +func stripPrefix(key []byte, prefix []byte) (stripped []byte) { + if len(key) < len(prefix) { + panic("should not happen") + } + if !bytes.Equal(key[:len(prefix)], prefix) { + panic("should not happne") + } + return key[len(prefix):] +} diff --git a/db/prefix_db_test.go b/db/prefix_db_test.go new file mode 100644 index 000000000..fd44a7ec8 --- /dev/null +++ b/db/prefix_db_test.go @@ -0,0 +1,44 @@ +package db + +import "testing" + +func TestIteratePrefix(t *testing.T) { + db := NewMemDB() + // Under "key" prefix + db.Set(bz("key"), bz("value")) + db.Set(bz("key1"), bz("value1")) + db.Set(bz("key2"), bz("value2")) + db.Set(bz("key3"), bz("value3")) + db.Set(bz("something"), bz("else")) + db.Set(bz(""), bz("")) + db.Set(bz("k"), bz("val")) + db.Set(bz("ke"), bz("valu")) + db.Set(bz("kee"), bz("valuu")) + xitr := db.Iterator(nil, nil) + xitr.Key() + + pdb := NewPrefixDB(db, bz("key")) + checkValue(t, pdb, bz("key"), nil) + checkValue(t, pdb, bz(""), bz("value")) + checkValue(t, pdb, bz("key1"), nil) + checkValue(t, pdb, bz("1"), bz("value1")) + checkValue(t, pdb, bz("key2"), 
nil) + checkValue(t, pdb, bz("2"), bz("value2")) + checkValue(t, pdb, bz("key3"), nil) + checkValue(t, pdb, bz("3"), bz("value3")) + checkValue(t, pdb, bz("something"), nil) + checkValue(t, pdb, bz("k"), nil) + checkValue(t, pdb, bz("ke"), nil) + checkValue(t, pdb, bz("kee"), nil) + + itr := pdb.Iterator(nil, nil) + itr.Key() + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + itr.Close() +} diff --git a/db/types.go b/db/types.go index 07858087a..45146942e 100644 --- a/db/types.go +++ b/db/types.go @@ -1,5 +1,6 @@ package db +// DBs are goroutine safe. type DB interface { // Get returns nil iff key doesn't exist. @@ -35,7 +36,7 @@ type DB interface { // Iterate over a domain of keys in descending order. End is exclusive. // Start must be greater than end, or the Iterator is invalid. // If start is nil, iterates from the last/greatest item (inclusive). - // If end is nil, iterates up to the first/least item (iclusive). + // If end is nil, iterates up to the first/least item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. 
// CONTRACT: start, end readonly []byte ReverseIterator(start, end []byte) Iterator @@ -59,6 +60,7 @@ type DB interface { type Batch interface { SetDeleter Write() + WriteSync() } type SetDeleter interface { diff --git a/db/util.go b/db/util.go index b0ab7f6ad..ecb392dd6 100644 --- a/db/util.go +++ b/db/util.go @@ -4,28 +4,20 @@ import ( "bytes" ) -func IteratePrefix(db DB, prefix []byte) Iterator { - var start, end []byte - if len(prefix) == 0 { - start = nil - end = nil - } else { - start = cp(prefix) - end = cpIncr(prefix) - } - return db.Iterator(start, end) -} - -//---------------------------------------- - func cp(bz []byte) (ret []byte) { ret = make([]byte, len(bz)) copy(ret, bz) return ret } +// Returns a slice of the same length (big endian) +// except incremented by one. +// Returns nil on overflow (e.g. if bz bytes are all 0xFF) // CONTRACT: len(bz) > 0 func cpIncr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpIncr expects non-zero bz length") + } ret = cp(bz) for i := len(bz) - 1; i >= 0; i-- { if ret[i] < byte(0xFF) { @@ -33,6 +25,10 @@ func cpIncr(bz []byte) (ret []byte) { return } else { ret[i] = byte(0x00) + if i == 0 { + // Overflow + return nil + } } } return nil diff --git a/flowrate/io_test.go b/flowrate/io_test.go index db40337c9..c84029d5e 100644 --- a/flowrate/io_test.go +++ b/flowrate/io_test.go @@ -121,7 +121,15 @@ func TestWriter(t *testing.T) { w.SetBlocking(true) if n, err := w.Write(b[20:]); n != 80 || err != nil { t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _400ms { + } else if rt := time.Since(start); rt < _300ms { + // Explanation for `rt < _300ms` (as opposed to `< _400ms`) + // + // |<-- start | | + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms + // sends: 20|20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 400ms is up. + // Thus, we can only panic if rt < _300ms. 
t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) } From 536c27de8eefbf32b1738e0685401a7944dd76c1 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 04:28:53 -0700 Subject: [PATCH 382/515] common/random.go supports seeding and *Rand (#121) * common/random.go supports seeding and *Rand * Ensure determinism --- common/random.go | 236 ++++++++++++++++++++++++++++++------------ common/random_test.go | 47 +++++---- 2 files changed, 194 insertions(+), 89 deletions(-) diff --git a/common/random.go b/common/random.go index ca71b6143..b945a88eb 100644 --- a/common/random.go +++ b/common/random.go @@ -13,34 +13,138 @@ const ( // pseudo random number generator. // seeded with OS randomness (crand) -var prng struct { + +type Rand struct { sync.Mutex *mrand.Rand } -func reset() { - b := cRandBytes(8) +var grand *Rand + +func init() { + grand = New() + grand.init() +} + +func New() *Rand { + rand := &Rand{} + rand.init() + return rand +} + +func (r *Rand) init() { + bz := cRandBytes(8) var seed uint64 for i := 0; i < 8; i++ { - seed |= uint64(b[i]) + seed |= uint64(bz[i]) seed <<= 8 } - prng.Lock() - prng.Rand = mrand.New(mrand.NewSource(int64(seed))) - prng.Unlock() + r.reset(int64(seed)) } -func init() { - reset() +func (r *Rand) reset(seed int64) { + r.Rand = mrand.New(mrand.NewSource(seed)) +} + +//---------------------------------------- +// Global functions + +func Seed(seed int64) { + grand.Seed(seed) +} + +func RandStr(length int) string { + return grand.RandStr(length) +} + +func RandUint16() uint16 { + return grand.RandUint16() +} + +func RandUint32() uint32 { + return grand.RandUint32() +} + +func RandUint64() uint64 { + return grand.RandUint64() +} + +func RandUint() uint { + return grand.RandUint() +} + +func RandInt16() int16 { + return grand.RandInt16() +} + +func RandInt32() int32 { + return grand.RandInt32() +} + +func RandInt64() int64 { + return grand.RandInt64() +} + +func RandInt() int { + return grand.RandInt() +} + +func RandInt31() 
int32 { + return grand.RandInt31() +} + +func RandInt63() int64 { + return grand.RandInt63() +} + +func RandUint16Exp() uint16 { + return grand.RandUint16Exp() +} + +func RandUint32Exp() uint32 { + return grand.RandUint32Exp() +} + +func RandUint64Exp() uint64 { + return grand.RandUint64Exp() +} + +func RandFloat32() float32 { + return grand.RandFloat32() +} + +func RandTime() time.Time { + return grand.RandTime() +} + +func RandBytes(n int) []byte { + return grand.RandBytes(n) +} + +func RandIntn(n int) int { + return grand.RandIntn(n) +} + +func RandPerm(n int) []int { + return grand.RandPerm(n) +} + +//---------------------------------------- +// Rand methods + +func (r *Rand) Seed(seed int64) { + r.Lock() + r.reset(seed) + r.Unlock() } // Constructs an alphanumeric string of given length. // It is not safe for cryptographic usage. -func RandStr(length int) string { +func (r *Rand) RandStr(length int) string { chars := []byte{} MAIN_LOOP: for { - val := RandInt63() + val := r.RandInt63() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars @@ -60,127 +164,127 @@ MAIN_LOOP: } // It is not safe for cryptographic usage. -func RandUint16() uint16 { - return uint16(RandUint32() & (1<<16 - 1)) +func (r *Rand) RandUint16() uint16 { + return uint16(r.RandUint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. -func RandUint32() uint32 { - prng.Lock() - u32 := prng.Uint32() - prng.Unlock() +func (r *Rand) RandUint32() uint32 { + r.Lock() + u32 := r.Uint32() + r.Unlock() return u32 } // It is not safe for cryptographic usage. -func RandUint64() uint64 { - return uint64(RandUint32())<<32 + uint64(RandUint32()) +func (r *Rand) RandUint64() uint64 { + return uint64(r.RandUint32())<<32 + uint64(r.RandUint32()) } // It is not safe for cryptographic usage. 
-func RandUint() uint { - prng.Lock() - i := prng.Int() - prng.Unlock() +func (r *Rand) RandUint() uint { + r.Lock() + i := r.Int() + r.Unlock() return uint(i) } // It is not safe for cryptographic usage. -func RandInt16() int16 { - return int16(RandUint32() & (1<<16 - 1)) +func (r *Rand) RandInt16() int16 { + return int16(r.RandUint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. -func RandInt32() int32 { - return int32(RandUint32()) +func (r *Rand) RandInt32() int32 { + return int32(r.RandUint32()) } // It is not safe for cryptographic usage. -func RandInt64() int64 { - return int64(RandUint64()) +func (r *Rand) RandInt64() int64 { + return int64(r.RandUint64()) } // It is not safe for cryptographic usage. -func RandInt() int { - prng.Lock() - i := prng.Int() - prng.Unlock() +func (r *Rand) RandInt() int { + r.Lock() + i := r.Int() + r.Unlock() return i } // It is not safe for cryptographic usage. -func RandInt31() int32 { - prng.Lock() - i31 := prng.Int31() - prng.Unlock() +func (r *Rand) RandInt31() int32 { + r.Lock() + i31 := r.Int31() + r.Unlock() return i31 } // It is not safe for cryptographic usage. -func RandInt63() int64 { - prng.Lock() - i63 := prng.Int63() - prng.Unlock() +func (r *Rand) RandInt63() int64 { + r.Lock() + i63 := r.Int63() + r.Unlock() return i63 } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. -func RandUint16Exp() uint16 { - bits := RandUint32() % 16 +func (r *Rand) RandUint16Exp() uint16 { + bits := r.RandUint32() % 16 if bits == 0 { return 0 } n := uint16(1 << (bits - 1)) - n += uint16(RandInt31()) & ((1 << (bits - 1)) - 1) + n += uint16(r.RandInt31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. 
-func RandUint32Exp() uint32 { - bits := RandUint32() % 32 +func (r *Rand) RandUint32Exp() uint32 { + bits := r.RandUint32() % 32 if bits == 0 { return 0 } n := uint32(1 << (bits - 1)) - n += uint32(RandInt31()) & ((1 << (bits - 1)) - 1) + n += uint32(r.RandInt31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. -func RandUint64Exp() uint64 { - bits := RandUint32() % 64 +func (r *Rand) RandUint64Exp() uint64 { + bits := r.RandUint32() % 64 if bits == 0 { return 0 } n := uint64(1 << (bits - 1)) - n += uint64(RandInt63()) & ((1 << (bits - 1)) - 1) + n += uint64(r.RandInt63()) & ((1 << (bits - 1)) - 1) return n } // It is not safe for cryptographic usage. -func RandFloat32() float32 { - prng.Lock() - f32 := prng.Float32() - prng.Unlock() +func (r *Rand) RandFloat32() float32 { + r.Lock() + f32 := r.Float32() + r.Unlock() return f32 } // It is not safe for cryptographic usage. -func RandTime() time.Time { - return time.Unix(int64(RandUint64Exp()), 0) +func (r *Rand) RandTime() time.Time { + return time.Unix(int64(r.RandUint64Exp()), 0) } // RandBytes returns n random bytes from the OS's source of entropy ie. via crypto/rand. // It is not safe for cryptographic usage. -func RandBytes(n int) []byte { +func (r *Rand) RandBytes(n int) []byte { // cRandBytes isn't guaranteed to be fast so instead // use random bytes generated from the internal PRNG bs := make([]byte, n) for i := 0; i < len(bs); i++ { - bs[i] = byte(RandInt() & 0xFF) + bs[i] = byte(r.RandInt() & 0xFF) } return bs } @@ -188,19 +292,19 @@ func RandBytes(n int) []byte { // RandIntn returns, as an int, a non-negative pseudo-random number in [0, n). // It panics if n <= 0. // It is not safe for cryptographic usage. 
-func RandIntn(n int) int { - prng.Lock() - i := prng.Intn(n) - prng.Unlock() +func (r *Rand) RandIntn(n int) int { + r.Lock() + i := r.Intn(n) + r.Unlock() return i } // RandPerm returns a pseudo-random permutation of n integers in [0, n). // It is not safe for cryptographic usage. -func RandPerm(n int) []int { - prng.Lock() - perm := prng.Perm(n) - prng.Unlock() +func (r *Rand) RandPerm(n int) []int { + r.Lock() + perm := r.Perm(n) + r.Unlock() return perm } diff --git a/common/random_test.go b/common/random_test.go index 216f2f8bc..b58b4a13a 100644 --- a/common/random_test.go +++ b/common/random_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "io" mrand "math/rand" "sync" "testing" @@ -33,37 +32,38 @@ func TestRandIntn(t *testing.T) { } } -// It is essential that these tests run and never repeat their outputs -// lest we've been pwned and the behavior of our randomness is controlled. -// See Issues: -// * https://github.com/tendermint/tmlibs/issues/99 -// * https://github.com/tendermint/tendermint/issues/973 -func TestUniqueRng(t *testing.T) { - buf := new(bytes.Buffer) - outputs := make(map[string][]int) +// Test to make sure that we never call math.rand(). +// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + // Set math/rand's seed for the sake of debugging this test. + // (It isn't strictly necessary). 
+ mrand.Seed(1) + for i := 0; i < 100; i++ { - testThemAll(buf) - output := buf.String() - buf.Reset() - runs, seen := outputs[output] - if seen { - t.Errorf("Run #%d's output was already seen in previous runs: %v", i, runs) + output := testThemAll() + if i == 0 { + firstOutput = output + } else { + if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } } - outputs[output] = append(outputs[output], i) } } -func testThemAll(out io.Writer) { - // Reset the internal PRNG - reset() +func testThemAll() string { - // Set math/rand's Seed so that any direct invocations - // of math/rand will reveal themselves. - mrand.Seed(1) + // Such determinism. + grand.reset(1) + + // Use it. + out := new(bytes.Buffer) perm := RandPerm(10) blob, _ := json.Marshal(perm) fmt.Fprintf(out, "perm: %s\n", blob) - fmt.Fprintf(out, "randInt: %d\n", RandInt()) fmt.Fprintf(out, "randUint: %d\n", RandUint()) fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) @@ -76,6 +76,7 @@ func testThemAll(out io.Writer) { fmt.Fprintf(out, "randUint16Exp: %d\n", RandUint16Exp()) fmt.Fprintf(out, "randUint32Exp: %d\n", RandUint32Exp()) fmt.Fprintf(out, "randUint64Exp: %d\n", RandUint64Exp()) + return out.String() } func TestRngConcurrencySafety(t *testing.T) { From 6d61ca3bb5354b3c5104e1b112a74e2f4009797d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 12:34:23 +0100 Subject: [PATCH 383/515] New -> NewRand --- common/random.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/random.go b/common/random.go index b945a88eb..f70d6477f 100644 --- a/common/random.go +++ b/common/random.go @@ -22,11 +22,11 @@ type Rand struct { var grand *Rand func init() { - grand = New() + grand = NewRand() grand.init() } -func New() *Rand { +func NewRand() *Rand { rand := &Rand{} rand.init() return rand From 90cd89eab08dec6f9b45808371475dbf715d4632 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 
2018 05:18:22 -0700 Subject: [PATCH 384/515] Unexpose r.rand (#167) --- common/random.go | 124 +++++++++++++++++++++++------------------------ 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/common/random.go b/common/random.go index f70d6477f..a2237487b 100644 --- a/common/random.go +++ b/common/random.go @@ -16,7 +16,7 @@ const ( type Rand struct { sync.Mutex - *mrand.Rand + rand *mrand.Rand } var grand *Rand @@ -43,7 +43,7 @@ func (r *Rand) init() { } func (r *Rand) reset(seed int64) { - r.Rand = mrand.New(mrand.NewSource(seed)) + r.rand = mrand.New(mrand.NewSource(seed)) } //---------------------------------------- @@ -54,79 +54,79 @@ func Seed(seed int64) { } func RandStr(length int) string { - return grand.RandStr(length) + return grand.Str(length) } func RandUint16() uint16 { - return grand.RandUint16() + return grand.Uint16() } func RandUint32() uint32 { - return grand.RandUint32() + return grand.Uint32() } func RandUint64() uint64 { - return grand.RandUint64() + return grand.Uint64() } func RandUint() uint { - return grand.RandUint() + return grand.Uint() } func RandInt16() int16 { - return grand.RandInt16() + return grand.Int16() } func RandInt32() int32 { - return grand.RandInt32() + return grand.Int32() } func RandInt64() int64 { - return grand.RandInt64() + return grand.Int64() } func RandInt() int { - return grand.RandInt() + return grand.Int() } func RandInt31() int32 { - return grand.RandInt31() + return grand.Int31() } func RandInt63() int64 { - return grand.RandInt63() + return grand.Int63() } func RandUint16Exp() uint16 { - return grand.RandUint16Exp() + return grand.Uint16Exp() } func RandUint32Exp() uint32 { - return grand.RandUint32Exp() + return grand.Uint32Exp() } func RandUint64Exp() uint64 { - return grand.RandUint64Exp() + return grand.Uint64Exp() } func RandFloat32() float32 { - return grand.RandFloat32() + return grand.Float32() } func RandTime() time.Time { - return grand.RandTime() + return grand.Time() } func 
RandBytes(n int) []byte { - return grand.RandBytes(n) + return grand.Bytes(n) } func RandIntn(n int) int { - return grand.RandIntn(n) + return grand.Intn(n) } func RandPerm(n int) []int { - return grand.RandPerm(n) + return grand.Perm(n) } //---------------------------------------- @@ -140,11 +140,11 @@ func (r *Rand) Seed(seed int64) { // Constructs an alphanumeric string of given length. // It is not safe for cryptographic usage. -func (r *Rand) RandStr(length int) string { +func (r *Rand) Str(length int) string { chars := []byte{} MAIN_LOOP: for { - val := r.RandInt63() + val := r.Int63() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars @@ -164,127 +164,127 @@ MAIN_LOOP: } // It is not safe for cryptographic usage. -func (r *Rand) RandUint16() uint16 { - return uint16(r.RandUint32() & (1<<16 - 1)) +func (r *Rand) Uint16() uint16 { + return uint16(r.rand.Uint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. -func (r *Rand) RandUint32() uint32 { +func (r *Rand) Uint32() uint32 { r.Lock() - u32 := r.Uint32() + u32 := r.rand.Uint32() r.Unlock() return u32 } // It is not safe for cryptographic usage. -func (r *Rand) RandUint64() uint64 { - return uint64(r.RandUint32())<<32 + uint64(r.RandUint32()) +func (r *Rand) Uint64() uint64 { + return uint64(r.rand.Uint32())<<32 + uint64(r.rand.Uint32()) } // It is not safe for cryptographic usage. -func (r *Rand) RandUint() uint { +func (r *Rand) Uint() uint { r.Lock() - i := r.Int() + i := r.rand.Int() r.Unlock() return uint(i) } // It is not safe for cryptographic usage. -func (r *Rand) RandInt16() int16 { - return int16(r.RandUint32() & (1<<16 - 1)) +func (r *Rand) Int16() int16 { + return int16(r.rand.Uint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. -func (r *Rand) RandInt32() int32 { - return int32(r.RandUint32()) +func (r *Rand) Int32() int32 { + return int32(r.rand.Uint32()) } // It is not safe for cryptographic usage. 
-func (r *Rand) RandInt64() int64 { - return int64(r.RandUint64()) +func (r *Rand) Int64() int64 { + return int64(r.rand.Uint64()) } // It is not safe for cryptographic usage. -func (r *Rand) RandInt() int { +func (r *Rand) Int() int { r.Lock() - i := r.Int() + i := r.rand.Int() r.Unlock() return i } // It is not safe for cryptographic usage. -func (r *Rand) RandInt31() int32 { +func (r *Rand) Int31() int32 { r.Lock() - i31 := r.Int31() + i31 := r.rand.Int31() r.Unlock() return i31 } // It is not safe for cryptographic usage. -func (r *Rand) RandInt63() int64 { +func (r *Rand) Int63() int64 { r.Lock() - i63 := r.Int63() + i63 := r.rand.Int63() r.Unlock() return i63 } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. -func (r *Rand) RandUint16Exp() uint16 { - bits := r.RandUint32() % 16 +func (r *Rand) Uint16Exp() uint16 { + bits := r.rand.Uint32() % 16 if bits == 0 { return 0 } n := uint16(1 << (bits - 1)) - n += uint16(r.RandInt31()) & ((1 << (bits - 1)) - 1) + n += uint16(r.rand.Int31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. -func (r *Rand) RandUint32Exp() uint32 { - bits := r.RandUint32() % 32 +func (r *Rand) Uint32Exp() uint32 { + bits := r.rand.Uint32() % 32 if bits == 0 { return 0 } n := uint32(1 << (bits - 1)) - n += uint32(r.RandInt31()) & ((1 << (bits - 1)) - 1) + n += uint32(r.rand.Int31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. -func (r *Rand) RandUint64Exp() uint64 { - bits := r.RandUint32() % 64 +func (r *Rand) Uint64Exp() uint64 { + bits := r.rand.Uint32() % 64 if bits == 0 { return 0 } n := uint64(1 << (bits - 1)) - n += uint64(r.RandInt63()) & ((1 << (bits - 1)) - 1) + n += uint64(r.rand.Int63()) & ((1 << (bits - 1)) - 1) return n } // It is not safe for cryptographic usage. 
-func (r *Rand) RandFloat32() float32 { +func (r *Rand) Float32() float32 { r.Lock() - f32 := r.Float32() + f32 := r.rand.Float32() r.Unlock() return f32 } // It is not safe for cryptographic usage. -func (r *Rand) RandTime() time.Time { - return time.Unix(int64(r.RandUint64Exp()), 0) +func (r *Rand) Time() time.Time { + return time.Unix(int64(r.Uint64Exp()), 0) } // RandBytes returns n random bytes from the OS's source of entropy ie. via crypto/rand. // It is not safe for cryptographic usage. -func (r *Rand) RandBytes(n int) []byte { +func (r *Rand) Bytes(n int) []byte { // cRandBytes isn't guaranteed to be fast so instead // use random bytes generated from the internal PRNG bs := make([]byte, n) for i := 0; i < len(bs); i++ { - bs[i] = byte(r.RandInt() & 0xFF) + bs[i] = byte(r.rand.Int() & 0xFF) } return bs } @@ -292,18 +292,18 @@ func (r *Rand) RandBytes(n int) []byte { // RandIntn returns, as an int, a non-negative pseudo-random number in [0, n). // It panics if n <= 0. // It is not safe for cryptographic usage. -func (r *Rand) RandIntn(n int) int { +func (r *Rand) Intn(n int) int { r.Lock() - i := r.Intn(n) + i := r.rand.Intn(n) r.Unlock() return i } // RandPerm returns a pseudo-random permutation of n integers in [0, n). // It is not safe for cryptographic usage. 
-func (r *Rand) RandPerm(n int) []int { +func (r *Rand) Perm(n int) []int { r.Lock() - perm := r.Perm(n) + perm := r.rand.Perm(n) r.Unlock() return perm } From de36bfe31c1052a34b631284f130aab86b8534ae Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 13:56:39 +0100 Subject: [PATCH 385/515] Add TMLibs/Error from Cosmos-SDK/Error --- common/errors.go | 93 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 91 insertions(+), 2 deletions(-) diff --git a/common/errors.go b/common/errors.go index 4710b9ee0..4a15d0ee6 100644 --- a/common/errors.go +++ b/common/errors.go @@ -2,8 +2,96 @@ package common import ( "fmt" + "runtime" ) +//---------------------------------------- +// Error & cmnError + +type Error interface { + Error() string + Trace(msg string) Error + TraceCause(cause error, msg string) Error + Cause() error +} + +func NewError(msg string) Error { + return newError(msg) +} + +type traceItem struct { + msg string + filename string + lineno int +} + +func (ti traceItem) String() string { + return fmt.Sprintf("%v:%v %v", ti.filename, ti.lineno, ti.msg) +} + +type cmnError struct { + msg string + cause error + traces []traceItem +} + +func newError(msg string) *cmnError { + return &cmnError{ + msg: msg, + cause: nil, + traces: nil, + } +} + +func (err *cmnError) Error() string { + return fmt.Sprintf("Error{%s,%v,%v}", err.msg, err.cause, len(err.traces)) +} + +// Add tracing information with msg. +func (err *cmnError) Trace(msg string) Error { + return err.doTrace(msg, 2) +} + +// Add tracing information with cause and msg. +// If a cause was already set before, it is overwritten. +func (err *cmnError) TraceCause(cause error, msg string) Error { + err.cause = cause + return err.doTrace(msg, 2) +} + +func (err *cmnError) doTrace(msg string, n int) Error { + _, fn, line, ok := runtime.Caller(n) + if !ok { + if fn == "" { + fn = "" + } + if line <= 0 { + line = -1 + } + } + // Include file & line number & msg. 
+ // Do not include the whole stack trace. + err.traces = append(err.traces, traceItem{ + filename: fn, + lineno: line, + msg: msg, + }) + return err +} + +// Return last known cause. +// NOTE: The meaning of "cause" is left for the caller to define. +// There exists to canonical definition of "cause". +// Instead of blaming, try to handle-or-organize it. +func (err *cmnError) Cause() error { + return err.cause +} + +//---------------------------------------- +// StackError + +// NOTE: Used by Tendermint p2p upon recovery. +// Err could be "Reason", since it isn't an error type. type StackError struct { Err interface{} Stack []byte @@ -17,8 +105,9 @@ func (se StackError) Error() string { return se.String() } -//-------------------------------------------------------------------------------------------------- -// panic wrappers +//---------------------------------------- +// Panic wrappers +// XXX DEPRECATED // A panic resulting from a sanity check means there is a programmer error // and some guarantee is not satisfied. 
From bb875303c29d675c0934bfae026642a8c2a29358 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 14:42:08 +0100 Subject: [PATCH 386/515] Add NewErrorWithCause() --- common/errors.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/common/errors.go b/common/errors.go index 4a15d0ee6..4e2591a9f 100644 --- a/common/errors.go +++ b/common/errors.go @@ -16,7 +16,11 @@ type Error interface { } func NewError(msg string) Error { - return newError(msg) + return newError(msg, nil) +} + +func NewErrorWithCause(cause error, msg string) Error { + return newError(msg, cause) } type traceItem struct { @@ -35,10 +39,10 @@ type cmnError struct { traces []traceItem } -func newError(msg string) *cmnError { +func newError(msg string, cause error) *cmnError { return &cmnError{ msg: msg, - cause: nil, + cause: cause, traces: nil, } } From 99437a96fb1a074f9bce622cfcfab87a8f41313f Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 15:23:03 +0100 Subject: [PATCH 387/515] Add efficient implementation of fmt and use for errors. --- common/errors.go | 18 ++++++++++++------ common/string.go | 10 ++++++++-- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/common/errors.go b/common/errors.go index 4e2591a9f..4becd0446 100644 --- a/common/errors.go +++ b/common/errors.go @@ -10,16 +10,19 @@ import ( type Error interface { Error() string - Trace(msg string) Error - TraceCause(cause error, msg string) Error + Trace(format string, a ...interface{}) Error + TraceCause(cause error, format string, a ...interface{}) Error Cause() error } -func NewError(msg string) Error { +func NewError(format string, a ...interface{}) Error { + msg := Fmt(format, a...) return newError(msg, nil) + } -func NewErrorWithCause(cause error, msg string) Error { +func NewErrorWithCause(cause error, format string, a ...interface{}) Error { + msg := Fmt(format, a...) 
return newError(msg, cause) } @@ -39,6 +42,7 @@ type cmnError struct { traces []traceItem } +// NOTE: Do not expose, it's not very friendly. func newError(msg string, cause error) *cmnError { return &cmnError{ msg: msg, @@ -52,13 +56,15 @@ func (err *cmnError) Error() string { } // Add tracing information with msg. -func (err *cmnError) Trace(msg string) Error { +func (err *cmnError) Trace(format string, a ...interface{}) Error { + msg := Fmt(format, a...) return err.doTrace(msg, 2) } // Add tracing information with cause and msg. // If a cause was already set before, it is overwritten. -func (err *cmnError) TraceCause(cause error, msg string) Error { +func (err *cmnError) TraceCause(cause error, format string, a ...interface{}) Error { + msg := Fmt(format, a...) err.cause = cause return err.doTrace(msg, 2) } diff --git a/common/string.go b/common/string.go index a6895eb25..64484921f 100644 --- a/common/string.go +++ b/common/string.go @@ -6,8 +6,14 @@ import ( "strings" ) -// Fmt shorthand, XXX DEPRECATED -var Fmt = fmt.Sprintf +// Like fmt.Sprintf, but skips formatting if args are empty. +var Fmt = func(format string, a ...interface{}) string { + if len(a) == 0 { + return format + } else { + return fmt.Sprintf(format, a...) 
+ } +} // RightPadString adds spaces to the right of a string to make it length totalLength func RightPadString(s string, totalLength int) string { From 9b9a9e7f8c73f6d0ae8672438a271f32060eebcb Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 17 Mar 2018 16:32:49 +0100 Subject: [PATCH 388/515] Add Error Type for switching --- common/errors.go | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/common/errors.go b/common/errors.go index 4becd0446..8e4b02283 100644 --- a/common/errors.go +++ b/common/errors.go @@ -13,17 +13,27 @@ type Error interface { Trace(format string, a ...interface{}) Error TraceCause(cause error, format string, a ...interface{}) Error Cause() error + Type() interface{} + WithType(t interface{}) Error } +// New Error with no cause where the type is the format string of the message.. func NewError(format string, a ...interface{}) Error { msg := Fmt(format, a...) - return newError(msg, nil) + return newError(msg, nil, format) } +// New Error with cause where the type is the cause, with message.. func NewErrorWithCause(cause error, format string, a ...interface{}) Error { msg := Fmt(format, a...) - return newError(msg, cause) + return newError(msg, cause, cause) +} + +// New Error with specified type and message. +func NewErrorWithType(type_ interface{}, format string, a ...interface{}) Error { + msg := Fmt(format, a...) + return newError(msg, nil, type_) } type traceItem struct { @@ -39,20 +49,22 @@ func (ti traceItem) String() string { type cmnError struct { msg string cause error + type_ interface{} traces []traceItem } // NOTE: Do not expose, it's not very friendly. 
-func newError(msg string, cause error) *cmnError { +func newError(msg string, cause error, type_ interface{}) *cmnError { return &cmnError{ msg: msg, cause: cause, + type_: type_, traces: nil, } } func (err *cmnError) Error() string { - return fmt.Sprintf("Error{%s,%v,%v}", err.msg, err.cause, len(err.traces)) + return fmt.Sprintf("Error{%v:%s,%v,%v}", err.type_, err.msg, err.cause, len(err.traces)) } // Add tracing information with msg. @@ -69,6 +81,18 @@ func (err *cmnError) TraceCause(cause error, format string, a ...interface{}) Er return err.doTrace(msg, 2) } +// Return the "type" of this message, primarily for switching +// to handle this error. +func (err *cmnError) Type() interface{} { + return err.type_ +} + +// Overwrites the error's type. +func (err *cmnError) WithType(type_ interface{}) Error { + err.type_ = type_ + return err +} + func (err *cmnError) doTrace(msg string, n int) Error { _, fn, line, ok := runtime.Caller(n) if !ok { From 34125870360c5f0b8d585f90c97ef8acbb66d647 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 18 Mar 2018 01:50:15 +0100 Subject: [PATCH 389/515] Fix race condition in random.go --- common/random.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/common/random.go b/common/random.go index a2237487b..af531992e 100644 --- a/common/random.go +++ b/common/random.go @@ -165,7 +165,7 @@ MAIN_LOOP: // It is not safe for cryptographic usage. func (r *Rand) Uint16() uint16 { - return uint16(r.rand.Uint32() & (1<<16 - 1)) + return uint16(r.Uint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. @@ -178,7 +178,7 @@ func (r *Rand) Uint32() uint32 { // It is not safe for cryptographic usage. func (r *Rand) Uint64() uint64 { - return uint64(r.rand.Uint32())<<32 + uint64(r.rand.Uint32()) + return uint64(r.Uint32())<<32 + uint64(r.Uint32()) } // It is not safe for cryptographic usage. @@ -191,17 +191,17 @@ func (r *Rand) Uint() uint { // It is not safe for cryptographic usage. 
func (r *Rand) Int16() int16 { - return int16(r.rand.Uint32() & (1<<16 - 1)) + return int16(r.Uint32() & (1<<16 - 1)) } // It is not safe for cryptographic usage. func (r *Rand) Int32() int32 { - return int32(r.rand.Uint32()) + return int32(r.Uint32()) } // It is not safe for cryptographic usage. func (r *Rand) Int64() int64 { - return int64(r.rand.Uint64()) + return int64(r.Uint64()) } // It is not safe for cryptographic usage. @@ -231,36 +231,36 @@ func (r *Rand) Int63() int64 { // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. func (r *Rand) Uint16Exp() uint16 { - bits := r.rand.Uint32() % 16 + bits := r.Uint32() % 16 if bits == 0 { return 0 } n := uint16(1 << (bits - 1)) - n += uint16(r.rand.Int31()) & ((1 << (bits - 1)) - 1) + n += uint16(r.Int31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. func (r *Rand) Uint32Exp() uint32 { - bits := r.rand.Uint32() % 32 + bits := r.Uint32() % 32 if bits == 0 { return 0 } n := uint32(1 << (bits - 1)) - n += uint32(r.rand.Int31()) & ((1 << (bits - 1)) - 1) + n += uint32(r.Int31()) & ((1 << (bits - 1)) - 1) return n } // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. 
func (r *Rand) Uint64Exp() uint64 { - bits := r.rand.Uint32() % 64 + bits := r.Uint32() % 64 if bits == 0 { return 0 } n := uint64(1 << (bits - 1)) - n += uint64(r.rand.Int63()) & ((1 << (bits - 1)) - 1) + n += uint64(r.Int63()) & ((1 << (bits - 1)) - 1) return n } @@ -284,7 +284,7 @@ func (r *Rand) Bytes(n int) []byte { // use random bytes generated from the internal PRNG bs := make([]byte, n) for i := 0; i < len(bs); i++ { - bs[i] = byte(r.rand.Int() & 0xFF) + bs[i] = byte(r.Int() & 0xFF) } return bs } From b0e0dc5de387ccdf96cb4c3e0637f07c73e5e9e0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 18 Mar 2018 01:52:28 +0100 Subject: [PATCH 390/515] Implement DebugDB (#166) --- db/common_test.go | 6 +- db/db_test.go | 12 ++- db/debug_db.go | 216 ++++++++++++++++++++++++++++++++++++++++++ db/mem_batch.go | 7 +- db/prefix_db.go | 10 +- merkle/simple_tree.go | 3 + 6 files changed, 239 insertions(+), 15 deletions(-) create mode 100644 db/debug_db.go diff --git a/db/common_test.go b/db/common_test.go index 5afec28b3..7f9d10e9b 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -107,11 +107,11 @@ func (mdb *mockDB) SetNoLockSync([]byte, []byte) { mdb.calls["SetNoLockSync"] += 1 } -func (mdb *mockDB) Delete([]byte, []byte) { +func (mdb *mockDB) Delete([]byte) { mdb.calls["Delete"] += 1 } -func (mdb *mockDB) DeleteSync([]byte, []byte) { +func (mdb *mockDB) DeleteSync([]byte) { mdb.calls["DeleteSync"] += 1 } @@ -144,7 +144,7 @@ func (mdb *mockDB) NewBatch() Batch { func (mdb *mockDB) Print() { mdb.calls["Print"] += 1 - fmt.Sprintf("mockDB{%v}", mdb.Stats()) + fmt.Printf("mockDB{%v}", mdb.Stats()) } func (mdb *mockDB) Stats() map[string]string { diff --git a/db/db_test.go b/db/db_test.go index 8884cea2d..3d6ac38c4 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -111,7 +111,8 @@ func TestDBIteratorNonemptyBeginAfter(t *testing.T) { func TestDBBatchWrite1(t *testing.T) { mdb := newMockDB() - batch := mdb.NewBatch() + ddb := NewDebugDB(t.Name(), mdb) + batch := 
ddb.NewBatch() batch.Set(bz("1"), bz("1")) batch.Set(bz("2"), bz("2")) @@ -131,7 +132,8 @@ func TestDBBatchWrite1(t *testing.T) { func TestDBBatchWrite2(t *testing.T) { mdb := newMockDB() - batch := mdb.NewBatch() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() batch.Set(bz("1"), bz("1")) batch.Set(bz("2"), bz("2")) @@ -151,7 +153,8 @@ func TestDBBatchWrite2(t *testing.T) { func TestDBBatchWriteSync1(t *testing.T) { mdb := newMockDB() - batch := mdb.NewBatch() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() batch.Set(bz("1"), bz("1")) batch.Set(bz("2"), bz("2")) @@ -171,7 +174,8 @@ func TestDBBatchWriteSync1(t *testing.T) { func TestDBBatchWriteSync2(t *testing.T) { mdb := newMockDB() - batch := mdb.NewBatch() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() batch.Set(bz("1"), bz("1")) batch.Set(bz("2"), bz("2")) diff --git a/db/debug_db.go b/db/debug_db.go new file mode 100644 index 000000000..7a15bc294 --- /dev/null +++ b/db/debug_db.go @@ -0,0 +1,216 @@ +package db + +import ( + "fmt" + "sync" +) + +//---------------------------------------- +// debugDB + +type debugDB struct { + label string + db DB +} + +// For printing all operationgs to the console for debugging. +func NewDebugDB(label string, db DB) debugDB { + return debugDB{ + label: label, + db: db, + } +} + +// Implements atomicSetDeleter. +func (ddb debugDB) Mutex() *sync.Mutex { return nil } + +// Implements DB. +func (ddb debugDB) Get(key []byte) (value []byte) { + defer fmt.Printf("%v.Get(%X) %X\n", ddb.label, key, value) + value = ddb.db.Get(key) + return +} + +// Implements DB. +func (ddb debugDB) Has(key []byte) (has bool) { + defer fmt.Printf("%v.Has(%X) %v\n", ddb.label, key, has) + return ddb.db.Has(key) +} + +// Implements DB. +func (ddb debugDB) Set(key []byte, value []byte) { + fmt.Printf("%v.Set(%X, %X)\n", ddb.label, key, value) + ddb.db.Set(key, value) +} + +// Implements DB. 
+func (ddb debugDB) SetSync(key []byte, value []byte) { + fmt.Printf("%v.SetSync(%X, %X)\n", ddb.label, key, value) + ddb.db.SetSync(key, value) +} + +// Implements atomicSetDeleter. +func (ddb debugDB) SetNoLock(key []byte, value []byte) { + fmt.Printf("%v.SetNoLock(%X, %X)\n", ddb.label, key, value) + ddb.db.Set(key, value) +} + +// Implements atomicSetDeleter. +func (ddb debugDB) SetNoLockSync(key []byte, value []byte) { + fmt.Printf("%v.SetNoLockSync(%X, %X)\n", ddb.label, key, value) + ddb.db.SetSync(key, value) +} + +// Implements DB. +func (ddb debugDB) Delete(key []byte) { + fmt.Printf("%v.Delete(%X)\n", ddb.label, key) + ddb.db.Delete(key) +} + +// Implements DB. +func (ddb debugDB) DeleteSync(key []byte) { + fmt.Printf("%v.DeleteSync(%X)\n", ddb.label, key) + ddb.db.DeleteSync(key) +} + +// Implements atomicSetDeleter. +func (ddb debugDB) DeleteNoLock(key []byte) { + fmt.Printf("%v.DeleteNoLock(%X)\n", ddb.label, key) + ddb.db.Delete(key) +} + +// Implements atomicSetDeleter. +func (ddb debugDB) DeleteNoLockSync(key []byte) { + fmt.Printf("%v.DeleteNoLockSync(%X)\n", ddb.label, key) + ddb.db.DeleteSync(key) +} + +// Implements DB. +func (ddb debugDB) Iterator(start, end []byte) Iterator { + fmt.Printf("%v.Iterator(%X, %X)\n", ddb.label, start, end) + return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end)) +} + +// Implements DB. +func (ddb debugDB) ReverseIterator(start, end []byte) Iterator { + fmt.Printf("%v.ReverseIterator(%X, %X)\n", ddb.label, start, end) + return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end)) +} + +// Implements DB. +func (ddb debugDB) NewBatch() Batch { + fmt.Printf("%v.NewBatch()\n", ddb.label) + return NewDebugBatch(ddb.label, ddb.db.NewBatch()) +} + +// Implements DB. +func (ddb debugDB) Close() { + fmt.Printf("%v.Close()\n", ddb.label) + ddb.db.Close() +} + +// Implements DB. +func (ddb debugDB) Print() { + ddb.db.Print() +} + +// Implements DB. 
+func (ddb debugDB) Stats() map[string]string { + return ddb.db.Stats() +} + +//---------------------------------------- +// debugIterator + +type debugIterator struct { + label string + itr Iterator +} + +// For printing all operationgs to the console for debugging. +func NewDebugIterator(label string, itr Iterator) debugIterator { + return debugIterator{ + label: label, + itr: itr, + } +} + +// Implements Iterator. +func (ditr debugIterator) Domain() (start []byte, end []byte) { + defer fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end) + start, end = ditr.itr.Domain() + return +} + +// Implements Iterator. +func (ditr debugIterator) Valid() (ok bool) { + defer fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok) + ok = ditr.itr.Valid() + return +} + +// Implements Iterator. +func (ditr debugIterator) Next() { + fmt.Printf("%v.itr.Next()\n", ditr.label) + ditr.itr.Next() +} + +// Implements Iterator. +func (ditr debugIterator) Key() (key []byte) { + fmt.Printf("%v.itr.Key() %X\n", ditr.label, key) + key = ditr.itr.Key() + return +} + +// Implements Iterator. +func (ditr debugIterator) Value() (value []byte) { + fmt.Printf("%v.itr.Value() %X\n", ditr.label, value) + value = ditr.itr.Value() + return +} + +// Implements Iterator. +func (ditr debugIterator) Close() { + fmt.Printf("%v.itr.Close()\n", ditr.label) + ditr.itr.Close() +} + +//---------------------------------------- +// debugBatch + +type debugBatch struct { + label string + bch Batch +} + +// For printing all operationgs to the console for debugging. +func NewDebugBatch(label string, bch Batch) debugBatch { + return debugBatch{ + label: label, + bch: bch, + } +} + +// Implements Batch. +func (dbch debugBatch) Set(key, value []byte) { + fmt.Printf("%v.batch.Set(%X, %X)\n", dbch.label, key, value) + dbch.bch.Set(key, value) +} + +// Implements Batch. 
+func (dbch debugBatch) Delete(key []byte) { + fmt.Printf("%v.batch.Delete(%X)\n", dbch.label, key) + dbch.bch.Delete(key) +} + +// Implements Batch. +func (dbch debugBatch) Write() { + fmt.Printf("%v.batch.Write()\n", dbch.label) + dbch.bch.Write() +} + +// Implements Batch. +func (dbch debugBatch) WriteSync() { + fmt.Printf("%v.batch.WriteSync()\n", dbch.label) + dbch.bch.WriteSync() +} diff --git a/db/mem_batch.go b/db/mem_batch.go index 756798ded..81a63d62b 100644 --- a/db/mem_batch.go +++ b/db/mem_batch.go @@ -45,9 +45,10 @@ func (mBatch *memBatch) WriteSync() { } func (mBatch *memBatch) write(doSync bool) { - mtx := mBatch.db.Mutex() - mtx.Lock() - defer mtx.Unlock() + if mtx := mBatch.db.Mutex(); mtx != nil { + mtx.Lock() + defer mtx.Unlock() + } for i, op := range mBatch.ops { if doSync && i == (len(mBatch.ops)-1) { diff --git a/db/prefix_db.go b/db/prefix_db.go index 5947e7fce..4381ce070 100644 --- a/db/prefix_db.go +++ b/db/prefix_db.go @@ -123,10 +123,10 @@ func (pdb *prefixDB) Iterator(start, end []byte) Iterator { pdb.mtx.Lock() defer pdb.mtx.Unlock() - pstart := append([]byte(pdb.prefix), start...) + pstart := append(pdb.prefix, start...) pend := []byte(nil) if end != nil { - pend = append([]byte(pdb.prefix), end...) + pend = append(pdb.prefix, end...) } return newUnprefixIterator( pdb.prefix, @@ -144,11 +144,11 @@ func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { pstart := []byte(nil) if start != nil { - pstart = append([]byte(pdb.prefix), start...) + pstart = append(pdb.prefix, start...) } pend := []byte(nil) if end != nil { - pend = append([]byte(pdb.prefix), end...) + pend = append(pdb.prefix, end...) } return newUnprefixIterator( pdb.prefix, @@ -201,7 +201,7 @@ func (pdb *prefixDB) Stats() map[string]string { } func (pdb *prefixDB) prefixed(key []byte) []byte { - return append([]byte(pdb.prefix), key...) + return append(pdb.prefix, key...) 
} //---------------------------------------- diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index a363ea8e8..9bdf52cb2 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -31,6 +31,9 @@ import ( func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { var hasher = ripemd160.New() err := encodeByteSlice(hasher, left) + if err != nil { + panic(err) + } err = encodeByteSlice(hasher, right) if err != nil { panic(err) From b1c9b825311d642be437397759e0e93d7f368cf5 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sun, 18 Mar 2018 04:17:11 -0700 Subject: [PATCH 391/515] common: NewBitArray never crashes on negatives (#170) Fixes #169 Fixes https://github.com/tendermint/tendermint/issues/1322 The previous code was very trusting assuming that rational actors will use this code. However, Byzantine actors don't care and in the case of the linked issue negative lengths can be sent to this code unfettered having been received from a peer. This code is essentially just a sign change from `==` to `<=` and we've gutted out that attack by being more defensive. --- common/bit_array.go | 2 +- common/bit_array_test.go | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/common/bit_array.go b/common/bit_array.go index 7cc84705e..a3a87ccab 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -15,7 +15,7 @@ type BitArray struct { // There is no BitArray whose Size is 0. Use nil instead. 
func NewBitArray(bits int) *BitArray { - if bits == 0 { + if bits <= 0 { return nil } return &BitArray{ diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 94a312b7e..fbc438cd1 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -208,3 +208,10 @@ func TestUpdateNeverPanics(t *testing.T) { b.Update(a) } } + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1<<31} + for _, bits := range bitList { + _ = NewBitArray(bits) + } +} From 4b0058dd6422eef7b8939fead2423aa692fecd2c Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sun, 18 Mar 2018 04:19:23 -0700 Subject: [PATCH 392/515] common: remove {Left, Right}PadString (#168) Fixes #134 Those functions are unused in the whole Tendermint Github organization plus they were unnecessariy verbose and could have been concisely replaced with ```go func RightPadString(s string, totalLength uint) string { return fmt.Sprintf("% *s", totalLength, s) } func LeftPadString(s string, totalLength uint) string { return fmt.Sprintf("% -*s", totalLength, s) } ``` delete them anyways --- common/string.go | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/common/string.go b/common/string.go index 64484921f..ae140c9f4 100644 --- a/common/string.go +++ b/common/string.go @@ -15,24 +15,6 @@ var Fmt = func(format string, a ...interface{}) string { } } -// RightPadString adds spaces to the right of a string to make it length totalLength -func RightPadString(s string, totalLength int) string { - remaining := totalLength - len(s) - if remaining > 0 { - s = s + strings.Repeat(" ", remaining) - } - return s -} - -// LeftPadString adds spaces to the left of a string to make it length totalLength -func LeftPadString(s string, totalLength int) string { - remaining := totalLength - len(s) - if remaining > 0 { - s = strings.Repeat(" ", remaining) + s - } - return s -} - // IsHex returns true for non-empty hex-string prefixed with "0x" func IsHex(s 
string) bool { if len(s) > 2 && strings.EqualFold(s[:2], "0x") { From bb65f097fdb0f0ba9652bb65682676243b255aea Mon Sep 17 00:00:00 2001 From: Alexander Simmerl Date: Mon, 19 Mar 2018 09:38:28 +0100 Subject: [PATCH 393/515] Simplify WriteFileAtomic We can make the implementation more robust by adjusting our assumptions and leverage explicit file modes for syncing. Additionally we going to assume that we want to clean up and can't really recover if thos operations (file close and removal) fail. * utilise file mode for majority of concerns * improve test coverage by covering more assumptions * signature parity with ioutil.WriteFile * always clean up Replaces #160 --- common/os.go | 44 ++++++++++++++++++++++---------------------- common/os_test.go | 34 ++++++++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/common/os.go b/common/os.go index 36fc969fa..f1e07115c 100644 --- a/common/os.go +++ b/common/os.go @@ -124,32 +124,32 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { } } -// WriteFileAtomic writes newBytes to temp and atomically moves to filePath -// when everything else succeeds. -func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { - dir := filepath.Dir(filePath) - f, err := ioutil.TempFile(dir, "") +// WriteFileAtomic creates a temporary file with data and the perm given and +// swaps it atomically with filename if successful. +func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { + var ( + dir = filepath.Dir(filename) + tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32)) + // Override in case it does exist, create in case it doesn't and force kernel + // flush, which still leaves the potential of lingering disk cache. 
+ flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC + ) + + f, err := os.OpenFile(tempFile, flag, perm) if err != nil { return err } - _, err = f.Write(newBytes) - if err == nil { - err = f.Sync() - } - if closeErr := f.Close(); err == nil { - err = closeErr - } - if permErr := os.Chmod(f.Name(), mode); err == nil { - err = permErr - } - if err == nil { - err = os.Rename(f.Name(), filePath) - } - // any err should result in full cleanup - if err != nil { - os.Remove(f.Name()) + // Clean up in any case. Defer stacking order is last-in-first-out. + defer os.Remove(f.Name()) + defer f.Close() + + if n, err := f.Write(data); err != nil { + return err + } else if n < len(data) { + return io.ErrShortWrite } - return err + + return os.Rename(f.Name(), filename) } //-------------------------------------------------------------------------------- diff --git a/common/os_test.go b/common/os_test.go index 126723aa6..97ad672b5 100644 --- a/common/os_test.go +++ b/common/os_test.go @@ -2,30 +2,52 @@ package common import ( "bytes" - "fmt" "io/ioutil" + "math/rand" "os" "testing" "time" ) func TestWriteFileAtomic(t *testing.T) { - data := []byte("Becatron") - fname := fmt.Sprintf("/tmp/write-file-atomic-test-%v.txt", time.Now().UnixNano()) - err := WriteFileAtomic(fname, data, 0664) + var ( + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + data = []byte(RandStr(seed.Intn(2048))) + old = RandBytes(seed.Intn(2048)) + perm os.FileMode = 0600 + ) + + f, err := ioutil.TempFile("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } - rData, err := ioutil.ReadFile(fname) + defer os.Remove(f.Name()) + + if err := ioutil.WriteFile(f.Name(), old, 0664); err != nil { + t.Fatal(err) + } + + if err := WriteFileAtomic(f.Name(), data, perm); err != nil { + t.Fatal(err) + } + + rData, err := ioutil.ReadFile(f.Name()) if err != nil { t.Fatal(err) } + if !bytes.Equal(data, rData) { t.Fatalf("data mismatch: %v != %v", data, rData) } - if err := os.Remove(fname); err != nil { 
+ + stat, err := os.Stat(f.Name()) + if err != nil { t.Fatal(err) } + + if have, want := stat.Mode().Perm(), perm; have != want { + t.Errorf("have %v, want %v", have, want) + } } func TestGoPath(t *testing.T) { From bf24f2dcc529772dbb943043eca319441108796c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 20 Mar 2018 19:24:18 +0100 Subject: [PATCH 394/515] Implement better Parallel (#174) * Implement better Parallel --- common/async.go | 67 ++++++++++++++++---- common/async_test.go | 145 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 201 insertions(+), 11 deletions(-) create mode 100644 common/async_test.go diff --git a/common/async.go b/common/async.go index 1d302c344..23d1a42b3 100644 --- a/common/async.go +++ b/common/async.go @@ -1,15 +1,60 @@ package common -import "sync" - -func Parallel(tasks ...func()) { - var wg sync.WaitGroup - wg.Add(len(tasks)) - for _, task := range tasks { - go func(task func()) { - task() - wg.Done() - }(task) +// val: the value returned after task execution. +// err: the error returned during task completion. +// abort: tells Parallel to return, whether or not all tasks have completed. +type Task func(i int) (val interface{}, err error, abort bool) + +type TaskResult struct { + Value interface{} + Error error + Panic interface{} +} + +type TaskResultCh <-chan TaskResult + +// Run tasks in parallel, with ability to abort early. +// NOTE: Do not implement quit features here. Instead, provide convenient +// concurrent quit-like primitives, passed implicitly via Task closures. (e.g. +// it's not Parallel's concern how you quit/abort your tasks). +func Parallel(tasks ...Task) []TaskResultCh { + var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. + var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. + + // Start all tasks in parallel in separate goroutines. 
+ // When the task is complete, it will appear in the + // respective taskResultCh (associated by task index). + for i, task := range tasks { + var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result. + taskResultChz[i] = taskResultCh + go func(i int, task Task, taskResultCh chan TaskResult) { + // Recovery + defer func() { + if pnk := recover(); pnk != nil { + taskResultCh <- TaskResult{nil, nil, pnk} + taskDoneCh <- false + } + }() + // Run the task. + var val, err, abort = task(i) + // Send val/err to taskResultCh. + // NOTE: Below this line, nothing must panic/ + taskResultCh <- TaskResult{val, err, nil} + // Decrement waitgroup. + taskDoneCh <- abort + }(i, task, taskResultCh) + } + + // Wait until all tasks are done, or until abort. + for i := 0; i < len(tasks); i++ { + abort := <-taskDoneCh + if abort { + break + } } - wg.Wait() + + // Caller can use this however they want. + // TODO: implement convenience functions to + // make sense of this structure safely. + return taskResultChz } diff --git a/common/async_test.go b/common/async_test.go new file mode 100644 index 000000000..1d6b0e7b0 --- /dev/null +++ b/common/async_test.go @@ -0,0 +1,145 @@ +package common + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestParallel(t *testing.T) { + + // Create tasks. + var counter = new(int32) + var tasks = make([]Task, 100*1000) + for i := 0; i < len(tasks); i++ { + tasks[i] = func(i int) (res interface{}, err error, abort bool) { + atomic.AddInt32(counter, 1) + return -1 * i, nil, false + } + } + + // Run in parallel. + var taskResultChz = Parallel(tasks...) + + // Verify. 
+ assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") + var failedTasks int + for i := 0; i < len(tasks); i++ { + select { + case taskResult := <-taskResultChz[i]: + if taskResult.Error != nil { + assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) + failedTasks += 1 + } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + failedTasks += 1 + } else { + // Good! + } + default: + failedTasks += 1 + } + } + assert.Equal(t, failedTasks, 0, "No task should have failed") + +} + +func TestParallelAbort(t *testing.T) { + + var flow1 = make(chan struct{}, 1) + var flow2 = make(chan struct{}, 1) + var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. + var flow4 = make(chan struct{}, 1) + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 0) + flow1 <- struct{}{} + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 1) + flow2 <- <-flow1 + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 2) + flow3 <- <-flow2 + return 2, nil, true + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 3) + <-flow4 + return 3, nil, false + }, + } + + // Run in parallel. + var taskResultChz = Parallel(tasks...) + + // Verify task #3. + // Initially taskResultCh[3] sends nothing since flow4 didn't send. + waitTimeout(t, taskResultChz[3], "Task #3") + + // Now let the last task (#3) complete after abort. + flow4 <- <-flow3 + + // Verify task #0, #1, #2. + waitFor(t, taskResultChz[0], "Task #0", 0, nil, nil) + waitFor(t, taskResultChz[1], "Task #1", 1, errors.New("some error"), nil) + waitFor(t, taskResultChz[2], "Task #2", 2, nil, nil) +} + +func TestParallelRecover(t *testing.T) { + + // Create tasks. 
+ var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + panic(2) + }, + } + + // Run in parallel. + var taskResultChz = Parallel(tasks...) + + // Verify task #0, #1, #2. + waitFor(t, taskResultChz[0], "Task #0", 0, nil, nil) + waitFor(t, taskResultChz[1], "Task #1", 1, errors.New("some error"), nil) + waitFor(t, taskResultChz[2], "Task #2", nil, nil, 2) +} + +// Wait for result +func waitFor(t *testing.T, taskResultCh TaskResultCh, taskName string, val interface{}, err error, pnk interface{}) { + select { + case taskResult, ok := <-taskResultCh: + assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) + assert.Equal(t, val, taskResult.Value, taskName) + assert.Equal(t, err, taskResult.Error, taskName) + assert.Equal(t, pnk, taskResult.Panic, taskName) + default: + assert.Fail(t, "Failed to receive result for %v", taskName) + } +} + +// Wait for timeout (no result) +func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) { + select { + case _, ok := <-taskResultCh: + if !ok { + assert.Fail(t, "TaskResultCh unexpectedly closed (%v)", taskName) + } else { + assert.Fail(t, "TaskResultCh unexpectedly returned for %v", taskName) + } + case <-time.After(1 * time.Second): // TODO use deterministic time? + // Good! 
+ } +} From db48010e813ca5b527ef6081c00fb11e325eedb0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 20 Mar 2018 19:58:05 +0100 Subject: [PATCH 395/515] Add return parameter to Parallel --- common/async.go | 18 ++++++++++++++++-- common/async_test.go | 9 ++++++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/common/async.go b/common/async.go index 23d1a42b3..e7bc71b1a 100644 --- a/common/async.go +++ b/common/async.go @@ -1,5 +1,9 @@ package common +import ( + "sync/atomic" +) + // val: the value returned after task execution. // err: the error returned during task completion. // abort: tells Parallel to return, whether or not all tasks have completed. @@ -14,12 +18,15 @@ type TaskResult struct { type TaskResultCh <-chan TaskResult // Run tasks in parallel, with ability to abort early. +// Returns ok=false iff any of the tasks returned abort=true. // NOTE: Do not implement quit features here. Instead, provide convenient // concurrent quit-like primitives, passed implicitly via Task closures. (e.g. // it's not Parallel's concern how you quit/abort your tasks). -func Parallel(tasks ...Task) []TaskResultCh { +func Parallel(tasks ...Task) (chz []TaskResultCh, ok bool) { var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. + var numPanics = new(int32) // Keep track of panics to set ok=false later. + ok = true // We will set it to false iff any tasks panic'd or returned abort. // Start all tasks in parallel in separate goroutines. // When the task is complete, it will appear in the @@ -31,6 +38,7 @@ func Parallel(tasks ...Task) []TaskResultCh { // Recovery defer func() { if pnk := recover(); pnk != nil { + atomic.AddInt32(numPanics, 1) taskResultCh <- TaskResult{nil, nil, pnk} taskDoneCh <- false } @@ -46,15 +54,21 @@ func Parallel(tasks ...Task) []TaskResultCh { } // Wait until all tasks are done, or until abort. 
+ // DONE_LOOP: for i := 0; i < len(tasks); i++ { abort := <-taskDoneCh if abort { + ok = false break } } + // Ok is also false if there were any panics. + // We must do this check here (after DONE_LOOP). + ok = ok && (atomic.LoadInt32(numPanics) == 0) + // Caller can use this however they want. // TODO: implement convenience functions to // make sense of this structure safely. - return taskResultChz + return taskResultChz, ok } diff --git a/common/async_test.go b/common/async_test.go index 1d6b0e7b0..f2a83d56d 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -22,7 +22,8 @@ func TestParallel(t *testing.T) { } // Run in parallel. - var taskResultChz = Parallel(tasks...) + var taskResultChz, ok = Parallel(tasks...) + assert.True(t, ok) // Verify. assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") @@ -78,7 +79,8 @@ func TestParallelAbort(t *testing.T) { } // Run in parallel. - var taskResultChz = Parallel(tasks...) + var taskResultChz, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we aborted task #2.") // Verify task #3. // Initially taskResultCh[3] sends nothing since flow4 didn't send. @@ -109,7 +111,8 @@ func TestParallelRecover(t *testing.T) { } // Run in parallel. - var taskResultChz = Parallel(tasks...) + var taskResultChz, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we panic'd in task #2.") // Verify task #0, #1, #2. 
waitFor(t, taskResultChz[0], "Task #0", 0, nil, nil) From 4caf943f49759c5429a1228e390330e622a43738 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 20 Mar 2018 21:43:58 +0100 Subject: [PATCH 396/515] Parallel returns a TaskResultSet --- common/async.go | 95 +++++++++++++++++++++++++++++++++++++++++--- common/async_test.go | 24 +++++------ 2 files changed, 102 insertions(+), 17 deletions(-) diff --git a/common/async.go b/common/async.go index e7bc71b1a..64e320760 100644 --- a/common/async.go +++ b/common/async.go @@ -4,6 +4,9 @@ import ( "sync/atomic" ) +//---------------------------------------- +// Task + // val: the value returned after task execution. // err: the error returned during task completion. // abort: tells Parallel to return, whether or not all tasks have completed. @@ -17,12 +20,97 @@ type TaskResult struct { type TaskResultCh <-chan TaskResult +type taskResultOK struct { + TaskResult + OK bool +} + +type TaskResultSet struct { + chz []TaskResultCh + results []taskResultOK +} + +func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { + return &TaskResultSet{ + chz: chz, + results: nil, + } +} + +func (trs *TaskResultSet) Channels() []TaskResultCh { + return trs.chz +} + +func (trs *TaskResultSet) LastResult(index int) (TaskResult, bool) { + if len(trs.results) <= index { + return TaskResult{}, false + } + resultOK := trs.results[index] + return resultOK.TaskResult, resultOK.OK +} + +// NOTE: Not concurrency safe. +func (trs *TaskResultSet) Reap() { + if trs.results == nil { + trs.results = make([]taskResultOK, len(trs.chz)) + } + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + select { + case result := <-trch: + // Overwrite result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + default: + // Do nothing. + } + } +} + +// Returns the firstmost (by task index) error as +// discovered by all previous Reap() calls. 
+func (trs *TaskResultSet) FirstValue() interface{} { + for _, result := range trs.results { + if result.Value != nil { + return result.Value + } + } + return nil +} + +// Returns the firstmost (by task index) error as +// discovered by all previous Reap() calls. +func (trs *TaskResultSet) FirstError() error { + for _, result := range trs.results { + if result.Error != nil { + return result.Error + } + } + return nil +} + +// Returns the firstmost (by task index) panic as +// discovered by all previous Reap() calls. +func (trs *TaskResultSet) FirstPanic() interface{} { + for _, result := range trs.results { + if result.Panic != nil { + return result.Panic + } + } + return nil +} + +//---------------------------------------- +// Parallel + // Run tasks in parallel, with ability to abort early. // Returns ok=false iff any of the tasks returned abort=true. // NOTE: Do not implement quit features here. Instead, provide convenient // concurrent quit-like primitives, passed implicitly via Task closures. (e.g. // it's not Parallel's concern how you quit/abort your tasks). -func Parallel(tasks ...Task) (chz []TaskResultCh, ok bool) { +func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. var numPanics = new(int32) // Keep track of panics to set ok=false later. @@ -67,8 +155,5 @@ func Parallel(tasks ...Task) (chz []TaskResultCh, ok bool) { // We must do this check here (after DONE_LOOP). ok = ok && (atomic.LoadInt32(numPanics) == 0) - // Caller can use this however they want. - // TODO: implement convenience functions to - // make sense of this structure safely. 
- return taskResultChz, ok + return newTaskResultSet(taskResultChz), ok } diff --git a/common/async_test.go b/common/async_test.go index f2a83d56d..3b47c3fa2 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -22,7 +22,7 @@ func TestParallel(t *testing.T) { } // Run in parallel. - var taskResultChz, ok = Parallel(tasks...) + var trs, ok = Parallel(tasks...) assert.True(t, ok) // Verify. @@ -30,7 +30,7 @@ func TestParallel(t *testing.T) { var failedTasks int for i := 0; i < len(tasks); i++ { select { - case taskResult := <-taskResultChz[i]: + case taskResult := <-trs.chz[i]: if taskResult.Error != nil { assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) failedTasks += 1 @@ -79,20 +79,20 @@ func TestParallelAbort(t *testing.T) { } // Run in parallel. - var taskResultChz, ok = Parallel(tasks...) + var taskResultSet, ok = Parallel(tasks...) assert.False(t, ok, "ok should be false since we aborted task #2.") // Verify task #3. - // Initially taskResultCh[3] sends nothing since flow4 didn't send. - waitTimeout(t, taskResultChz[3], "Task #3") + // Initially taskResultSet.chz[3] sends nothing since flow4 didn't send. + waitTimeout(t, taskResultSet.chz[3], "Task #3") // Now let the last task (#3) complete after abort. flow4 <- <-flow3 // Verify task #0, #1, #2. - waitFor(t, taskResultChz[0], "Task #0", 0, nil, nil) - waitFor(t, taskResultChz[1], "Task #1", 1, errors.New("some error"), nil) - waitFor(t, taskResultChz[2], "Task #2", 2, nil, nil) + waitFor(t, taskResultSet.chz[0], "Task #0", 0, nil, nil) + waitFor(t, taskResultSet.chz[1], "Task #1", 1, errors.New("some error"), nil) + waitFor(t, taskResultSet.chz[2], "Task #2", 2, nil, nil) } func TestParallelRecover(t *testing.T) { @@ -111,13 +111,13 @@ func TestParallelRecover(t *testing.T) { } // Run in parallel. - var taskResultChz, ok = Parallel(tasks...) + var taskResultSet, ok = Parallel(tasks...) 
assert.False(t, ok, "ok should be false since we panic'd in task #2.") // Verify task #0, #1, #2. - waitFor(t, taskResultChz[0], "Task #0", 0, nil, nil) - waitFor(t, taskResultChz[1], "Task #1", 1, errors.New("some error"), nil) - waitFor(t, taskResultChz[2], "Task #2", nil, nil, 2) + waitFor(t, taskResultSet.chz[0], "Task #0", 0, nil, nil) + waitFor(t, taskResultSet.chz[1], "Task #1", 1, errors.New("some error"), nil) + waitFor(t, taskResultSet.chz[2], "Task #2", nil, nil, 2) } // Wait for result From 4e5c655944c9a636eaed549e6ad8fd8011fb4d42 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 20 Mar 2018 23:08:51 +0100 Subject: [PATCH 397/515] Parallel reaps automatically before returning --- common/async.go | 7 +++--- common/async_test.go | 59 +++++++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/common/async.go b/common/async.go index 64e320760..31dc2e968 100644 --- a/common/async.go +++ b/common/async.go @@ -41,7 +41,7 @@ func (trs *TaskResultSet) Channels() []TaskResultCh { return trs.chz } -func (trs *TaskResultSet) LastResult(index int) (TaskResult, bool) { +func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { if len(trs.results) <= index { return TaskResult{}, false } @@ -50,7 +50,7 @@ func (trs *TaskResultSet) LastResult(index int) (TaskResult, bool) { } // NOTE: Not concurrency safe. -func (trs *TaskResultSet) Reap() { +func (trs *TaskResultSet) Reap() *TaskResultSet { if trs.results == nil { trs.results = make([]taskResultOK, len(trs.chz)) } @@ -67,6 +67,7 @@ func (trs *TaskResultSet) Reap() { // Do nothing. } } + return trs } // Returns the firstmost (by task index) error as @@ -155,5 +156,5 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { // We must do this check here (after DONE_LOOP). 
ok = ok && (atomic.LoadInt32(numPanics) == 0) - return newTaskResultSet(taskResultChz), ok + return newTaskResultSet(taskResultChz).Reap(), ok } diff --git a/common/async_test.go b/common/async_test.go index 3b47c3fa2..2e8db26eb 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -2,6 +2,7 @@ package common import ( "errors" + "fmt" "sync/atomic" "testing" "time" @@ -29,22 +30,27 @@ func TestParallel(t *testing.T) { assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") var failedTasks int for i := 0; i < len(tasks); i++ { - select { - case taskResult := <-trs.chz[i]: - if taskResult.Error != nil { - assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) - failedTasks += 1 - } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { - failedTasks += 1 - } else { - // Good! - } - default: + taskResult, ok := trs.LatestResult(i) + if !ok { + assert.Fail(t, "Task #%v did not complete.", i) + failedTasks += 1 + } else if taskResult.Error != nil { + assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) + failedTasks += 1 + } else if taskResult.Panic != nil { + assert.Fail(t, "Task should not have panic'd but got %v", taskResult.Panic) + failedTasks += 1 + } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) failedTasks += 1 + } else { + // Good! } } assert.Equal(t, failedTasks, 0, "No task should have failed") - + assert.Nil(t, trs.FirstError(), "There should be no errors") + assert.Nil(t, trs.FirstPanic(), "There should be no panics") + assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") } func TestParallelAbort(t *testing.T) { @@ -90,9 +96,9 @@ func TestParallelAbort(t *testing.T) { flow4 <- <-flow3 // Verify task #0, #1, #2. 
- waitFor(t, taskResultSet.chz[0], "Task #0", 0, nil, nil) - waitFor(t, taskResultSet.chz[1], "Task #1", 1, errors.New("some error"), nil) - waitFor(t, taskResultSet.chz[2], "Task #2", 2, nil, nil) + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, 2, nil, nil) } func TestParallelRecover(t *testing.T) { @@ -115,22 +121,19 @@ func TestParallelRecover(t *testing.T) { assert.False(t, ok, "ok should be false since we panic'd in task #2.") // Verify task #0, #1, #2. - waitFor(t, taskResultSet.chz[0], "Task #0", 0, nil, nil) - waitFor(t, taskResultSet.chz[1], "Task #1", 1, errors.New("some error"), nil) - waitFor(t, taskResultSet.chz[2], "Task #2", nil, nil, 2) + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, nil, nil, 2) } // Wait for result -func waitFor(t *testing.T, taskResultCh TaskResultCh, taskName string, val interface{}, err error, pnk interface{}) { - select { - case taskResult, ok := <-taskResultCh: - assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) - assert.Equal(t, val, taskResult.Value, taskName) - assert.Equal(t, err, taskResult.Error, taskName) - assert.Equal(t, pnk, taskResult.Panic, taskName) - default: - assert.Fail(t, "Failed to receive result for %v", taskName) - } +func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val interface{}, err error, pnk interface{}) { + taskResult, ok := taskResultSet.LatestResult(index) + taskName := fmt.Sprintf("Task #%v", index) + assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) + assert.Equal(t, val, taskResult.Value, taskName) + assert.Equal(t, err, taskResult.Error, taskName) + assert.Equal(t, pnk, taskResult.Panic, taskName) } // Wait for timeout (no result) From b6400af7ac9f1ac0d8c5a956ca0f21e05938f46f Mon Sep 17 00:00:00 2001 From: Ethan Buchman 
Date: Wed, 21 Mar 2018 05:15:30 +0100 Subject: [PATCH 398/515] update version, changelog --- CHANGELOG.md | 19 +++++++++++++++++++ version/version.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a305b20..2fbf64873 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 0.7.1 (TBD) + +BREAKING: + + - [merkle] PutVarint->PutUvarint in encodeByteSlice + - [db] batch.WriteSync() + +FEATURES: + + - [db] NewPrefixDB for a DB with all keys prefixed + +IMPROVEMENTS: + + - glide -> dep + +BUG FIXES: + + + ## 0.7.0 (February 20, 2018) BREAKING: diff --git a/version/version.go b/version/version.go index 2c0474fa8..c683dd245 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.7.0" +const Version = "0.8.0-dev" From dc1042eb5f429665d7fd9dd2e2720e255c4e2e99 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 22 Mar 2018 13:55:55 -0400 Subject: [PATCH 399/515] finish changelog --- CHANGELOG.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fbf64873..7d3e789a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,20 @@ # Changelog -## 0.7.1 (TBD) +## 0.8.0 (March 22, 2018) BREAKING: - - [merkle] PutVarint->PutUvarint in encodeByteSlice + - [merkle] `PutVarint->PutUvarint` in encodeByteSlice - [db] batch.WriteSync() + - [common] Refactored and fixed `Parallel` function + - [common] Refactored `Rand` functionality + - [common] Remove unused `Right/LeftPadString` functions FEATURES: - [db] NewPrefixDB for a DB with all keys prefixed + - [db] NewDebugDB prints everything during operation + - [common] Error interface (so we don't use pkg/errors) IMPROVEMENTS: @@ -17,7 +22,8 @@ IMPROVEMENTS: BUG FIXES: - + - [common] Fix panic in NewBitArray for negative bits + - [common] Fix and simplify WriteFileAtomic so it cleans up properly ## 0.7.0 (February 20, 2018) From 
97bdad8262f3da14f17048dc11f095790e07fb02 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sun, 18 Mar 2018 04:17:11 -0700 Subject: [PATCH 400/515] common: NewBitArray never crashes on negatives (#170) Fixes #169 Fixes https://github.com/tendermint/tendermint/issues/1322 The previous code was very trusting assuming that rational actors will use this code. However, Byzantine actors don't care and in the case of the linked issue negative lengths can be sent to this code unfettered having been received from a peer. This code is essentially just a sign change from `==` to `<=` and we've gutted out that attack by being more defensive. --- common/bit_array.go | 2 +- common/bit_array_test.go | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/common/bit_array.go b/common/bit_array.go index 7cc84705e..a3a87ccab 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -15,7 +15,7 @@ type BitArray struct { // There is no BitArray whose Size is 0. Use nil instead. func NewBitArray(bits int) *BitArray { - if bits == 0 { + if bits <= 0 { return nil } return &BitArray{ diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 94a312b7e..fbc438cd1 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -208,3 +208,10 @@ func TestUpdateNeverPanics(t *testing.T) { b.Update(a) } } + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1<<31} + for _, bits := range bitList { + _ = NewBitArray(bits) + } +} From d46b9afb792b1ee34a49c9f9eb5f9ca7c4ef00a3 Mon Sep 17 00:00:00 2001 From: Alexander Simmerl Date: Mon, 19 Mar 2018 09:38:28 +0100 Subject: [PATCH 401/515] Simplify WriteFileAtomic We can make the implementation more robust by adjusting our assumptions and leverage explicit file modes for syncing. Additionally we going to assume that we want to clean up and can't really recover if thos operations (file close and removal) fail. 
* utilise file mode for majority of concerns * improve test coverage by covering more assumptions * signature parity with ioutil.WriteFile * always clean up Replaces #160 --- common/os.go | 44 ++++++++++++++++++++++---------------------- common/os_test.go | 34 ++++++++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/common/os.go b/common/os.go index 36fc969fa..f1e07115c 100644 --- a/common/os.go +++ b/common/os.go @@ -124,32 +124,32 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { } } -// WriteFileAtomic writes newBytes to temp and atomically moves to filePath -// when everything else succeeds. -func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error { - dir := filepath.Dir(filePath) - f, err := ioutil.TempFile(dir, "") +// WriteFileAtomic creates a temporary file with data and the perm given and +// swaps it atomically with filename if successful. +func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { + var ( + dir = filepath.Dir(filename) + tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32)) + // Override in case it does exist, create in case it doesn't and force kernel + // flush, which still leaves the potential of lingering disk cache. + flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC + ) + + f, err := os.OpenFile(tempFile, flag, perm) if err != nil { return err } - _, err = f.Write(newBytes) - if err == nil { - err = f.Sync() - } - if closeErr := f.Close(); err == nil { - err = closeErr - } - if permErr := os.Chmod(f.Name(), mode); err == nil { - err = permErr - } - if err == nil { - err = os.Rename(f.Name(), filePath) - } - // any err should result in full cleanup - if err != nil { - os.Remove(f.Name()) + // Clean up in any case. Defer stacking order is last-in-first-out. 
+ defer os.Remove(f.Name()) + defer f.Close() + + if n, err := f.Write(data); err != nil { + return err + } else if n < len(data) { + return io.ErrShortWrite } - return err + + return os.Rename(f.Name(), filename) } //-------------------------------------------------------------------------------- diff --git a/common/os_test.go b/common/os_test.go index 126723aa6..97ad672b5 100644 --- a/common/os_test.go +++ b/common/os_test.go @@ -2,30 +2,52 @@ package common import ( "bytes" - "fmt" "io/ioutil" + "math/rand" "os" "testing" "time" ) func TestWriteFileAtomic(t *testing.T) { - data := []byte("Becatron") - fname := fmt.Sprintf("/tmp/write-file-atomic-test-%v.txt", time.Now().UnixNano()) - err := WriteFileAtomic(fname, data, 0664) + var ( + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + data = []byte(RandStr(seed.Intn(2048))) + old = RandBytes(seed.Intn(2048)) + perm os.FileMode = 0600 + ) + + f, err := ioutil.TempFile("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } - rData, err := ioutil.ReadFile(fname) + defer os.Remove(f.Name()) + + if err := ioutil.WriteFile(f.Name(), old, 0664); err != nil { + t.Fatal(err) + } + + if err := WriteFileAtomic(f.Name(), data, perm); err != nil { + t.Fatal(err) + } + + rData, err := ioutil.ReadFile(f.Name()) if err != nil { t.Fatal(err) } + if !bytes.Equal(data, rData) { t.Fatalf("data mismatch: %v != %v", data, rData) } - if err := os.Remove(fname); err != nil { + + stat, err := os.Stat(f.Name()) + if err != nil { t.Fatal(err) } + + if have, want := stat.Mode().Perm(), perm; have != want { + t.Errorf("have %v, want %v", have, want) + } } func TestGoPath(t *testing.T) { From db3d1cb7fa388e5ec48ac5cc5e57efe505592fd8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 22 Mar 2018 19:33:10 -0400 Subject: [PATCH 402/515] changelog and version --- CHANGELOG.md | 11 +++++++++++ version/version.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
f3a305b20..69026e113 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## 0.7.1 (March 22, 2018) + +IMPROVEMENTS: + + - glide -> dep + +BUG FIXES: + + - [common] Fix panic in NewBitArray for negative bits + - [common] Fix and simplify WriteFileAtomic so it cleans up properly + ## 0.7.0 (February 20, 2018) BREAKING: diff --git a/version/version.go b/version/version.go index 2c0474fa8..5449f1478 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.7.0" +const Version = "0.7.1" From 87c0473730a7fd6f97e9f8be3cfeeaa21102fa41 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 24 Mar 2018 22:19:44 +0100 Subject: [PATCH 403/515] New Error (#180) * New Error can capture Stacktrace * Intelligent ErrorWrap * Review fixes --- common/errors.go | 248 ++++++++++++++++++++++++++++-------------- common/errors_test.go | 107 ++++++++++++++++++ 2 files changed, 276 insertions(+), 79 deletions(-) create mode 100644 common/errors_test.go diff --git a/common/errors.go b/common/errors.go index 8e4b02283..52c45c799 100644 --- a/common/errors.go +++ b/common/errors.go @@ -5,138 +5,228 @@ import ( "runtime" ) +//---------------------------------------- +// Convenience methods + +// ErrorWrap will just call .TraceFrom(), or create a new *cmnError. +func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { + msg := Fmt(format, args...) + if causeCmnError, ok := cause.(*cmnError); ok { + return causeCmnError.TraceFrom(1, msg) + } else { + // NOTE: cause may be nil. + // NOTE: do not use causeCmnError here, not the same as nil. + return newError(msg, cause, cause).Stacktrace() + } +} + //---------------------------------------- // Error & cmnError +/* +Usage: + +```go + // Error construction + var someT = errors.New("Some err type") + var err1 error = NewErrorWithT(someT, "my message") + ... 
+ // Wrapping + var err2 error = ErrorWrap(err1, "another message") + if (err1 != err2) { panic("should be the same") + ... + // Error handling + switch err2.T() { + case someT: ... + default: ... + } +``` + +*/ type Error interface { Error() string - Trace(format string, a ...interface{}) Error - TraceCause(cause error, format string, a ...interface{}) Error - Cause() error - Type() interface{} - WithType(t interface{}) Error + Message() string + Stacktrace() Error + Trace(format string, args ...interface{}) Error + TraceFrom(offset int, format string, args ...interface{}) Error + Cause() interface{} + WithT(t interface{}) Error + T() interface{} + Format(s fmt.State, verb rune) } // New Error with no cause where the type is the format string of the message.. -func NewError(format string, a ...interface{}) Error { - msg := Fmt(format, a...) +func NewError(format string, args ...interface{}) Error { + msg := Fmt(format, args...) return newError(msg, nil, format) } -// New Error with cause where the type is the cause, with message.. -func NewErrorWithCause(cause error, format string, a ...interface{}) Error { - msg := Fmt(format, a...) - return newError(msg, cause, cause) -} - // New Error with specified type and message. -func NewErrorWithType(type_ interface{}, format string, a ...interface{}) Error { - msg := Fmt(format, a...) - return newError(msg, nil, type_) +func NewErrorWithT(t interface{}, format string, args ...interface{}) Error { + msg := Fmt(format, args...) + return newError(msg, nil, t) } -type traceItem struct { - msg string - filename string - lineno int -} +// NOTE: The name of a function "NewErrorWithCause()" implies that you are +// creating a new Error, yet, if the cause is an Error, creating a new Error to +// hold a ref to the old Error is probably *not* what you want to do. +// So, use ErrorWrap(cause, format, a...) instead, which returns the same error +// if cause is an Error. 
+// IF you must set an Error as the cause of an Error, +// then you can use the WithCauser interface to do so manually. +// e.g. (error).(tmlibs.WithCauser).WithCause(causeError) -func (ti traceItem) String() string { - return fmt.Sprintf("%v:%v %v", ti.filename, ti.lineno, ti.msg) +type WithCauser interface { + WithCause(cause interface{}) Error } type cmnError struct { - msg string - cause error - type_ interface{} - traces []traceItem + msg string // first msg which also appears in msg + cause interface{} // underlying cause (or panic object) + t interface{} // for switching on error + msgtraces []msgtraceItem // all messages traced + stacktrace []uintptr // first stack trace } -// NOTE: Do not expose, it's not very friendly. -func newError(msg string, cause error, type_ interface{}) *cmnError { +var _ WithCauser = &cmnError{} +var _ Error = &cmnError{} + +// NOTE: do not expose. +func newError(msg string, cause interface{}, t interface{}) *cmnError { return &cmnError{ - msg: msg, - cause: cause, - type_: type_, - traces: nil, + msg: msg, + cause: cause, + t: t, + msgtraces: nil, + stacktrace: nil, } } +func (err *cmnError) Message() string { + return err.msg +} + func (err *cmnError) Error() string { - return fmt.Sprintf("Error{%v:%s,%v,%v}", err.type_, err.msg, err.cause, len(err.traces)) + return fmt.Sprintf("%v", err) +} + +// Captures a stacktrace if one was not already captured. +func (err *cmnError) Stacktrace() Error { + if err.stacktrace == nil { + var offset = 3 + var depth = 32 + err.stacktrace = captureStacktrace(offset, depth) + } + return err } // Add tracing information with msg. -func (err *cmnError) Trace(format string, a ...interface{}) Error { - msg := Fmt(format, a...) - return err.doTrace(msg, 2) +func (err *cmnError) Trace(format string, args ...interface{}) Error { + msg := Fmt(format, args...) + return err.doTrace(msg, 0) } -// Add tracing information with cause and msg. -// If a cause was already set before, it is overwritten. 
-func (err *cmnError) TraceCause(cause error, format string, a ...interface{}) Error { - msg := Fmt(format, a...) - err.cause = cause - return err.doTrace(msg, 2) +// Same as Trace, but traces the line `offset` calls out. +// If n == 0, the behavior is identical to Trace(). +func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error { + msg := Fmt(format, args...) + return err.doTrace(msg, offset) } -// Return the "type" of this message, primarily for switching -// to handle this error. -func (err *cmnError) Type() interface{} { - return err.type_ +// Return last known cause. +// NOTE: The meaning of "cause" is left for the caller to define. +// There exists no "canonical" definition of "cause". +// Instead of blaming, try to handle it, or organize it. +func (err *cmnError) Cause() interface{} { + return err.cause +} + +// Overwrites the Error's cause. +func (err *cmnError) WithCause(cause interface{}) Error { + err.cause = cause + return err } -// Overwrites the error's type. -func (err *cmnError) WithType(type_ interface{}) Error { - err.type_ = type_ +// Overwrites the Error's type. +func (err *cmnError) WithT(t interface{}) Error { + err.t = t return err } +// Return the "type" of this message, primarily for switching +// to handle this Error. +func (err *cmnError) T() interface{} { + return err.t +} + func (err *cmnError) doTrace(msg string, n int) Error { - _, fn, line, ok := runtime.Caller(n) - if !ok { - if fn == "" { - fn = "" - } - if line <= 0 { - line = -1 - } - } + pc, _, _, _ := runtime.Caller(n + 2) // +1 for doTrace(). +1 for the caller. // Include file & line number & msg. // Do not include the whole stack trace. - err.traces = append(err.traces, traceItem{ - filename: fn, - lineno: line, - msg: msg, + err.msgtraces = append(err.msgtraces, msgtraceItem{ + pc: pc, + msg: msg, }) return err } -// Return last known cause. -// NOTE: The meaning of "cause" is left for the caller to define. 
-// There exists to canonical definition of "cause". -// Instead of blaming, try to handle-or-organize it. -func (err *cmnError) Cause() error { - return err.cause +func (err *cmnError) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", &err))) + default: + if s.Flag('#') { + s.Write([]byte("--= Error =--\n")) + // Write msg. + s.Write([]byte(fmt.Sprintf("Message: %#s\n", err.msg))) + // Write cause. + s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause))) + // Write type. + s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t))) + // Write msg trace items. + s.Write([]byte(fmt.Sprintf("Msg Traces:\n"))) + for i, msgtrace := range err.msgtraces { + s.Write([]byte(fmt.Sprintf(" %4d %s\n", i, msgtrace.String()))) + } + // Write stack trace. + if err.stacktrace != nil { + s.Write([]byte(fmt.Sprintf("Stack Trace:\n"))) + for i, pc := range err.stacktrace { + fnc := runtime.FuncForPC(pc) + file, line := fnc.FileLine(pc) + s.Write([]byte(fmt.Sprintf(" %4d %s:%d\n", i, file, line))) + } + } + s.Write([]byte("--= /Error =--\n")) + } else { + // Write msg. + s.Write([]byte(fmt.Sprintf("Error{`%v`}", err.msg))) // TODO tick-esc? + } + } } //---------------------------------------- -// StackError +// stacktrace & msgtraceItem -// NOTE: Used by Tendermint p2p upon recovery. -// Err could be "Reason", since it isn't an error type. 
-type StackError struct { - Err interface{} - Stack []byte +func captureStacktrace(offset int, depth int) []uintptr { + var pcs = make([]uintptr, depth) + n := runtime.Callers(offset, pcs) + return pcs[0:n] } -func (se StackError) String() string { - return fmt.Sprintf("Error: %v\nStack: %s", se.Err, se.Stack) +type msgtraceItem struct { + pc uintptr + msg string } -func (se StackError) Error() string { - return se.String() +func (mti msgtraceItem) String() string { + fnc := runtime.FuncForPC(mti.pc) + file, line := fnc.FileLine(mti.pc) + return fmt.Sprintf("%s:%d - %s", + file, line, + mti.msg, + ) } //---------------------------------------- diff --git a/common/errors_test.go b/common/errors_test.go new file mode 100644 index 000000000..56f366179 --- /dev/null +++ b/common/errors_test.go @@ -0,0 +1,107 @@ +package common + +import ( + fmt "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorPanic(t *testing.T) { + type pnk struct { + msg string + } + + capturePanic := func() (err Error) { + defer func() { + if r := recover(); r != nil { + err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") + } + return + }() + panic(pnk{"something"}) + return nil + } + + var err = capturePanic() + + assert.Equal(t, pnk{"something"}, err.Cause()) + assert.Equal(t, pnk{"something"}, err.T()) + assert.Equal(t, "This is the message in ErrorWrap(r, message).", err.Message()) + assert.Equal(t, "Error{`This is the message in ErrorWrap(r, message).`}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Message: This is the message in ErrorWrap(r, message).") + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapSomething(t *testing.T) { + + var err = ErrorWrap("something", "formatter%v%v", 0, 1) + + assert.Equal(t, "something", err.Cause()) + assert.Equal(t, "something", err.T()) + assert.Equal(t, "formatter01", err.Message()) + assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) 
+ assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapNothing(t *testing.T) { + + var err = ErrorWrap(nil, "formatter%v%v", 0, 1) + + assert.Equal(t, nil, err.Cause()) + assert.Equal(t, nil, err.T()) + assert.Equal(t, "formatter01", err.Message()) + assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewError(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + + assert.Equal(t, nil, err.Cause()) + assert.Equal(t, "formatter%v%v", err.T()) + assert.Equal(t, "formatter01", err.Message()) + assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") +} + +func TestErrorNewErrorWithStacktrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1).Stacktrace() + + assert.Equal(t, nil, err.Cause()) + assert.Equal(t, "formatter%v%v", err.T()) + assert.Equal(t, "formatter01", err.Message()) + assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewErrorWithTrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + err.Trace("trace %v", 1) + err.Trace("trace %v", 2) + err.Trace("trace %v", 3) + + assert.Equal(t, nil, err.Cause()) + assert.Equal(t, "formatter%v%v", err.T()) + assert.Equal(t, "formatter01", err.Message()) + assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + dump := fmt.Sprintf("%#v", err) + assert.NotContains(t, dump, "Stack Trace") + assert.Regexp(t, 
`common/errors_test\.go:[0-9]+ - trace 1`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 2`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 3`, dump) +} + +func TestErrorWrapError(t *testing.T) { + var err1 error = NewError("my message") + var err2 error = ErrorWrap(err1, "another message") + assert.Equal(t, err1, err2) +} From e9cf47606cfcbdc28a7c16671b4a70b459e9d4cc Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 25 Mar 2018 00:04:47 +0100 Subject: [PATCH 404/515] Merge panics into errors in Parallel --- common/async.go | 16 ++-------------- common/async_test.go | 13 +++++++------ 2 files changed, 9 insertions(+), 20 deletions(-) diff --git a/common/async.go b/common/async.go index 31dc2e968..49714d95e 100644 --- a/common/async.go +++ b/common/async.go @@ -15,7 +15,6 @@ type Task func(i int) (val interface{}, err error, abort bool) type TaskResult struct { Value interface{} Error error - Panic interface{} } type TaskResultCh <-chan TaskResult @@ -92,17 +91,6 @@ func (trs *TaskResultSet) FirstError() error { return nil } -// Returns the firstmost (by task index) panic as -// discovered by all previous Reap() calls. -func (trs *TaskResultSet) FirstPanic() interface{} { - for _, result := range trs.results { - if result.Panic != nil { - return result.Panic - } - } - return nil -} - //---------------------------------------- // Parallel @@ -128,7 +116,7 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { defer func() { if pnk := recover(); pnk != nil { atomic.AddInt32(numPanics, 1) - taskResultCh <- TaskResult{nil, nil, pnk} + taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")} taskDoneCh <- false } }() @@ -136,7 +124,7 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { var val, err, abort = task(i) // Send val/err to taskResultCh. // NOTE: Below this line, nothing must panic/ - taskResultCh <- TaskResult{val, err, nil} + taskResultCh <- TaskResult{val, err} // Decrement waitgroup. 
taskDoneCh <- abort }(i, task, taskResultCh) diff --git a/common/async_test.go b/common/async_test.go index 2e8db26eb..8d41ec35a 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -37,9 +37,6 @@ func TestParallel(t *testing.T) { } else if taskResult.Error != nil { assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) failedTasks += 1 - } else if taskResult.Panic != nil { - assert.Fail(t, "Task should not have panic'd but got %v", taskResult.Panic) - failedTasks += 1 } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) failedTasks += 1 @@ -49,7 +46,6 @@ func TestParallel(t *testing.T) { } assert.Equal(t, failedTasks, 0, "No task should have failed") assert.Nil(t, trs.FirstError(), "There should be no errors") - assert.Nil(t, trs.FirstPanic(), "There should be no panics") assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") } @@ -132,8 +128,13 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val inte taskName := fmt.Sprintf("Task #%v", index) assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) assert.Equal(t, val, taskResult.Value, taskName) - assert.Equal(t, err, taskResult.Error, taskName) - assert.Equal(t, pnk, taskResult.Panic, taskName) + if err != nil { + assert.Equal(t, err, taskResult.Error, taskName) + } else if pnk != nil { + assert.Equal(t, pnk, taskResult.Error.(Error).Cause(), taskName) + } else { + assert.Nil(t, taskResult.Error, taskName) + } } // Wait for timeout (no result) From 6e26392209cba2758a36fe37f125ecdfc22db5fa Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Wed, 28 Mar 2018 15:35:52 +0200 Subject: [PATCH 405/515] Return config parse errors (#182) --- cli/setup.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index dc34abdf9..06cf1cd1f 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -139,9 +139,8 @@ 
func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { // stderr, so if we redirect output to json file, this doesn't appear // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { - // we ignore not found error, only parse error - // stderr, so if we redirect output to json file, this doesn't appear - fmt.Fprintf(os.Stderr, "%#v", err) + // ignore not found error, return other errors + return err } return nil } From 0f2811441f4cf44b414df16ceae3c0931c74662e Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 28 Mar 2018 16:04:46 +0200 Subject: [PATCH 406/515] [pubsub] fix unsubscribing (#181) * [pubsub] fix unsubscribing by giving it the same exact query, which was used to subscribe Refs https://github.com/tendermint/tendermint/issues/1368 * use original query to unsubscribe Refs #1368 * modify the unit test the issue is fixed --- CHANGELOG.md | 6 ++++++ pubsub/pubsub.go | 30 +++++++++++++++++++++--------- pubsub/pubsub_test.go | 4 ++-- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d3e789a5..00ddd4b4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.8.1 (TBD) + +BUG FIXES: + + - [pubsub] fix unsubscribing + ## 0.8.0 (March 22, 2018) BREAKING: diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go index 28e008ca6..90f6e4ae6 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -28,6 +28,16 @@ const ( shutdown ) +var ( + // ErrSubscriptionNotFound is returned when a client tries to unsubscribe + // from not existing subscription. + ErrSubscriptionNotFound = errors.New("subscription not found") + + // ErrAlreadySubscribed is returned when a client tries to subscribe twice or + // more using the same query. 
+ ErrAlreadySubscribed = errors.New("already subscribed") +) + type cmd struct { op operation query Query @@ -52,7 +62,7 @@ type Server struct { cmdsCap int mtx sync.RWMutex - subscriptions map[string]map[string]struct{} // subscriber -> query -> struct{} + subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query } // Option sets a parameter for the server. @@ -63,7 +73,7 @@ type Option func(*Server) // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { s := &Server{ - subscriptions: make(map[string]map[string]struct{}), + subscriptions: make(map[string]map[string]Query), } s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) @@ -106,16 +116,16 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou } s.mtx.RUnlock() if ok { - return errors.New("already subscribed") + return ErrAlreadySubscribed } select { case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: s.mtx.Lock() if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]struct{}) + s.subscriptions[clientID] = make(map[string]Query) } - s.subscriptions[clientID][query.String()] = struct{}{} + s.subscriptions[clientID][query.String()] = query s.mtx.Unlock() return nil case <-ctx.Done(): @@ -127,18 +137,20 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou // returned to the caller if the context is canceled or if subscription does // not exist. 
func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { + var origQuery Query s.mtx.RLock() clientSubscriptions, ok := s.subscriptions[clientID] if ok { - _, ok = clientSubscriptions[query.String()] + origQuery, ok = clientSubscriptions[query.String()] } s.mtx.RUnlock() if !ok { - return errors.New("subscription not found") + return ErrSubscriptionNotFound } + // original query is used here because we're using pointers as map keys select { - case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}: + case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}: s.mtx.Lock() delete(clientSubscriptions, query.String()) s.mtx.Unlock() @@ -155,7 +167,7 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { _, ok := s.subscriptions[clientID] s.mtx.RUnlock() if !ok { - return errors.New("subscription not found") + return ErrSubscriptionNotFound } select { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 84b6aa218..2af7cea46 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -101,9 +101,9 @@ func TestUnsubscribe(t *testing.T) { ctx := context.Background() ch := make(chan interface{}) - err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch) require.NoError(t, err) - err = s.Unsubscribe(ctx, clientID, query.Empty{}) + err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) err = s.Publish(ctx, "Nick Fury") From 898216d419ad046ee3c3dee91df851e4e30461d7 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 29 Mar 2018 12:04:01 +0200 Subject: [PATCH 407/515] add SplitAndTrim func (#183) Refs https://github.com/tendermint/tendermint/issues/1380 --- common/string.go | 17 +++++++++++++++++ common/string_test.go | 19 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/common/string.go b/common/string.go index ae140c9f4..dfa262d3f 
100644 --- a/common/string.go +++ b/common/string.go @@ -41,3 +41,20 @@ func StringInSlice(a string, list []string) bool { } return false } + +// SplitAndTrim slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. +func SplitAndTrim(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + for i := 0; i < len(spl); i++ { + spl[i] = strings.Trim(spl[i], cutset) + } + return spl +} diff --git a/common/string_test.go b/common/string_test.go index b8a917c16..82ba67844 100644 --- a/common/string_test.go +++ b/common/string_test.go @@ -30,3 +30,22 @@ func TestIsHex(t *testing.T) { assert.True(t, IsHex(v), "%q is hex", v) } } + +func TestSplitAndTrim(t *testing.T) { + testCases := []struct { + s string + sep string + cutset string + expected []string + }{ + {"a,b,c", ",", " ", []string{"a", "b", "c"}}, + {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, + {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, + {" , ", ",", " ", []string{"", ""}}, + {" ", ",", " ", []string{""}}, + } + + for _, tc := range testCases { + assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s) + } +} From 382e99d06e8bdc2818704e4a22d23a11110803fa Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 2 Apr 2018 01:46:24 -0700 Subject: [PATCH 408/515] Add IsTypedNil --- common/nil.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 common/nil.go diff --git a/common/nil.go b/common/nil.go new file mode 100644 index 000000000..c7617f083 --- /dev/null +++ b/common/nil.go @@ -0,0 +1,18 @@ +package common + +import "reflect" + +// Go lacks a simple and safe way to see if something is a typed nil. 
+// See: +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 +func IsTypedNil(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} From 2fbd9f15fae4a0d73f2c3d10f839a1e7f5451e36 Mon Sep 17 00:00:00 2001 From: Mohanson Date: Tue, 3 Apr 2018 15:26:47 +0800 Subject: [PATCH 409/515] bug fix: WriteFileAtomic Must close file before rename it. --- common/os.go | 1 + 1 file changed, 1 insertion(+) diff --git a/common/os.go b/common/os.go index f1e07115c..47ae4a1cc 100644 --- a/common/os.go +++ b/common/os.go @@ -148,6 +148,7 @@ func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { } else if n < len(data) { return io.ErrShortWrite } + f.Close() return os.Rename(f.Name(), filename) } From 29a8cb8d87e0ed2567938aed57bb78cc8cd6fee9 Mon Sep 17 00:00:00 2001 From: Mohanson Date: Tue, 3 Apr 2018 16:51:30 +0800 Subject: [PATCH 410/515] add comments. --- common/os.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/common/os.go b/common/os.go index 47ae4a1cc..8a0c14f46 100644 --- a/common/os.go +++ b/common/os.go @@ -148,6 +148,9 @@ func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { } else if n < len(data) { return io.ErrShortWrite } + // Close the file before renaming it, otherwise it will cause "The process + // cannot access the file because it is being used by another process." on windows or + // cause "cross-link error" on linux when you try to save it to another partition. 
f.Close() return os.Rename(f.Name(), filename) From ee67e34519a87a513731892d60c6ab97855f0c53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Corbi=C3=A8re?= Date: Tue, 3 Apr 2018 12:23:28 +0200 Subject: [PATCH 411/515] Fix lint errors (#190) * use increment and decrement operators. * remove unnecessary else branches. * fix receiver names. * remove omittable code. * fix dot imports. --- autofile/autofile.go | 7 +++--- autofile/group.go | 48 ++++++++++++++++++------------------------ autofile/group_test.go | 2 +- clist/clist.go | 4 ++-- clist/clist_test.go | 6 +++--- common/async_test.go | 6 +++--- common/bit_array.go | 3 +-- common/colors.go | 3 +-- common/errors.go | 7 +++--- common/io.go | 3 +-- common/os.go | 11 +++++----- common/service.go | 13 +++++------- common/string.go | 3 +-- common/word.go | 3 +-- db/backend_test.go | 2 +- db/common_test.go | 44 +++++++++++++++++++------------------- db/db_test.go | 12 +++++------ db/go_level_db.go | 13 ++++++------ db/go_level_db_test.go | 2 +- db/mem_db.go | 2 +- db/types.go | 3 +-- db/util.go | 28 ++++++++++++------------ db/util_test.go | 10 ++++----- events/events.go | 8 +++---- events/events_test.go | 4 ++-- merkle/simple_proof.go | 11 +++++----- test/mutate.go | 8 +++---- 27 files changed, 121 insertions(+), 145 deletions(-) diff --git a/autofile/autofile.go b/autofile/autofile.go index 05fb0d677..790be5224 100644 --- a/autofile/autofile.go +++ b/autofile/autofile.go @@ -5,7 +5,7 @@ import ( "sync" "time" - . 
"github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) /* AutoFile usage @@ -44,7 +44,7 @@ type AutoFile struct { func OpenAutoFile(path string) (af *AutoFile, err error) { af = &AutoFile{ - ID: RandStr(12) + ":" + path, + ID: cmn.RandStr(12) + ":" + path, Path: path, ticker: time.NewTicker(autoFileOpenDuration), } @@ -129,9 +129,8 @@ func (af *AutoFile) Size() (int64, error) { if err != nil { if err == os.ErrNotExist { return 0, nil - } else { - return -1, err } + return -1, err } } stat, err := af.file.Stat() diff --git a/autofile/group.go b/autofile/group.go index f2d0f2bae..652c33310 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -15,7 +15,7 @@ import ( "sync" "time" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) const ( @@ -54,7 +54,7 @@ The Group can also be used to binary-search for some line, assuming that marker lines are written occasionally. */ type Group struct { - BaseService + cmn.BaseService ID string Head *AutoFile // The head AutoFile to write to @@ -90,7 +90,7 @@ func OpenGroup(headPath string) (g *Group, err error) { minIndex: 0, maxIndex: 0, } - g.BaseService = *NewBaseService(nil, "Group", g) + g.BaseService = *cmn.NewBaseService(nil, "Group", g) gInfo := g.readGroupInfo() g.minIndex = gInfo.MinIndex @@ -267,7 +267,7 @@ func (g *Group) RotateFile() { panic(err) } - g.maxIndex += 1 + g.maxIndex++ } // NewReader returns a new group reader. @@ -277,9 +277,8 @@ func (g *Group) NewReader(index int) (*GroupReader, error) { err := r.SetIndex(index) if err != nil { return nil, err - } else { - return r, nil } + return r, nil } // Returns -1 if line comes after, 0 if found, 1 if line comes before. 
@@ -311,9 +310,8 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error if err != nil { r.Close() return nil, false, err - } else { - return r, match, err } + return r, match, err } // Read starting roughly at the middle file, @@ -349,9 +347,8 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error if err != nil { r.Close() return nil, false, err - } else { - return r, true, err } + return r, true, err } else { // We passed it maxIndex = curIndex - 1 @@ -429,9 +426,8 @@ GROUP_LOOP: if err == io.EOF { if found { return match, found, nil - } else { - continue GROUP_LOOP } + continue GROUP_LOOP } else if err != nil { return "", false, err } @@ -442,9 +438,8 @@ GROUP_LOOP: if r.CurIndex() > i { if found { return match, found, nil - } else { - continue GROUP_LOOP } + continue GROUP_LOOP } } } @@ -520,7 +515,7 @@ func (g *Group) readGroupInfo() GroupInfo { minIndex, maxIndex = 0, 0 } else { // Otherwise, the head file is 1 greater - maxIndex += 1 + maxIndex++ } return GroupInfo{minIndex, maxIndex, totalSize, headSize} } @@ -528,9 +523,8 @@ func (g *Group) readGroupInfo() GroupInfo { func filePathForIndex(headPath string, index int, maxIndex int) string { if index == maxIndex { return headPath - } else { - return fmt.Sprintf("%v.%03d", headPath, index) } + return fmt.Sprintf("%v.%03d", headPath, index) } //-------------------------------------------------------------------------------- @@ -567,9 +561,8 @@ func (gr *GroupReader) Close() error { gr.curFile = nil gr.curLine = nil return err - } else { - return nil } + return nil } // Read implements io.Reader, reading bytes from the current Reader @@ -598,10 +591,10 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { if err == io.EOF { if n >= lenP { return n, nil - } else { // Open the next file - if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { - return n, err1 - } + } + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + 
return n, err1 } } else if err != nil { return n, err @@ -643,10 +636,9 @@ func (gr *GroupReader) ReadLine() (string, error) { } if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } else { - linePrefix += string(bytesRead) - continue } + linePrefix += string(bytesRead) + continue } else if err != nil { return "", err } @@ -726,11 +718,11 @@ func (gr *GroupReader) SetIndex(index int) error { func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { return func(line string) (int, error) { if !strings.HasPrefix(line, prefix) { - return -1, errors.New(Fmt("Marker line did not have prefix: %v", prefix)) + return -1, errors.New(cmn.Fmt("Marker line did not have prefix: %v", prefix)) } i, err := strconv.Atoi(line[len(prefix):]) if err != nil { - return -1, errors.New(Fmt("Failed to parse marker line: %v", err.Error())) + return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error())) } if target < i { return 1, nil diff --git a/autofile/group_test.go b/autofile/group_test.go index c4f68f057..1a1111961 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -175,7 +175,7 @@ func TestSearch(t *testing.T) { if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { t.Fatalf("Unexpected INFO #. 
Expected %v got:\n%v", cur, line) } - cur += 1 + cur++ } gr.Close() } diff --git a/clist/clist.go b/clist/clist.go index 28d771a28..ccb1f5777 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -316,7 +316,7 @@ func (l *CList) PushBack(v interface{}) *CElement { l.wg.Done() close(l.waitCh) } - l.len += 1 + l.len++ // Modify the tail if l.tail == nil { @@ -357,7 +357,7 @@ func (l *CList) Remove(e *CElement) interface{} { } // Update l.len - l.len -= 1 + l.len-- // Connect next/prev and set head/tail if prev == nil { diff --git a/clist/clist_test.go b/clist/clist_test.go index 31f821653..6171f1a39 100644 --- a/clist/clist_test.go +++ b/clist/clist_test.go @@ -122,7 +122,7 @@ func _TestGCRandom(t *testing.T) { v.Int = i l.PushBack(v) runtime.SetFinalizer(v, func(v *value) { - gcCount += 1 + gcCount++ }) } @@ -177,10 +177,10 @@ func TestScanRightDeleteRandom(t *testing.T) { } if el == nil { el = l.FrontWait() - restartCounter += 1 + restartCounter++ } el = el.Next() - counter += 1 + counter++ } fmt.Printf("Scanner %v restartCounter: %v counter: %v\n", scannerID, restartCounter, counter) }(i) diff --git a/common/async_test.go b/common/async_test.go index 8d41ec35a..9f060ca2d 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -33,13 +33,13 @@ func TestParallel(t *testing.T) { taskResult, ok := trs.LatestResult(i) if !ok { assert.Fail(t, "Task #%v did not complete.", i) - failedTasks += 1 + failedTasks++ } else if taskResult.Error != nil { assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) - failedTasks += 1 + failedTasks++ } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) - failedTasks += 1 + failedTasks++ } else { // Good! 
} diff --git a/common/bit_array.go b/common/bit_array.go index a3a87ccab..ea6a6ee1f 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -168,9 +168,8 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { } } return c - } else { - return bA.and(o.Not()) // Note degenerate case where o == nil } + return bA.and(o.Not()) // Note degenerate case where o == nil } func (bA *BitArray) IsEmpty() bool { diff --git a/common/colors.go b/common/colors.go index 776b22e2e..85e592248 100644 --- a/common/colors.go +++ b/common/colors.go @@ -38,9 +38,8 @@ const ( func treat(s string, color string) string { if len(s) > 2 && s[:2] == "\x1b[" { return s - } else { - return color + s + ANSIReset } + return color + s + ANSIReset } func treatAll(color string, args ...interface{}) string { diff --git a/common/errors.go b/common/errors.go index 52c45c799..c5efae9cf 100644 --- a/common/errors.go +++ b/common/errors.go @@ -13,11 +13,10 @@ func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { msg := Fmt(format, args...) if causeCmnError, ok := cause.(*cmnError); ok { return causeCmnError.TraceFrom(1, msg) - } else { - // NOTE: cause may be nil. - // NOTE: do not use causeCmnError here, not the same as nil. - return newError(msg, cause, cause).Stacktrace() } + // NOTE: cause may be nil. + // NOTE: do not use causeCmnError here, not the same as nil. 
+ return newError(msg, cause, cause).Stacktrace() } //---------------------------------------- diff --git a/common/io.go b/common/io.go index 378c19fc6..fa0443e09 100644 --- a/common/io.go +++ b/common/io.go @@ -20,9 +20,8 @@ func (pr *PrefixedReader) Read(p []byte) (n int, err error) { read := copy(p, pr.Prefix) pr.Prefix = pr.Prefix[read:] return read, nil - } else { - return pr.reader.Read(p) } + return pr.reader.Read(p) } // NOTE: Not goroutine safe diff --git a/common/os.go b/common/os.go index f1e07115c..60a4217aa 100644 --- a/common/os.go +++ b/common/os.go @@ -183,11 +183,10 @@ func Prompt(prompt string, defaultValue string) (string, error) { line, err := reader.ReadString('\n') if err != nil { return defaultValue, err - } else { - line = strings.TrimSpace(line) - if line == "" { - return defaultValue, nil - } - return line, nil } + line = strings.TrimSpace(line) + if line == "" { + return defaultValue, nil + } + return line, nil } diff --git a/common/service.go b/common/service.go index 2502d671c..2f90fa4f9 100644 --- a/common/service.go +++ b/common/service.go @@ -125,9 +125,8 @@ func (bs *BaseService) Start() error { if atomic.LoadUint32(&bs.stopped) == 1 { bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) return ErrAlreadyStopped - } else { - bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) } + bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) err := bs.impl.OnStart() if err != nil { // revert flag @@ -135,10 +134,9 @@ func (bs *BaseService) Start() error { return err } return nil - } else { - bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) - return ErrAlreadyStarted } + bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) + return ErrAlreadyStarted } // OnStart implements Service by doing nothing. 
@@ -154,10 +152,9 @@ func (bs *BaseService) Stop() error { bs.impl.OnStop() close(bs.quit) return nil - } else { - bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) - return ErrAlreadyStopped } + bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + return ErrAlreadyStopped } // OnStop implements Service by doing nothing. diff --git a/common/string.go b/common/string.go index dfa262d3f..0e2231e91 100644 --- a/common/string.go +++ b/common/string.go @@ -10,9 +10,8 @@ import ( var Fmt = func(format string, a ...interface{}) string { if len(a) == 0 { return format - } else { - return fmt.Sprintf(format, a...) } + return fmt.Sprintf(format, a...) } // IsHex returns true for non-empty hex-string prefixed with "0x" diff --git a/common/word.go b/common/word.go index 4072482b8..a5b841f55 100644 --- a/common/word.go +++ b/common/word.go @@ -72,9 +72,8 @@ func (tuple Tuple256) Compare(other Tuple256) int { firstCompare := tuple.First.Compare(other.First) if firstCompare == 0 { return tuple.Second.Compare(other.Second) - } else { - return firstCompare } + return firstCompare } func Tuple256Split(t Tuple256) (Word256, Word256) { diff --git a/db/backend_test.go b/db/backend_test.go index 80fbbb140..c407b214f 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -47,7 +47,7 @@ func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { } func TestBackendsGetSetDelete(t *testing.T) { - for dbType, _ := range backends { + for dbType := range backends { testBackendGetSetDelete(t, dbType) } } diff --git a/db/common_test.go b/db/common_test.go index 7f9d10e9b..1d8d52c5f 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -82,73 +82,73 @@ func (mdb *mockDB) Mutex() *sync.Mutex { } func (mdb *mockDB) Get([]byte) []byte { - mdb.calls["Get"] += 1 + mdb.calls["Get"]++ return nil } func (mdb *mockDB) Has([]byte) bool { - mdb.calls["Has"] += 1 + mdb.calls["Has"]++ return false } func (mdb 
*mockDB) Set([]byte, []byte) { - mdb.calls["Set"] += 1 + mdb.calls["Set"]++ } func (mdb *mockDB) SetSync([]byte, []byte) { - mdb.calls["SetSync"] += 1 + mdb.calls["SetSync"]++ } func (mdb *mockDB) SetNoLock([]byte, []byte) { - mdb.calls["SetNoLock"] += 1 + mdb.calls["SetNoLock"]++ } func (mdb *mockDB) SetNoLockSync([]byte, []byte) { - mdb.calls["SetNoLockSync"] += 1 + mdb.calls["SetNoLockSync"]++ } func (mdb *mockDB) Delete([]byte) { - mdb.calls["Delete"] += 1 + mdb.calls["Delete"]++ } func (mdb *mockDB) DeleteSync([]byte) { - mdb.calls["DeleteSync"] += 1 + mdb.calls["DeleteSync"]++ } func (mdb *mockDB) DeleteNoLock([]byte) { - mdb.calls["DeleteNoLock"] += 1 + mdb.calls["DeleteNoLock"]++ } func (mdb *mockDB) DeleteNoLockSync([]byte) { - mdb.calls["DeleteNoLockSync"] += 1 + mdb.calls["DeleteNoLockSync"]++ } func (mdb *mockDB) Iterator(start, end []byte) Iterator { - mdb.calls["Iterator"] += 1 + mdb.calls["Iterator"]++ return &mockIterator{} } func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator { - mdb.calls["ReverseIterator"] += 1 + mdb.calls["ReverseIterator"]++ return &mockIterator{} } func (mdb *mockDB) Close() { - mdb.calls["Close"] += 1 + mdb.calls["Close"]++ } func (mdb *mockDB) NewBatch() Batch { - mdb.calls["NewBatch"] += 1 + mdb.calls["NewBatch"]++ return &memBatch{db: mdb} } func (mdb *mockDB) Print() { - mdb.calls["Print"] += 1 + mdb.calls["Print"]++ fmt.Printf("mockDB{%v}", mdb.Stats()) } func (mdb *mockDB) Stats() map[string]string { - mdb.calls["Stats"] += 1 + mdb.calls["Stats"]++ res := make(map[string]string) for key, count := range mdb.calls { @@ -162,24 +162,24 @@ func (mdb *mockDB) Stats() map[string]string { type mockIterator struct{} -func (_ mockIterator) Domain() (start []byte, end []byte) { +func (mockIterator) Domain() (start []byte, end []byte) { return nil, nil } -func (_ mockIterator) Valid() bool { +func (mockIterator) Valid() bool { return false } -func (_ mockIterator) Next() { +func (mockIterator) Next() { } -func (_ 
mockIterator) Key() []byte { +func (mockIterator) Key() []byte { return nil } -func (_ mockIterator) Value() []byte { +func (mockIterator) Value() []byte { return nil } -func (_ mockIterator) Close() { +func (mockIterator) Close() { } diff --git a/db/db_test.go b/db/db_test.go index 3d6ac38c4..a56901016 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -8,7 +8,7 @@ import ( ) func TestDBIteratorSingleKey(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) @@ -26,7 +26,7 @@ func TestDBIteratorSingleKey(t *testing.T) { } func TestDBIteratorTwoKeys(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) @@ -52,7 +52,7 @@ func TestDBIteratorTwoKeys(t *testing.T) { } func TestDBIteratorMany(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) @@ -76,7 +76,7 @@ func TestDBIteratorMany(t *testing.T) { } func TestDBIteratorEmpty(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) itr := db.Iterator(nil, nil) @@ -87,7 +87,7 @@ func TestDBIteratorEmpty(t *testing.T) { } func TestDBIteratorEmptyBeginAfter(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) itr := db.Iterator(bz("1"), nil) @@ -98,7 +98,7 @@ func TestDBIteratorEmptyBeginAfter(t *testing.T) { } func TestDBIteratorNonemptyBeginAfter(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { 
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) diff --git a/db/go_level_db.go b/db/go_level_db.go index 55ca36c39..9ff162e38 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -10,7 +10,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func init() { @@ -46,9 +46,8 @@ func (db *GoLevelDB) Get(key []byte) []byte { if err != nil { if err == errors.ErrNotFound { return nil - } else { - panic(err) } + panic(err) } return res } @@ -64,7 +63,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { value = nonNilBytes(value) err := db.db.Put(key, value, nil) if err != nil { - PanicCrisis(err) + cmn.PanicCrisis(err) } } @@ -74,7 +73,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { value = nonNilBytes(value) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { - PanicCrisis(err) + cmn.PanicCrisis(err) } } @@ -83,7 +82,7 @@ func (db *GoLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, nil) if err != nil { - PanicCrisis(err) + cmn.PanicCrisis(err) } } @@ -92,7 +91,7 @@ func (db *GoLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { - PanicCrisis(err) + cmn.PanicCrisis(err) } } diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index 88b6730f3..266add8b5 100644 --- a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -30,7 +30,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { // Write something { idx := (int64(cmn.RandInt()) % numItems) - internal[idx] += 1 + internal[idx]++ val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := int642Bytes(int64(val)) diff --git a/db/mem_db.go b/db/mem_db.go index 5439d6789..2d802947c 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -235,7 +235,7 @@ 
func (itr *memDBIterator) assertIsValid() { func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { keys := []string{} - for key, _ := range db.db { + for key := range db.db { if IsKeyInDomain([]byte(key), start, end, false) { keys = append(keys, key) } diff --git a/db/types.go b/db/types.go index 45146942e..ad78859a7 100644 --- a/db/types.go +++ b/db/types.go @@ -129,7 +129,6 @@ func bz(s string) []byte { func nonNilBytes(bz []byte) []byte { if bz == nil { return []byte{} - } else { - return bz } + return bz } diff --git a/db/util.go b/db/util.go index ecb392dd6..1ad5002d6 100644 --- a/db/util.go +++ b/db/util.go @@ -21,14 +21,13 @@ func cpIncr(bz []byte) (ret []byte) { ret = cp(bz) for i := len(bz) - 1; i >= 0; i-- { if ret[i] < byte(0xFF) { - ret[i] += 1 + ret[i]++ return - } else { - ret[i] = byte(0x00) - if i == 0 { - // Overflow - return nil - } + } + ret[i] = byte(0x00) + if i == 0 { + // Overflow + return nil } } return nil @@ -44,13 +43,12 @@ func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { return false } return true - } else { - if start != nil && bytes.Compare(start, key) < 0 { - return false - } - if end != nil && bytes.Compare(key, end) <= 0 { - return false - } - return true } + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true } diff --git a/db/util_test.go b/db/util_test.go index 854448af3..44f1f9f73 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -7,7 +7,7 @@ import ( // Empty iterator for empty db. func TestPrefixIteratorNoMatchNil(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) itr := IteratePrefix(db, []byte("2")) @@ -19,7 +19,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { // Empty iterator for db populated after iterator created. 
func TestPrefixIteratorNoMatch1(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) itr := IteratePrefix(db, []byte("2")) @@ -32,7 +32,7 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { // Empty iterator for prefix starting after db entry. func TestPrefixIteratorNoMatch2(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("3"), bz("value_3")) @@ -45,7 +45,7 @@ func TestPrefixIteratorNoMatch2(t *testing.T) { // Iterator with single val for db with single val, starting from that val. func TestPrefixIteratorMatch1(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("2"), bz("value_2")) @@ -63,7 +63,7 @@ func TestPrefixIteratorMatch1(t *testing.T) { // Iterator with prefix iterates over everything with same prefix. func TestPrefixIteratorMatches1N(t *testing.T) { - for backend, _ := range backends { + for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) diff --git a/events/events.go b/events/events.go index 3bc349306..f1b2a754e 100644 --- a/events/events.go +++ b/events/events.go @@ -6,7 +6,7 @@ package events import ( "sync" - . 
"github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) // Generic event data can be typed and registered with tendermint/go-amino @@ -27,7 +27,7 @@ type Fireable interface { } type EventSwitch interface { - Service + cmn.Service Fireable AddListenerForEvent(listenerID, event string, cb EventCallback) @@ -36,7 +36,7 @@ type EventSwitch interface { } type eventSwitch struct { - BaseService + cmn.BaseService mtx sync.RWMutex eventCells map[string]*eventCell @@ -45,7 +45,7 @@ type eventSwitch struct { func NewEventSwitch() EventSwitch { evsw := &eventSwitch{} - evsw.BaseService = *NewBaseService(nil, "EventSwitch", evsw) + evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw) return evsw } diff --git a/events/events_test.go b/events/events_test.go index 87db2a304..4995ae730 100644 --- a/events/events_test.go +++ b/events/events_test.go @@ -221,11 +221,11 @@ func TestRemoveListener(t *testing.T) { // add some listeners and make sure they work evsw.AddListenerForEvent("listener", "event1", func(data EventData) { - sum1 += 1 + sum1++ }) evsw.AddListenerForEvent("listener", "event2", func(data EventData) { - sum2 += 1 + sum2++ }) for i := 0; i < count; i++ { evsw.FireEvent("event1", true) diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index 83f89e598..7b8f82b46 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -67,13 +67,12 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][ return nil } return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } else { - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) } + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return 
SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) } } diff --git a/test/mutate.go b/test/mutate.go index 1dbe7a6bf..76534e8b1 100644 --- a/test/mutate.go +++ b/test/mutate.go @@ -1,7 +1,7 @@ package test import ( - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) // Contract: !bytes.Equal(input, output) && len(input) >= len(output) @@ -17,11 +17,11 @@ func MutateByteSlice(bytez []byte) []byte { bytez = mBytez // Try a random mutation - switch RandInt() % 2 { + switch cmn.RandInt() % 2 { case 0: // Mutate a single byte - bytez[RandInt()%len(bytez)] += byte(RandInt()%255 + 1) + bytez[cmn.RandInt()%len(bytez)] += byte(cmn.RandInt()%255 + 1) case 1: // Remove an arbitrary byte - pos := RandInt() % len(bytez) + pos := cmn.RandInt() % len(bytez) bytez = append(bytez[:pos], bytez[pos+1:]...) } return bytez From b221ca0efa588db6875ee8f35f815319f25a3c96 Mon Sep 17 00:00:00 2001 From: Mohanson Date: Tue, 3 Apr 2018 19:04:09 +0800 Subject: [PATCH 412/515] refine comments --- common/os.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/common/os.go b/common/os.go index 8a0c14f46..450388564 100644 --- a/common/os.go +++ b/common/os.go @@ -149,8 +149,7 @@ func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { return io.ErrShortWrite } // Close the file before renaming it, otherwise it will cause "The process - // cannot access the file because it is being used by another process." on windows or - // cause "cross-link error" on linux when you try to save it to another partition. + // cannot access the file because it is being used by another process." on windows. f.Close() return os.Rename(f.Name(), filename) From 3375dac049d2e7c597b29485d294b6f30798b4d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Corbi=C3=A8re?= Date: Tue, 3 Apr 2018 14:09:50 +0200 Subject: [PATCH 413/515] add Float64() and RandFloat64(). 
--- common/random.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/common/random.go b/common/random.go index af531992e..b767e3589 100644 --- a/common/random.go +++ b/common/random.go @@ -113,6 +113,10 @@ func RandFloat32() float32 { return grand.Float32() } +func RandFloat64() float64 { + return grand.Float64() +} + func RandTime() time.Time { return grand.Time() } @@ -272,6 +276,14 @@ func (r *Rand) Float32() float32 { return f32 } +// It is not safe for cryptographic usage. +func (r *Rand) Float64() float64 { + r.Lock() + f64 := r.rand.Float64() + r.Unlock() + return f64 +} + // It is not safe for cryptographic usage. func (r *Rand) Time() time.Time { return time.Unix(int64(r.Uint64Exp()), 0) From 3cd4dcf13ba5ba6b4b3a8d224bda5ed3d3043c04 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 3 Apr 2018 16:23:41 +0300 Subject: [PATCH 414/515] changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d6d8914a..70061a409 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,11 +16,13 @@ FEATURES: - [db] NewPrefixDB for a DB with all keys prefixed - [db] NewDebugDB prints everything during operation - [common] SplitAndTrim func + - [common] RandFloat64() and rand.Float64() BUG FIXES: - [pubsub] Fix unsubscribing - [cli] Return config errors + - [common] Fix WriteFileAtomic Windows bug ## 0.7.1 (March 22, 2018) From f457435199f4e38337b3e7c90dc4384ca928e09a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 3 Apr 2018 07:01:08 -0700 Subject: [PATCH 415/515] HexBytes formatting; Make computeHashFromAunts more defensive --- common/bytes.go | 9 +++++++++ merkle/simple_proof.go | 6 +++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/common/bytes.go b/common/bytes.go index ba81bbe97..711720aa7 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -51,3 +51,12 @@ func (bz HexBytes) Bytes() []byte { func (bz HexBytes) String() string { return strings.ToUpper(hex.EncodeToString(bz)) } + 
+func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", bz))) + default: + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + } +} diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index 7b8f82b46..c81ed674a 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -43,9 +43,9 @@ func (sp *SimpleProof) StringIndented(indent string) string { // Use the leafHash and innerHashes to get the root merkle hash. // If the length of the innerHashes slice isn't exactly correct, the result is nil. +// Recursive impl. func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - // Recursive impl. - if index >= total { + if index >= total || index < 0 || total <= 0 { return nil } switch total { @@ -80,7 +80,7 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][ // The node and the tree is thrown away afterwards. // Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. // node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. type SimpleProofNode struct { Hash []byte Parent *SimpleProofNode From d66d43d2eaa5d2611ed1e2814b1df7bf01df705a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Corbi=C3=A8re?= Date: Tue, 3 Apr 2018 16:23:36 +0200 Subject: [PATCH 416/515] Add Int31n() and RandInt31n(). 
--- common/random.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/common/random.go b/common/random.go index b767e3589..5653142eb 100644 --- a/common/random.go +++ b/common/random.go @@ -93,6 +93,10 @@ func RandInt31() int32 { return grand.Int31() } +func RandInt31n(n int32) int32 { + return grand.Int31n(n) +} + func RandInt63() int64 { return grand.Int63() } @@ -224,6 +228,14 @@ func (r *Rand) Int31() int32 { return i31 } +// It is not safe for cryptographic usage. +func (r *Rand) Int31n(n int32) int32 { + r.Lock() + i31n := r.rand.Int31n(n) + r.Unlock() + return i31n +} + // It is not safe for cryptographic usage. func (r *Rand) Int63() int64 { r.Lock() From 74486f7f932a7ec2c54f55e2b0319c621b2d5bdf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Corbi=C3=A8re?= Date: Tue, 3 Apr 2018 16:24:38 +0200 Subject: [PATCH 417/515] Add Int63n() and RandInt63n(). --- common/random.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/common/random.go b/common/random.go index 5653142eb..389a32fc2 100644 --- a/common/random.go +++ b/common/random.go @@ -101,6 +101,10 @@ func RandInt63() int64 { return grand.Int63() } +func RandInt63n(n int64) int64 { + return grand.Int63n(n) +} + func RandUint16Exp() uint16 { return grand.Uint16Exp() } @@ -244,6 +248,14 @@ func (r *Rand) Int63() int64 { return i63 } +// It is not safe for cryptographic usage. +func (r *Rand) Int63n(n int64) int64 { + r.Lock() + i63n := r.rand.Int63n(n) + r.Unlock() + return i63n +} + // Distributed pseudo-exponentially to test for various cases // It is not safe for cryptographic usage. 
func (r *Rand) Uint16Exp() uint16 { From 390de81bbc41cd27cabf70d1d172e1c2ef890106 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 4 Apr 2018 12:30:13 -0700 Subject: [PATCH 418/515] Release version 0.8.0 --- CHANGELOG.md | 5 +++-- version/version.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70061a409..7f372d028 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 0.8.0 (TBD) +## 0.8.0 (develop branch) BREAKING: @@ -16,7 +16,8 @@ FEATURES: - [db] NewPrefixDB for a DB with all keys prefixed - [db] NewDebugDB prints everything during operation - [common] SplitAndTrim func - - [common] RandFloat64() and rand.Float64() + - [common] rand.Float64(), rand.Int63n(n), rand.Int31n(n) and global equivalents + - [common] HexBytes Format() BUG FIXES: diff --git a/version/version.go b/version/version.go index c683dd245..f17ed2ed3 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.0-dev" +const Version = "0.8.0" From fb7bde9c24b3c9d7c5e9926606760cb36ada3e8b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 4 Apr 2018 13:43:19 -0700 Subject: [PATCH 419/515] Add cause in Error.Error() --- CHANGELOG.md | 6 ++++++ common/errors.go | 6 +++++- common/errors_test.go | 4 ++-- version/version.go | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f372d028..0da5ee204 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.8.1 (develop branch) + +FEATURES: + + - [common] Error.Error() includes cause + ## 0.8.0 (develop branch) BREAKING: diff --git a/common/errors.go b/common/errors.go index c5efae9cf..1ee1fb349 100644 --- a/common/errors.go +++ b/common/errors.go @@ -200,7 +200,11 @@ func (err *cmnError) Format(s fmt.State, verb rune) { s.Write([]byte("--= /Error =--\n")) } else { // Write msg. - s.Write([]byte(fmt.Sprintf("Error{`%v`}", err.msg))) // TODO tick-esc? 
+ if err.cause != nil { + s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc? + } else { + s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc? + } } } } diff --git a/common/errors_test.go b/common/errors_test.go index 56f366179..2c5234f9f 100644 --- a/common/errors_test.go +++ b/common/errors_test.go @@ -28,7 +28,7 @@ func TestErrorPanic(t *testing.T) { assert.Equal(t, pnk{"something"}, err.Cause()) assert.Equal(t, pnk{"something"}, err.T()) assert.Equal(t, "This is the message in ErrorWrap(r, message).", err.Message()) - assert.Equal(t, "Error{`This is the message in ErrorWrap(r, message).`}", fmt.Sprintf("%v", err)) + assert.Equal(t, "Error{`This is the message in ErrorWrap(r, message).` (cause: {something})}", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "Message: This is the message in ErrorWrap(r, message).") assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -40,7 +40,7 @@ func TestErrorWrapSomething(t *testing.T) { assert.Equal(t, "something", err.Cause()) assert.Equal(t, "something", err.T()) assert.Equal(t, "formatter01", err.Message()) - assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) + assert.Equal(t, "Error{`formatter01` (cause: something)}", fmt.Sprintf("%v", err)) assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } diff --git a/version/version.go b/version/version.go index f17ed2ed3..b389a63a0 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.0" +const Version = "0.8.1" From 2e24b64fc121dcdf1cabceab8dc2f7257675483c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 5 Apr 2018 03:12:21 -0700 Subject: [PATCH 420/515] Add IsEmpty; Publish 0.8.1 --- CHANGELOG.md | 1 + common/nil.go | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0da5ee204..b93066f96 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES: - [common] Error.Error() includes cause + - [common] IsEmpty() for 0 length ## 0.8.0 (develop branch) diff --git a/common/nil.go b/common/nil.go index c7617f083..31f75f008 100644 --- a/common/nil.go +++ b/common/nil.go @@ -16,3 +16,14 @@ func IsTypedNil(o interface{}) bool { return false } } + +// Returns true if it has zero length. +func IsEmpty(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() == 0 + default: + return false + } +} From 2861f795f5c9acf32fba52c1004467ee86e5c202 Mon Sep 17 00:00:00 2001 From: mossid Date: Thu, 5 Apr 2018 21:30:15 +0200 Subject: [PATCH 421/515] add SimpleProofsFromMap --- merkle/simple_proof.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index c81ed674a..7eb3a77e6 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -22,6 +22,20 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP return } +func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*SimpleProof) { + sm := NewSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + sm.Sort() + kvs := sm.kvs + kvsH := make([]Hasher, 0, len(kvs)) + for _, kvp := range kvs { + kvsH = append(kvsH, kvPair(kvp)) + } + return SimpleProofsFromHashers(kvsH) +} + // Verify that leafHash is a leaf hash of the simple-merkle-tree // which hashes to rootHash. 
func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { From 50c521e706e8e2e99706990f320bcf3167e7f792 Mon Sep 17 00:00:00 2001 From: mossid Date: Thu, 5 Apr 2018 21:56:29 +0200 Subject: [PATCH 422/515] expose KVPair --- merkle/simple_map.go | 6 +++--- merkle/simple_proof.go | 2 +- merkle/simple_tree_test.go | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/merkle/simple_map.go b/merkle/simple_map.go index b59e3b4b6..cd38de761 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -60,9 +60,9 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs { //---------------------------------------- // A local extension to KVPair that can be hashed. -type kvPair cmn.KVPair +type KVPair cmn.KVPair -func (kv kvPair) Hash() []byte { +func (kv KVPair) Hash() []byte { hasher := ripemd160.New() err := encodeByteSlice(hasher, kv.Key) if err != nil { @@ -78,7 +78,7 @@ func (kv kvPair) Hash() []byte { func hashKVPairs(kvs cmn.KVPairs) []byte { kvsH := make([]Hasher, 0, len(kvs)) for _, kvp := range kvs { - kvsH = append(kvsH, kvPair(kvp)) + kvsH = append(kvsH, KVPair(kvp)) } return SimpleHashFromHashers(kvsH) } diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index 7eb3a77e6..ca6ccf372 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -31,7 +31,7 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*Simple kvs := sm.kvs kvsH := make([]Hasher, 0, len(kvs)) for _, kvp := range kvs { - kvsH = append(kvsH, kvPair(kvp)) + kvsH = append(kvsH, KVPair(kvp)) } return SimpleProofsFromHashers(kvsH) } diff --git a/merkle/simple_tree_test.go b/merkle/simple_tree_test.go index 26f35c807..8c4ed01f8 100644 --- a/merkle/simple_tree_test.go +++ b/merkle/simple_tree_test.go @@ -3,7 +3,7 @@ package merkle import ( "bytes" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" . 
"github.com/tendermint/tmlibs/test" "testing" @@ -21,7 +21,7 @@ func TestSimpleProof(t *testing.T) { items := make([]Hasher, total) for i := 0; i < total; i++ { - items[i] = testItem(RandBytes(32)) + items[i] = testItem(cmn.RandBytes(32)) } rootHash := SimpleHashFromHashers(items) @@ -53,7 +53,7 @@ func TestSimpleProof(t *testing.T) { // Trail too long should make it fail origAunts := proof.Aunts - proof.Aunts = append(proof.Aunts, RandBytes(32)) + proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) { ok = proof.Verify(i, total, itemHash, rootHash) if ok { From a807b5db579af9553a8078cd856289280f928b87 Mon Sep 17 00:00:00 2001 From: Sunny Aggarwal Date: Mon, 9 Apr 2018 12:51:24 +0200 Subject: [PATCH 423/515] added PrefixEndBytes (#186) * added PrefixToBytes * added test * added comment --- common/byteslice.go | 26 ++++++++++++++++++++++++++ common/byteslice_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 common/byteslice_test.go diff --git a/common/byteslice.go b/common/byteslice.go index ceaf06bd3..57b3a8a2b 100644 --- a/common/byteslice.go +++ b/common/byteslice.go @@ -45,3 +45,29 @@ func TrimmedString(b []byte) string { return string(bytes.TrimLeft(b, trimSet)) } + +// PrefixEndBytes returns the end byteslice for a noninclusive range +// that would include all byte slices for which the input is the prefix +func PrefixEndBytes(prefix []byte) []byte { + if prefix == nil { + return nil + } + + end := make([]byte, len(prefix)) + copy(end, prefix) + finished := false + + for !finished { + if end[len(end)-1] != byte(255) { + end[len(end)-1]++ + finished = true + } else { + end = end[:len(end)-1] + if len(end) == 0 { + end = nil + finished = true + } + } + } + return end +} diff --git a/common/byteslice_test.go b/common/byteslice_test.go new file mode 100644 index 000000000..98085d125 --- /dev/null +++ b/common/byteslice_test.go @@ -0,0 +1,28 @@ +package common + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func TestPrefixEndBytes(t *testing.T) { + assert := assert.New(t) + + var testCases = []struct { + prefix []byte + expected []byte + }{ + {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, + {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, + {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, + {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, + {[]byte{byte(255), byte(255), byte(255)}, nil}, + {nil, nil}, + } + + for _, test := range testCases { + end := PrefixEndBytes(test.prefix) + assert.Equal(test.expected, end) + } +} From 75345c204669096552c44907006de36f89f7783d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Corbi=C3=A8re?= Date: Mon, 9 Apr 2018 14:36:40 +0200 Subject: [PATCH 424/515] Use an interface for tags. (#195) * Use an interface for tags. * rename TagSet to TagMap. * add documentation to TagMap. --- pubsub/example_test.go | 2 +- pubsub/pubsub.go | 39 +++++++++++++++++++++++++++++++++----- pubsub/pubsub_test.go | 14 +++++++------- pubsub/query/empty.go | 4 +++- pubsub/query/empty_test.go | 9 +++++---- pubsub/query/query.go | 10 ++++++---- pubsub/query/query_test.go | 5 +++-- 7 files changed, 59 insertions(+), 24 deletions(-) diff --git a/pubsub/example_test.go b/pubsub/example_test.go index 3eda7d32d..71f1b9cd5 100644 --- a/pubsub/example_test.go +++ b/pubsub/example_test.go @@ -21,7 +21,7 @@ func TestExample(t *testing.T) { ch := make(chan interface{}, 1) err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Tombstone", map[string]interface{}{"abci.account.name": "John"}) + err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]interface{}{"abci.account.name": "John"})) require.NoError(t, err) assertReceive(t, "Tombstone", ch) } diff --git a/pubsub/pubsub.go 
b/pubsub/pubsub.go index 90f6e4ae6..67f264ace 100644 --- a/pubsub/pubsub.go +++ b/pubsub/pubsub.go @@ -38,18 +38,30 @@ var ( ErrAlreadySubscribed = errors.New("already subscribed") ) +// TagMap is used to associate tags to a message. +// They can be queried by subscribers to choose messages they will received. +type TagMap interface { + // Get returns the value for a key, or nil if no value is present. + // The ok result indicates whether value was found in the tags. + Get(key string) (value interface{}, ok bool) + // Len returns the number of tags. + Len() int +} + +type tagMap map[string]interface{} + type cmd struct { op operation query Query ch chan<- interface{} clientID string msg interface{} - tags map[string]interface{} + tags TagMap } // Query defines an interface for a query to be used for subscribing. type Query interface { - Matches(tags map[string]interface{}) bool + Matches(tags TagMap) bool String() string } @@ -68,6 +80,23 @@ type Server struct { // Option sets a parameter for the server. type Option func(*Server) +// NewTagMap constructs a new immutable tag set from a map. +func NewTagMap(data map[string]interface{}) TagMap { + return tagMap(data) +} + +// Get returns the value for a key, or nil if no value is present. +// The ok result indicates whether value was found in the tags. +func (ts tagMap) Get(key string) (value interface{}, ok bool) { + value, ok = ts[key] + return +} + +// Len returns the number of tags. +func (ts tagMap) Len() int { + return len(ts) +} + // NewServer returns a new server. See the commentary on the Option functions // for a detailed description of how to configure buffering. If no options are // provided, the resulting server's queue is unbuffered. @@ -184,13 +213,13 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. 
func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithTags(ctx, msg, make(map[string]interface{})) + return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]interface{}))) } // PublishWithTags publishes the given message with the set of tags. The set is // matched with clients queries. If there is a match, the message is sent to // the client. -func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]interface{}) error { +func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error { select { case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: return nil @@ -302,7 +331,7 @@ func (state *state) removeAll(clientID string) { delete(state.clients, clientID) } -func (state *state) send(msg interface{}, tags map[string]interface{}) { +func (state *state) send(msg interface{}, tags TagMap) { for q, clientToChannelMap := range state.queries { if q.Matches(tags) { for _, ch := range clientToChannelMap { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 2af7cea46..f853d163b 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -48,14 +48,14 @@ func TestDifferentClients(t *testing.T) { ch1 := make(chan interface{}, 1) err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Iceman", map[string]interface{}{"tm.events.type": "NewBlock"}) + err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) require.NoError(t, err) assertReceive(t, "Iceman", ch1) ch2 := make(chan interface{}, 1) err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Ultimo", map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) + err = s.PublishWithTags(ctx, "Ultimo", 
pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})) require.NoError(t, err) assertReceive(t, "Ultimo", ch1) assertReceive(t, "Ultimo", ch2) @@ -63,7 +63,7 @@ func TestDifferentClients(t *testing.T) { ch3 := make(chan interface{}, 1) err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Valeria Richards", map[string]interface{}{"tm.events.type": "NewRoundStep"}) + err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewRoundStep"})) require.NoError(t, err) assert.Zero(t, len(ch3)) } @@ -80,7 +80,7 @@ func TestClientSubscribesTwice(t *testing.T) { ch1 := make(chan interface{}, 1) err := s.Subscribe(ctx, clientID, q, ch1) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Goblin Queen", map[string]interface{}{"tm.events.type": "NewBlock"}) + err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) require.NoError(t, err) assertReceive(t, "Goblin Queen", ch1) @@ -88,7 +88,7 @@ func TestClientSubscribesTwice(t *testing.T) { err = s.Subscribe(ctx, clientID, q, ch2) require.Error(t, err) - err = s.PublishWithTags(ctx, "Spider-Man", map[string]interface{}{"tm.events.type": "NewBlock"}) + err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) require.NoError(t, err) assertReceive(t, "Spider-Man", ch1) } @@ -208,7 +208,7 @@ func benchmarkNClients(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}) + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i})) } } @@ -231,7 +231,7 @@ func 
benchmarkNClientsOneQuery(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}) + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1})) } } diff --git a/pubsub/query/empty.go b/pubsub/query/empty.go index 2d60a8923..cefdace4a 100644 --- a/pubsub/query/empty.go +++ b/pubsub/query/empty.go @@ -1,11 +1,13 @@ package query +import "github.com/tendermint/tmlibs/pubsub" + // Empty query matches any set of tags. type Empty struct { } // Matches always returns true. -func (Empty) Matches(tags map[string]interface{}) bool { +func (Empty) Matches(tags pubsub.TagMap) bool { return true } diff --git a/pubsub/query/empty_test.go b/pubsub/query/empty_test.go index 663acb191..b5e8a3001 100644 --- a/pubsub/query/empty_test.go +++ b/pubsub/query/empty_test.go @@ -4,13 +4,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tmlibs/pubsub" "github.com/tendermint/tmlibs/pubsub/query" ) func TestEmptyQueryMatchesAnything(t *testing.T) { q := query.Empty{} - assert.True(t, q.Matches(map[string]interface{}{})) - assert.True(t, q.Matches(map[string]interface{}{"Asher": "Roth"})) - assert.True(t, q.Matches(map[string]interface{}{"Route": 66})) - assert.True(t, q.Matches(map[string]interface{}{"Route": 66, "Billy": "Blue"})) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Asher": "Roth"}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66, "Billy": "Blue"}))) } diff --git a/pubsub/query/query.go b/pubsub/query/query.go index 56f2829d2..84c3aa180 100644 --- a/pubsub/query/query.go +++ b/pubsub/query/query.go @@ -14,6 +14,8 @@ import ( 
"strconv" "strings" "time" + + "github.com/tendermint/tmlibs/pubsub" ) // Query holds the query string and the query parser. @@ -145,8 +147,8 @@ func (q *Query) Conditions() []Condition { // // For example, query "name=John" matches tags = {"name": "John"}. More // examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(tags map[string]interface{}) bool { - if len(tags) == 0 { +func (q *Query) Matches(tags pubsub.TagMap) bool { + if tags.Len() == 0 { return false } @@ -231,9 +233,9 @@ func (q *Query) Matches(tags map[string]interface{}) bool { // value from it to the operand using the operator. // // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } -func match(tag string, op Operator, operand reflect.Value, tags map[string]interface{}) bool { +func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool { // look up the tag from the query in tags - value, ok := tags[tag] + value, ok := tags.Get(tag) if !ok { return false } diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go index b980a79c0..7d3ac6ba4 100644 --- a/pubsub/query/query_test.go +++ b/pubsub/query/query_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tmlibs/pubsub" "github.com/tendermint/tmlibs/pubsub/query" ) @@ -51,9 +52,9 @@ func TestMatches(t *testing.T) { } if tc.matches { - assert.True(t, q.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags) + assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags) } else { - assert.False(t, q.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags) + assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags) } } } From 0f92a017377bb5fb243d77f2a3f53930895ee7e1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 9 Apr 2018 15:51:54 +0300 Subject: [PATCH 425/515] changelog dates --- 
CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b93066f96..7a55fb554 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,13 @@ # Changelog -## 0.8.1 (develop branch) +## 0.8.1 (April 5th, 2018) FEATURES: - [common] Error.Error() includes cause - [common] IsEmpty() for 0 length -## 0.8.0 (develop branch) +## 0.8.0 (April 4th, 2018) BREAKING: From e328006bfe961e2ddc0a8a5d7714476a68e5edf3 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 21 Apr 2018 04:25:45 -0700 Subject: [PATCH 426/515] Jae/fixprefixdb (#199) * Fix PrefixDB Iterator * PrefixDB Iterator/ReverseIterator fixes * Bump version 0.8.2 * Update CHANGELOG.md about DebugDB * Keep invalid source to be closed * Use prefixBatch instead of memBatch --- CHANGELOG.md | 11 +++ common/errors.go | 2 +- common/os_test.go | 4 +- db/common_test.go | 6 ++ db/debug_db.go | 60 +++++++----- db/mem_batch.go | 5 +- db/mem_db.go | 10 +- db/prefix_db.go | 212 +++++++++++++++++++++++++++++++------------ db/prefix_db_test.go | 111 +++++++++++++++++++++- db/util.go | 38 ++++++-- version/version.go | 2 +- 11 files changed, 359 insertions(+), 102 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a55fb554..a9509d5d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## 0.8.2 (April 12th, 2018) + +FEATURES: + + - [db] DebugDB shows better colorized output + +BUG FIXES: + + - [db] PrefixDB Iterator/ReverseIterator fixes + - [db] DebugDB fixes + ## 0.8.1 (April 5th, 2018) FEATURES: diff --git a/common/errors.go b/common/errors.go index 1ee1fb349..5992b2346 100644 --- a/common/errors.go +++ b/common/errors.go @@ -178,7 +178,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) { if s.Flag('#') { s.Write([]byte("--= Error =--\n")) // Write msg. - s.Write([]byte(fmt.Sprintf("Message: %#s\n", err.msg))) + s.Write([]byte(fmt.Sprintf("Message: %s\n", err.msg))) // Write cause. 
s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause))) // Write type. diff --git a/common/os_test.go b/common/os_test.go index 97ad672b5..973d68901 100644 --- a/common/os_test.go +++ b/common/os_test.go @@ -23,11 +23,11 @@ func TestWriteFileAtomic(t *testing.T) { } defer os.Remove(f.Name()) - if err := ioutil.WriteFile(f.Name(), old, 0664); err != nil { + if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil { t.Fatal(err) } - if err := WriteFileAtomic(f.Name(), data, perm); err != nil { + if err = WriteFileAtomic(f.Name(), data, perm); err != nil { t.Fatal(err) } diff --git a/db/common_test.go b/db/common_test.go index 1d8d52c5f..6af6e15e6 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -33,6 +33,12 @@ func checkNextPanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") } +func checkDomain(t *testing.T, itr Iterator, start, end []byte) { + ds, de := itr.Domain() + assert.Equal(t, start, ds, "checkDomain domain start incorrect") + assert.Equal(t, end, de, "checkDomain domain end incorrect") +} + func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { k, v := itr.Key(), itr.Value() assert.Exactly(t, key, k) diff --git a/db/debug_db.go b/db/debug_db.go index 7a15bc294..7666ed9fd 100644 --- a/db/debug_db.go +++ b/db/debug_db.go @@ -3,8 +3,14 @@ package db import ( "fmt" "sync" + + cmn "github.com/tendermint/tmlibs/common" ) +func _fmt(f string, az ...interface{}) string { + return fmt.Sprintf(f, az...) +} + //---------------------------------------- // debugDB @@ -26,78 +32,84 @@ func (ddb debugDB) Mutex() *sync.Mutex { return nil } // Implements DB. func (ddb debugDB) Get(key []byte) (value []byte) { - defer fmt.Printf("%v.Get(%X) %X\n", ddb.label, key, value) + defer func() { + fmt.Printf("%v.Get(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Blue(_fmt("%X", value))) + }() value = ddb.db.Get(key) return } // Implements DB. 
func (ddb debugDB) Has(key []byte) (has bool) { - defer fmt.Printf("%v.Has(%X) %v\n", ddb.label, key, has) + defer func() { + fmt.Printf("%v.Has(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), has) + }() return ddb.db.Has(key) } // Implements DB. func (ddb debugDB) Set(key []byte, value []byte) { - fmt.Printf("%v.Set(%X, %X)\n", ddb.label, key, value) + fmt.Printf("%v.Set(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) ddb.db.Set(key, value) } // Implements DB. func (ddb debugDB) SetSync(key []byte, value []byte) { - fmt.Printf("%v.SetSync(%X, %X)\n", ddb.label, key, value) + fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) ddb.db.SetSync(key, value) } // Implements atomicSetDeleter. func (ddb debugDB) SetNoLock(key []byte, value []byte) { - fmt.Printf("%v.SetNoLock(%X, %X)\n", ddb.label, key, value) - ddb.db.Set(key, value) + fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + ddb.db.(atomicSetDeleter).SetNoLock(key, value) } // Implements atomicSetDeleter. func (ddb debugDB) SetNoLockSync(key []byte, value []byte) { - fmt.Printf("%v.SetNoLockSync(%X, %X)\n", ddb.label, key, value) - ddb.db.SetSync(key, value) + fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + ddb.db.(atomicSetDeleter).SetNoLockSync(key, value) } // Implements DB. func (ddb debugDB) Delete(key []byte) { - fmt.Printf("%v.Delete(%X)\n", ddb.label, key) + fmt.Printf("%v.Delete(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) ddb.db.Delete(key) } // Implements DB. func (ddb debugDB) DeleteSync(key []byte) { - fmt.Printf("%v.DeleteSync(%X)\n", ddb.label, key) + fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) ddb.db.DeleteSync(key) } // Implements atomicSetDeleter. 
func (ddb debugDB) DeleteNoLock(key []byte) { - fmt.Printf("%v.DeleteNoLock(%X)\n", ddb.label, key) - ddb.db.Delete(key) + fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + ddb.db.(atomicSetDeleter).DeleteNoLock(key) } // Implements atomicSetDeleter. func (ddb debugDB) DeleteNoLockSync(key []byte) { - fmt.Printf("%v.DeleteNoLockSync(%X)\n", ddb.label, key) - ddb.db.DeleteSync(key) + fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + ddb.db.(atomicSetDeleter).DeleteNoLockSync(key) } // Implements DB. func (ddb debugDB) Iterator(start, end []byte) Iterator { - fmt.Printf("%v.Iterator(%X, %X)\n", ddb.label, start, end) + fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end))) return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end)) } // Implements DB. func (ddb debugDB) ReverseIterator(start, end []byte) Iterator { - fmt.Printf("%v.ReverseIterator(%X, %X)\n", ddb.label, start, end) + fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end))) return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end)) } // Implements DB. +// Panics if the underlying db is not an +// atomicSetDeleter. func (ddb debugDB) NewBatch() Batch { fmt.Printf("%v.NewBatch()\n", ddb.label) return NewDebugBatch(ddb.label, ddb.db.NewBatch()) @@ -137,14 +149,18 @@ func NewDebugIterator(label string, itr Iterator) debugIterator { // Implements Iterator. func (ditr debugIterator) Domain() (start []byte, end []byte) { - defer fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end) + defer func() { + fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end) + }() start, end = ditr.itr.Domain() return } // Implements Iterator. 
func (ditr debugIterator) Valid() (ok bool) { - defer fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok) + defer func() { + fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok) + }() ok = ditr.itr.Valid() return } @@ -157,14 +173,14 @@ func (ditr debugIterator) Next() { // Implements Iterator. func (ditr debugIterator) Key() (key []byte) { - fmt.Printf("%v.itr.Key() %X\n", ditr.label, key) + fmt.Printf("%v.itr.Key() %v\n", ditr.label, cmn.Cyan(_fmt("%X", key))) key = ditr.itr.Key() return } // Implements Iterator. func (ditr debugIterator) Value() (value []byte) { - fmt.Printf("%v.itr.Value() %X\n", ditr.label, value) + fmt.Printf("%v.itr.Value() %v\n", ditr.label, cmn.Blue(_fmt("%X", value))) value = ditr.itr.Value() return } @@ -193,13 +209,13 @@ func NewDebugBatch(label string, bch Batch) debugBatch { // Implements Batch. func (dbch debugBatch) Set(key, value []byte) { - fmt.Printf("%v.batch.Set(%X, %X)\n", dbch.label, key, value) + fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) dbch.bch.Set(key, value) } // Implements Batch. func (dbch debugBatch) Delete(key []byte) { - fmt.Printf("%v.batch.Delete(%X)\n", dbch.label, key) + fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, cmn.Red(_fmt("%X", key))) dbch.bch.Delete(key) } diff --git a/db/mem_batch.go b/db/mem_batch.go index 81a63d62b..5c5d0c13a 100644 --- a/db/mem_batch.go +++ b/db/mem_batch.go @@ -1,6 +1,8 @@ package db -import "sync" +import ( + "sync" +) type atomicSetDeleter interface { Mutex() *sync.Mutex @@ -66,6 +68,5 @@ func (mBatch *memBatch) write(doSync bool) { case opTypeDelete: mBatch.db.DeleteNoLock(op.key) } - } } diff --git a/db/mem_db.go b/db/mem_db.go index 2d802947c..1521f87ac 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -37,7 +37,8 @@ func (db *MemDB) Get(key []byte) []byte { defer db.mtx.Unlock() key = nonNilBytes(key) - return db.db[string(key)] + value := db.db[string(key)] + return value } // Implements DB. 
@@ -162,7 +163,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { db.mtx.Lock() defer db.mtx.Unlock() - keys := db.getSortedKeys(end, start, true) + keys := db.getSortedKeys(start, end, true) return newMemDBIterator(db, keys, start, end) } @@ -236,7 +237,8 @@ func (itr *memDBIterator) assertIsValid() { func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { keys := []string{} for key := range db.db { - if IsKeyInDomain([]byte(key), start, end, false) { + inDomain := IsKeyInDomain([]byte(key), start, end, reverse) + if inDomain { keys = append(keys, key) } } @@ -244,7 +246,9 @@ func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { if reverse { nkeys := len(keys) for i := 0; i < nkeys/2; i++ { + temp := keys[i] keys[i] = keys[nkeys-i-1] + keys[nkeys-i-1] = temp } } return keys diff --git a/db/prefix_db.go b/db/prefix_db.go index 4381ce070..5bb53ebd9 100644 --- a/db/prefix_db.go +++ b/db/prefix_db.go @@ -24,7 +24,8 @@ func IteratePrefix(db DB, prefix []byte) Iterator { TODO: Make test, maybe rename. // Like IteratePrefix but the iterator strips the prefix from the keys. func IteratePrefixStripped(db DB, prefix []byte) Iterator { - return newUnprefixIterator(prefix, IteratePrefix(db, prefix)) + start, end := ... + return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) } */ @@ -55,7 +56,9 @@ func (pdb *prefixDB) Get(key []byte) []byte { pdb.mtx.Lock() defer pdb.mtx.Unlock() - return pdb.db.Get(pdb.prefixed(key)) + pkey := pdb.prefixed(key) + value := pdb.db.Get(pkey) + return value } // Implements DB. @@ -71,7 +74,8 @@ func (pdb *prefixDB) Set(key []byte, value []byte) { pdb.mtx.Lock() defer pdb.mtx.Unlock() - pdb.db.Set(pdb.prefixed(key), value) + pkey := pdb.prefixed(key) + pdb.db.Set(pkey, value) } // Implements DB. @@ -82,16 +86,6 @@ func (pdb *prefixDB) SetSync(key []byte, value []byte) { pdb.db.SetSync(pdb.prefixed(key), value) } -// Implements atomicSetDeleter. 
-func (pdb *prefixDB) SetNoLock(key []byte, value []byte) { - pdb.db.Set(pdb.prefixed(key), value) -} - -// Implements atomicSetDeleter. -func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) { - pdb.db.SetSync(pdb.prefixed(key), value) -} - // Implements DB. func (pdb *prefixDB) Delete(key []byte) { pdb.mtx.Lock() @@ -108,28 +102,22 @@ func (pdb *prefixDB) DeleteSync(key []byte) { pdb.db.DeleteSync(pdb.prefixed(key)) } -// Implements atomicSetDeleter. -func (pdb *prefixDB) DeleteNoLock(key []byte) { - pdb.db.Delete(pdb.prefixed(key)) -} - -// Implements atomicSetDeleter. -func (pdb *prefixDB) DeleteNoLockSync(key []byte) { - pdb.db.DeleteSync(pdb.prefixed(key)) -} - // Implements DB. func (pdb *prefixDB) Iterator(start, end []byte) Iterator { pdb.mtx.Lock() defer pdb.mtx.Unlock() - pstart := append(pdb.prefix, start...) - pend := []byte(nil) - if end != nil { - pend = append(pdb.prefix, end...) + var pstart, pend []byte + pstart = append(cp(pdb.prefix), start...) + if end == nil { + pend = cpIncr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) } - return newUnprefixIterator( + return newPrefixIterator( pdb.prefix, + start, + end, pdb.db.Iterator( pstart, pend, @@ -142,31 +130,68 @@ func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { pdb.mtx.Lock() defer pdb.mtx.Unlock() - pstart := []byte(nil) - if start != nil { - pstart = append(pdb.prefix, start...) + var pstart, pend []byte + if start == nil { + // This may cause the underlying iterator to start with + // an item which doesn't start with prefix. We will skip + // that item later in this function. See 'skipOne'. + pstart = cpIncr(pdb.prefix) + } else { + pstart = append(cp(pdb.prefix), start...) + } + if end == nil { + // This may cause the underlying iterator to end with an + // item which doesn't start with prefix. The + // prefixIterator will terminate iteration + // automatically upon detecting this. 
+ pend = cpDecr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) } - pend := []byte(nil) - if end != nil { - pend = append(pdb.prefix, end...) + ritr := pdb.db.ReverseIterator(pstart, pend) + if start == nil { + skipOne(ritr, cpIncr(pdb.prefix)) } - return newUnprefixIterator( + return newPrefixIterator( pdb.prefix, - pdb.db.ReverseIterator( - pstart, - pend, - ), + start, + end, + ritr, ) } // Implements DB. +// Panics if the underlying DB is not an +// atomicSetDeleter. func (pdb *prefixDB) NewBatch() Batch { pdb.mtx.Lock() defer pdb.mtx.Unlock() - return &memBatch{pdb, nil} + return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) +} + +/* NOTE: Uncomment to use memBatch instead of prefixBatch +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLock(key []byte, value []byte) { + pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value) } +// Implements atomicSetDeleter. +func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) { + pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) DeleteNoLock(key []byte) { + pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key)) +} + +// Implements atomicSetDeleter. +func (pdb *prefixDB) DeleteNoLockSync(key []byte) { + pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key)) +} +*/ + // Implements DB. func (pdb *prefixDB) Close() { pdb.mtx.Lock() @@ -201,52 +226,109 @@ func (pdb *prefixDB) Stats() map[string]string { } func (pdb *prefixDB) prefixed(key []byte) []byte { - return append(pdb.prefix, key...) + return append(cp(pdb.prefix), key...) } //---------------------------------------- +// prefixBatch -// Strips prefix while iterating from Iterator. 
-type unprefixIterator struct { +type prefixBatch struct { prefix []byte - source Iterator + source Batch } -func newUnprefixIterator(prefix []byte, source Iterator) unprefixIterator { - return unprefixIterator{ +func newPrefixBatch(prefix []byte, source Batch) prefixBatch { + return prefixBatch{ prefix: prefix, source: source, } } -func (itr unprefixIterator) Domain() (start []byte, end []byte) { - start, end = itr.source.Domain() - if len(start) > 0 { - start = stripPrefix(start, itr.prefix) - } - if len(end) > 0 { - end = stripPrefix(end, itr.prefix) +func (pb prefixBatch) Set(key, value []byte) { + pkey := append(cp(pb.prefix), key...) + pb.source.Set(pkey, value) +} + +func (pb prefixBatch) Delete(key []byte) { + pkey := append(cp(pb.prefix), key...) + pb.source.Delete(pkey) +} + +func (pb prefixBatch) Write() { + pb.source.Write() +} + +func (pb prefixBatch) WriteSync() { + pb.source.WriteSync() +} + +//---------------------------------------- +// prefixIterator + +// Strips prefix while iterating from Iterator. 
+type prefixIterator struct { + prefix []byte + start []byte + end []byte + source Iterator + valid bool +} + +func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterator { + if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { + return prefixIterator{ + prefix: prefix, + start: start, + end: end, + source: source, + valid: false, + } + } else { + return prefixIterator{ + prefix: prefix, + start: start, + end: end, + source: source, + valid: true, + } } - return } -func (itr unprefixIterator) Valid() bool { - return itr.source.Valid() +func (itr prefixIterator) Domain() (start []byte, end []byte) { + return itr.start, itr.end } -func (itr unprefixIterator) Next() { +func (itr prefixIterator) Valid() bool { + return itr.valid && itr.source.Valid() +} + +func (itr prefixIterator) Next() { + if !itr.valid { + panic("prefixIterator invalid, cannot call Next()") + } itr.source.Next() + if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) { + itr.source.Close() + itr.valid = false + return + } } -func (itr unprefixIterator) Key() (key []byte) { +func (itr prefixIterator) Key() (key []byte) { + if !itr.valid { + panic("prefixIterator invalid, cannot call Key()") + } return stripPrefix(itr.source.Key(), itr.prefix) } -func (itr unprefixIterator) Value() (value []byte) { +func (itr prefixIterator) Value() (value []byte) { + if !itr.valid { + panic("prefixIterator invalid, cannot call Value()") + } return itr.source.Value() } -func (itr unprefixIterator) Close() { +func (itr prefixIterator) Close() { itr.source.Close() } @@ -261,3 +343,13 @@ func stripPrefix(key []byte, prefix []byte) (stripped []byte) { } return key[len(prefix):] } + +// If the first iterator item is skipKey, then +// skip it. 
+func skipOne(itr Iterator, skipKey []byte) { + if itr.Valid() { + if bytes.Equal(itr.Key(), skipKey) { + itr.Next() + } + } +} diff --git a/db/prefix_db_test.go b/db/prefix_db_test.go index fd44a7ec8..60809f157 100644 --- a/db/prefix_db_test.go +++ b/db/prefix_db_test.go @@ -2,7 +2,7 @@ package db import "testing" -func TestIteratePrefix(t *testing.T) { +func mockDBWithStuff() DB { db := NewMemDB() // Under "key" prefix db.Set(bz("key"), bz("value")) @@ -14,10 +14,13 @@ func TestIteratePrefix(t *testing.T) { db.Set(bz("k"), bz("val")) db.Set(bz("ke"), bz("valu")) db.Set(bz("kee"), bz("valuu")) - xitr := db.Iterator(nil, nil) - xitr.Key() + return db +} +func TestPrefixDBSimple(t *testing.T) { + db := mockDBWithStuff() pdb := NewPrefixDB(db, bz("key")) + checkValue(t, pdb, bz("key"), nil) checkValue(t, pdb, bz(""), bz("value")) checkValue(t, pdb, bz("key1"), nil) @@ -30,9 +33,42 @@ func TestIteratePrefix(t *testing.T) { checkValue(t, pdb, bz("k"), nil) checkValue(t, pdb, bz("ke"), nil) checkValue(t, pdb, bz("kee"), nil) +} + +func TestPrefixDBIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) itr := pdb.Iterator(nil, nil) - itr.Key() + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator3(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) checkItem(t, itr, bz(""), bz("value")) checkNext(t, itr, true) 
checkItem(t, itr, bz("1"), bz("value1")) @@ -40,5 +76,72 @@ func TestIteratePrefix(t *testing.T) { checkItem(t, itr, bz("2"), bz("value2")) checkNext(t, itr, true) checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBIterator4(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.Iterator(bz(""), bz("")) + checkDomain(t, itr, bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator1(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, nil) + checkDomain(t, itr, nil, nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator2(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(nil, bz("")) + checkDomain(t, itr, nil, bz("")) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator3(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator4(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(bz(""), bz("")) + checkInvalid(t, itr) itr.Close() } diff --git a/db/util.go b/db/util.go index 
1ad5002d6..51277ac42 100644 --- a/db/util.go +++ b/db/util.go @@ -33,6 +33,29 @@ func cpIncr(bz []byte) (ret []byte) { return nil } +// Returns a slice of the same length (big endian) +// except decremented by one. +// Returns nil on underflow (e.g. if bz bytes are all 0x00) +// CONTRACT: len(bz) > 0 +func cpDecr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpDecr expects non-zero bz length") + } + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] > byte(0x00) { + ret[i]-- + return + } + ret[i] = byte(0xFF) + if i == 0 { + // Underflow + return nil + } + } + return nil +} + // See DB interface documentation for more information. func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { if !isReverse { @@ -43,12 +66,13 @@ func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { return false } return true + } else { + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true } - if start != nil && bytes.Compare(start, key) < 0 { - return false - } - if end != nil && bytes.Compare(key, end) <= 0 { - return false - } - return true } diff --git a/version/version.go b/version/version.go index b389a63a0..107f5cf3a 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.1" +const Version = "0.8.2" From 8fa4211bbd2304d6f5b1307a8f1de9ec26f16434 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 23 Apr 2018 00:07:03 -0700 Subject: [PATCH 427/515] Fixes TestParallelAbort nondeterministic failure #201 (#202) --- common/async.go | 47 +++++++++++++++++++++++++++++++++++--------- common/async_test.go | 4 ++++ 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/common/async.go b/common/async.go index 49714d95e..7be09a3c1 100644 --- a/common/async.go +++ b/common/async.go @@ -32,7 +32,7 @@ type TaskResultSet struct { func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { return 
&TaskResultSet{ chz: chz, - results: nil, + results: make([]taskResultOK, len(chz)), } } @@ -49,18 +49,20 @@ func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { } // NOTE: Not concurrency safe. +// Writes results to trs.results without waiting for all tasks to complete. func (trs *TaskResultSet) Reap() *TaskResultSet { - if trs.results == nil { - trs.results = make([]taskResultOK, len(trs.chz)) - } for i := 0; i < len(trs.results); i++ { var trch = trs.chz[i] select { - case result := <-trch: - // Overwrite result. - trs.results[i] = taskResultOK{ - TaskResult: result, - OK: true, + case result, ok := <-trch: + if ok { + // Write result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. } default: // Do nothing. @@ -69,6 +71,27 @@ func (trs *TaskResultSet) Reap() *TaskResultSet { return trs } +// NOTE: Not concurrency safe. +// Like Reap() but waits until all tasks have returned or panic'd. +func (trs *TaskResultSet) Wait() *TaskResultSet { + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + select { + case result, ok := <-trch: + if ok { + // Write result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. + } + } + } + return trs +} + // Returns the firstmost (by task index) error as // discovered by all previous Reap() calls. func (trs *TaskResultSet) FirstValue() interface{} { @@ -116,7 +139,11 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { defer func() { if pnk := recover(); pnk != nil { atomic.AddInt32(numPanics, 1) + // Send panic to taskResultCh. taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")} + // Closing taskResultCh lets trs.Wait() work. + close(taskResultCh) + // Decrement waitgroup. taskDoneCh <- false } }() @@ -125,6 +152,8 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { // Send val/err to taskResultCh. 
// NOTE: Below this line, nothing must panic/ taskResultCh <- TaskResult{val, err} + // Closing taskResultCh lets trs.Wait() work. + close(taskResultCh) // Decrement waitgroup. taskDoneCh <- abort }(i, task, taskResultCh) diff --git a/common/async_test.go b/common/async_test.go index 9f060ca2d..037afcaaa 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -91,10 +91,14 @@ func TestParallelAbort(t *testing.T) { // Now let the last task (#3) complete after abort. flow4 <- <-flow3 + // Wait until all tasks have returned or panic'd. + taskResultSet.Wait() + // Verify task #0, #1, #2. checkResult(t, taskResultSet, 0, 0, nil, nil) checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) checkResult(t, taskResultSet, 2, 2, nil, nil) + checkResult(t, taskResultSet, 3, 3, nil, nil) } func TestParallelRecover(t *testing.T) { From d94e312673e16a11ea55d742cefb3e331228f898 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 23 Apr 2018 09:16:05 +0200 Subject: [PATCH 428/515] add MarshalJSON and UnmarshalJSON to BitArray (#200) See CHANGELOG --- CHANGELOG.md | 9 ++++- common/bit_array.go | 76 ++++++++++++++++++++++++++++++++++------ common/bit_array_test.go | 52 ++++++++++++++++++++++++++- common/string.go | 15 ++++++++ common/string_test.go | 15 ++++++++ 5 files changed, 155 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9509d5d3..b85072e60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,20 @@ # Changelog -## 0.8.2 (April 12th, 2018) +## 0.8.2 (April 23rd, 2018) FEATURES: + - [pubsub] TagMap, NewTagMap + - [merkle] SimpleProofsFromMap() + - [common] IsASCIIText() + - [common] PrefixEndBytes // e.g. increment or nil + - [common] BitArray.MarshalJSON/.UnmarshalJSON + - [common] BitArray uses 'x' not 'X' for String() and above. 
- [db] DebugDB shows better colorized output BUG FIXES: + - [common] Fix TestParallelAbort nondeterministic failure #201/#202 - [db] PrefixDB Iterator/ReverseIterator fixes - [db] DebugDB fixes diff --git a/common/bit_array.go b/common/bit_array.go index ea6a6ee1f..0290921a6 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -3,6 +3,7 @@ package common import ( "encoding/binary" "fmt" + "regexp" "strings" "sync" ) @@ -249,13 +250,14 @@ func (bA *BitArray) PickRandom() (int, bool) { return 0, false } +// String returns a string representation of BitArray: BA{}, +// where is a sequence of 'x' (1) and '_' (0). +// The includes spaces and newlines to help people. +// For a simple sequence of 'x' and '_' characters with no spaces or newlines, +// see the MarshalJSON() method. +// Example: "BA{_x_}" or "nil-BitArray" for nil. func (bA *BitArray) String() string { - if bA == nil { - return "nil-BitArray" - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.stringIndented("") + return bA.StringIndented("") } func (bA *BitArray) StringIndented(indent string) string { @@ -268,12 +270,11 @@ func (bA *BitArray) StringIndented(indent string) string { } func (bA *BitArray) stringIndented(indent string) string { - lines := []string{} bits := "" for i := 0; i < bA.Bits; i++ { if bA.getIndex(i) { - bits += "X" + bits += "x" } else { bits += "_" } @@ -282,10 +283,10 @@ func (bA *BitArray) stringIndented(indent string) string { bits = "" } if i%10 == 9 { - bits += " " + bits += indent } if i%50 == 49 { - bits += " " + bits += indent } } if len(bits) > 0 { @@ -320,3 +321,58 @@ func (bA *BitArray) Update(o *BitArray) { copy(bA.Elems, o.Elems) } + +// MarshalJSON implements json.Marshaler interface by marshaling bit array +// using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit. 
+func (bA *BitArray) MarshalJSON() ([]byte, error) { + if bA == nil { + return []byte("null"), nil + } + + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bits := `"` + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += `x` + } else { + bits += `_` + } + } + bits += `"` + return []byte(bits), nil +} + +var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`) + +// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom +// JSON description. +func (bA *BitArray) UnmarshalJSON(bz []byte) error { + b := string(bz) + if b == "null" { + // This is required e.g. for encoding/json when decoding + // into a pointer with pre-allocated BitArray. + bA.Bits = 0 + bA.Elems = nil + return nil + } + + // Validate 'b'. + match := bitArrayJSONRegexp.FindStringSubmatch(b) + if match == nil { + return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) + } + bits := match[1] + + // Construct new BitArray and copy over. + numBits := len(bits) + bA2 := NewBitArray(numBits) + for i := 0; i < numBits; i++ { + if bits[i] == 'x' { + bA2.SetIndex(i, true) + } + } + *bA = *bA2 + return nil +} diff --git a/common/bit_array_test.go b/common/bit_array_test.go index fbc438cd1..c697ba5de 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -2,8 +2,10 @@ package common import ( "bytes" + "encoding/json" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -210,8 +212,56 @@ func TestUpdateNeverPanics(t *testing.T) { } func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { - bitList := []int{-127, -128, -1<<31} + bitList := []int{-127, -128, -1 << 31} for _, bits := range bitList { _ = NewBitArray(bits) } } + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewBitArray(0) + + bA2 := NewBitArray(1) + + bA3 := NewBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + testCases := []struct 
{ + bA *BitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *BitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Bits, unmarshalledBA.Bits) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} diff --git a/common/string.go b/common/string.go index 0e2231e91..ccfa0cd3a 100644 --- a/common/string.go +++ b/common/string.go @@ -57,3 +57,18 @@ func SplitAndTrim(s, sep, cutset string) []string { } return spl } + +// Returns true if s is a non-empty printable non-tab ascii character. 
+func IsASCIIText(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	for _, b := range []byte(s) {
+		if 32 <= b && b <= 126 {
+			// good
+		} else {
+			return false
+		}
+	}
+	return true
+}
diff --git a/common/string_test.go b/common/string_test.go
index 82ba67844..fecf1dab7 100644
--- a/common/string_test.go
+++ b/common/string_test.go
@@ -49,3 +49,18 @@ func TestSplitAndTrim(t *testing.T) {
 		assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s)
 	}
 }
+
+func TestIsASCIIText(t *testing.T) {
+	notASCIIText := []string{
+		"", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t",
+	}
+	for _, v := range notASCIIText {
+		assert.False(t, IsASCIIText(v), "%q is not ascii-text", v)
+	}
+	asciiText := []string{
+		" ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123",
+	}
+	for _, v := range asciiText {
+		assert.True(t, IsASCIIText(v), "%q is ascii-text", v)
+	}
+}
From 9b2a8f07a353c7d9db9f89cd0bd62ddf1d52c5c4 Mon Sep 17 00:00:00 2001
From: Jae Kwon <jae@tendermint.com>
Date: Mon, 23 Apr 2018 01:32:18 -0700
Subject: [PATCH 429/515] [common] Add ASCIITrim

---
 common/string.go      | 15 +++++++++++++++
 common/string_test.go |  8 ++++++++
 2 files changed, 23 insertions(+)

diff --git a/common/string.go b/common/string.go
index ccfa0cd3a..fac1be6c9 100644
--- a/common/string.go
+++ b/common/string.go
@@ -72,3 +72,18 @@ func IsASCIIText(s string) bool {
 	}
 	return true
 }
+
+// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics.
+func ASCIITrim(s string) string { + r := make([]byte, 0, len(s)) + for _, b := range []byte(s) { + if b == 32 { + continue // skip space + } else if 32 < b && b <= 126 { + r = append(r, b) + } else { + panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b)) + } + } + return string(r) +} diff --git a/common/string_test.go b/common/string_test.go index fecf1dab7..5d1b68feb 100644 --- a/common/string_test.go +++ b/common/string_test.go @@ -64,3 +64,11 @@ func TestIsASCIIText(t *testing.T) { assert.True(t, IsASCIIText(v), "%q is ascii-text", v) } } + +func TestASCIITrim(t *testing.T) { + assert.Equal(t, ASCIITrim(" "), "") + assert.Equal(t, ASCIITrim(" a"), "a") + assert.Equal(t, ASCIITrim("a "), "a") + assert.Equal(t, ASCIITrim(" a "), "a") + assert.Panics(t, func() { ASCIITrim("\xC2\xA2") }) +} From cc5f287c4798ffe88c04d02df219ecb6932080fd Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 23 Apr 2018 02:36:49 -0700 Subject: [PATCH 430/515] Add developer branch 0.8.3 to CHANGELOG --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b85072e60..0d1cfceb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.8.3 (develop branch) + +FEATURES: + + - [common] ASCIITrim() + ## 0.8.2 (April 23rd, 2018) FEATURES: From 11bee6194aeff40a7b3d62123db1a7cbca2e81e7 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Mon, 12 Mar 2018 00:39:34 -0700 Subject: [PATCH 431/515] DB as a service Fixes https://github.com/tendermint/tendermint/issues/1162 Databases as a service! Can now access Databases as a remote service via gRPC for performance and easy deployment. The caveat is that each service is stateful in regards to the DB i.e. each unique service uses only one unique DB but nonetheless multiple clients can access it. 
A full standalone example ```go package main import ( "bytes" "context" "log" grpcdb "github.com/tendermint/tmlibs/grpcdb" protodb "github.com/tendermint/tmlibs/proto" ) func main() { addr := ":8998" go func() { if err := grpcdb.BindRemoteDBServer(addr); err != nil { log.Fatalf("BindRemoteDBServer: %v", err) } }() client, err := grpcdb.NewClient(addr, false) if err != nil { log.Fatalf("Failed to create grpcDB client: %v", err) } ctx := context.Background() // 1. Initialize the DB in := &protodb.Init{ Type: "leveldb", Name: "grpc-uno-test", Dir: ".", } if _, err := client.Init(ctx, in); err != nil { log.Fatalf("Init error: %v", err) } // 2. Now it can be used! query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")} if _, err := client.SetSync(ctx, query1); err != nil { log.Fatalf("SetSync err: %v", err) } query2 := &protodb.Entity{Key: []byte("Project")} read, err := client.Get(ctx, query2) if err != nil { log.Fatalf("Get err: %v", err) } if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) { log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w) } } ``` --- Makefile | 3 + grpcdb/client.go | 19 + grpcdb/example_test.go | 50 +++ grpcdb/server.go | 142 ++++++++ proto/defs.pb.go | 784 +++++++++++++++++++++++++++++++++++++++++ proto/defs.proto | 57 +++ 6 files changed, 1055 insertions(+) create mode 100644 grpcdb/client.go create mode 100644 grpcdb/example_test.go create mode 100644 grpcdb/server.go create mode 100644 proto/defs.pb.go create mode 100644 proto/defs.proto diff --git a/Makefile b/Makefile index 9e181f9f9..0236c480b 100644 --- a/Makefile +++ b/Makefile @@ -119,3 +119,6 @@ metalinter_all: # unless there is a reason not to. 
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html .PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all + +grpc_dbserver: + protoc -I proto/ proto/defs.proto --go_out=plugins=grpc:proto diff --git a/grpcdb/client.go b/grpcdb/client.go new file mode 100644 index 000000000..45409a1f9 --- /dev/null +++ b/grpcdb/client.go @@ -0,0 +1,19 @@ +package grpcdb + +import ( + "google.golang.org/grpc" + + protodb "github.com/tendermint/tmlibs/proto" +) + +func NewClient(serverAddr string, secure bool) (protodb.DBClient, error) { + var opts []grpc.DialOption + if !secure { + opts = append(opts, grpc.WithInsecure()) + } + cc, err := grpc.Dial(serverAddr, opts...) + if err != nil { + return nil, err + } + return protodb.NewDBClient(cc), nil +} diff --git a/grpcdb/example_test.go b/grpcdb/example_test.go new file mode 100644 index 000000000..653180113 --- /dev/null +++ b/grpcdb/example_test.go @@ -0,0 +1,50 @@ +package grpcdb_test + +import ( + "bytes" + "context" + "log" + + grpcdb "github.com/tendermint/tmlibs/grpcdb" + protodb "github.com/tendermint/tmlibs/proto" +) + +func Example() { + addr := ":8998" + go func() { + if err := grpcdb.BindRemoteDBServer(addr); err != nil { + log.Fatalf("BindRemoteDBServer: %v", err) + } + }() + + client, err := grpcdb.NewClient(addr, false) + if err != nil { + log.Fatalf("Failed to create grpcDB client: %v", err) + } + + ctx := context.Background() + // 1. Initialize the DB + in := &protodb.Init{ + Type: "leveldb", + Name: "grpc-uno-test", + Dir: ".", + } + if _, err := client.Init(ctx, in); err != nil { + log.Fatalf("Init error: %v", err) + } + + // 2. Now it can be used! 
+ query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")} + if _, err := client.SetSync(ctx, query1); err != nil { + log.Fatalf("SetSync err: %v", err) + } + + query2 := &protodb.Entity{Key: []byte("Project")} + read, err := client.Get(ctx, query2) + if err != nil { + log.Fatalf("Get err: %v", err) + } + if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) { + log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w) + } +} diff --git a/grpcdb/server.go b/grpcdb/server.go new file mode 100644 index 000000000..26d0ffa9e --- /dev/null +++ b/grpcdb/server.go @@ -0,0 +1,142 @@ +package grpcdb + +import ( +"log" + "context" + "net" + "sync" + "time" + + "google.golang.org/grpc" + + "github.com/tendermint/tmlibs/db" + protodb "github.com/tendermint/tmlibs/proto" +) + +func BindRemoteDBServer(addr string, opts ...grpc.ServerOption) error { + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + srv := grpc.NewServer(opts...) + protodb.RegisterDBServer(srv, new(server)) + return srv.Serve(ln) +} + +type server struct { + mu sync.Mutex + db db.DB +} + +var _ protodb.DBServer = (*server)(nil) + +func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) { + s.mu.Lock() + defer s.mu.Unlock() + +log.Printf("in: %+v\n", in) + s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir) + return &protodb.Entity{TimeAt: time.Now().Unix()}, nil +} + +func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { + s.db.Delete(in.Key) + return nothing, nil +} + +var nothing = new(protodb.Nothing) +func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { + s.db.DeleteSync(in.Key) + return nothing, nil +} + +func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { + value := s.db.Get(in.Key) + return &protodb.Entity{Value: value}, nil +} + +func (s *server) GetStream(ds 
protodb.DB_GetStreamServer) error { + // Receive routine + responsesChan := make(chan *protodb.Entity) + go func() { + defer close(responsesChan) + ctx := context.Background() + for { + in, err := ds.Recv() + if err != nil { + responsesChan <- &protodb.Entity{Err: err.Error()} + return + } + out, err := s.Get(ctx, in) + if err != nil { + if out == nil { + out = new(protodb.Entity) + out.Key = in.Key + } + out.Err = err.Error() + responsesChan <- out + return + } + + // Otherwise continue on + responsesChan <- out + } + }() + + // Send routine, block until we return + for out := range responsesChan { + if err := ds.Send(out); err != nil { + return err + } + } + return nil +} + +func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { + exists := s.db.Has(in.Key) + return &protodb.Entity{Exists: exists}, nil +} + +func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { + s.db.Set(in.Key, in.Value) + return nothing, nil +} + +func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { + s.db.SetSync(in.Key, in.Value) + return nothing, nil +} + +func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error { + it := s.db.Iterator(query.Start, query.End) + return s.handleIterator(it, dis.Send) +} + +func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error { + for it.Valid() { + start, end := it.Domain() + out := &protodb.Iterator{ + Domain: &protodb.DDomain{Start: start, End: end}, + Valid: it.Valid(), + Key: it.Key(), + Value: it.Value(), + } + if err := sendFunc(out); err != nil { + return err + } + + // Finally move the iterator forward + it.Next() + } + return nil +} + +func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error { + it := s.db.ReverseIterator(query.Start, query.End) + return s.handleIterator(it, dis.Send) +} + +func (s *server) Stats(context.Context, 
*protodb.Nothing) (*protodb.Stats, error) { + stats := s.db.Stats() + return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil +} diff --git a/proto/defs.pb.go b/proto/defs.pb.go new file mode 100644 index 000000000..61f687504 --- /dev/null +++ b/proto/defs.pb.go @@ -0,0 +1,784 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: defs.proto + +/* +Package protodb is a generated protocol buffer package. + +It is generated from these files: + defs.proto + +It has these top-level messages: + Entity + Nothing + DDomain + Iterator + Stats + Init +*/ +package protodb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Entity struct { + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + Print string `protobuf:"bytes,8,opt,name=print" json:"print,omitempty"` + TimeAt int64 `protobuf:"varint,9,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Entity) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Entity) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Entity) GetExists() bool { + if m != nil { + return m.Exists + } + return false +} + +func (m *Entity) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *Entity) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +func (m *Entity) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +func (m *Entity) GetPrint() string { + if m != nil { + return m.Print + } + return "" +} + +func (m *Entity) GetTimeAt() int64 { + if m != nil { + return m.TimeAt + } + return 0 +} + +type Nothing struct { +} + +func (m *Nothing) Reset() { *m = Nothing{} } +func (m *Nothing) String() string { return proto.CompactTextString(m) } +func 
(*Nothing) ProtoMessage() {} +func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type DDomain struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *DDomain) Reset() { *m = DDomain{} } +func (m *DDomain) String() string { return proto.CompactTextString(m) } +func (*DDomain) ProtoMessage() {} +func (*DDomain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DDomain) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *DDomain) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type Iterator struct { + Domain *DDomain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Iterator) Reset() { *m = Iterator{} } +func (m *Iterator) String() string { return proto.CompactTextString(m) } +func (*Iterator) ProtoMessage() {} +func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Iterator) GetDomain() *DDomain { + if m != nil { + return m.Domain + } + return nil +} + +func (m *Iterator) GetValid() bool { + if m != nil { + return m.Valid + } + return false +} + +func (m *Iterator) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Iterator) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Stats struct { + Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` +} + +func (m *Stats) Reset() { *m = Stats{} } 
+func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Stats) GetData() map[string]string { + if m != nil { + return m.Data + } + return nil +} + +func (m *Stats) GetTimeAt() int64 { + if m != nil { + return m.TimeAt + } + return 0 +} + +type Init struct { + Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` +} + +func (m *Init) Reset() { *m = Init{} } +func (m *Init) String() string { return proto.CompactTextString(m) } +func (*Init) ProtoMessage() {} +func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Init) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Init) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Init) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func init() { + proto.RegisterType((*Entity)(nil), "protodb.Entity") + proto.RegisterType((*Nothing)(nil), "protodb.Nothing") + proto.RegisterType((*DDomain)(nil), "protodb.DDomain") + proto.RegisterType((*Iterator)(nil), "protodb.Iterator") + proto.RegisterType((*Stats)(nil), "protodb.Stats") + proto.RegisterType((*Init)(nil), "protodb.Init") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for DB service + +type DBClient interface { + Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) + Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) + Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) + Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) + Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) + ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) + // rpc print(Nothing) returns (Entity) {} + Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) +} + +type dBClient struct { + cc *grpc.ClientConn +} + +func NewDBClient(cc *grpc.ClientConn) DBClient { + return &dBClient{cc} +} + +func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) 
+ if err != nil { + return nil, err + } + x := &dBGetStreamClient{stream} + return x, nil +} + +type DB_GetStreamClient interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ClientStream +} + +type dBGetStreamClient struct { + grpc.ClientStream +} + +func (x *dBGetStreamClient) Send(m *Entity) error { + return x.ClientStream.SendMsg(m) +} + +func (x *dBGetStreamClient) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { + out := new(Entity) + err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) + if err != nil { + return nil, err + } + x := &dBIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_IteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBIteratorClient struct { + grpc.ClientStream +} + +func (x *dBIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) + if err != nil { + return nil, err + } + x := &dBReverseIteratorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DB_ReverseIteratorClient interface { + Recv() (*Iterator, error) + grpc.ClientStream +} + +type dBReverseIteratorClient struct { + grpc.ClientStream +} + +func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { + m := new(Iterator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { + out := new(Stats) + err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DB service + +type DBServer interface { + Init(context.Context, *Init) (*Entity, error) + Get(context.Context, *Entity) (*Entity, error) + GetStream(DB_GetStreamServer) error + Has(context.Context, *Entity) (*Entity, error) + Set(context.Context, *Entity) (*Nothing, error) + SetSync(context.Context, *Entity) (*Nothing, error) + Delete(context.Context, *Entity) (*Nothing, error) + DeleteSync(context.Context, *Entity) (*Nothing, error) + Iterator(*Entity, DB_IteratorServer) error + ReverseIterator(*Entity, DB_ReverseIteratorServer) error + // rpc print(Nothing) returns (Entity) {} + Stats(context.Context, *Nothing) (*Stats, error) +} + +func RegisterDBServer(s *grpc.Server, srv DBServer) { + s.RegisterService(&_DB_serviceDesc, srv) +} + +func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Init) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Init(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Init", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Init(ctx, req.(*Init)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Get(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetStream_Handler(srv interface{}, 
stream grpc.ServerStream) error { + return srv.(DBServer).GetStream(&dBGetStreamServer{stream}) +} + +type DB_GetStreamServer interface { + Send(*Entity) error + Recv() (*Entity, error) + grpc.ServerStream +} + +type dBGetStreamServer struct { + grpc.ServerStream +} + +func (x *dBGetStreamServer) Send(m *Entity) error { + return x.ServerStream.SendMsg(m) +} + +func (x *dBGetStreamServer) Recv() (*Entity, error) { + m := new(Entity) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Has(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Has", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Has(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Set(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).SetSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/protodb.DB/SetSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).SetSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Delete(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Entity) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).DeleteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/DeleteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).DeleteSync(ctx, req.(*Entity)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DBServer).Iterator(m, &dBIteratorServer{stream}) +} + +type DB_IteratorServer interface { + Send(*Iterator) error + grpc.ServerStream +} + +type dBIteratorServer struct { + grpc.ServerStream +} + +func (x *dBIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Entity) + if err := stream.RecvMsg(m); 
err != nil { + return err + } + return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream}) +} + +type DB_ReverseIteratorServer interface { + Send(*Iterator) error + grpc.ServerStream +} + +type dBReverseIteratorServer struct { + grpc.ServerStream +} + +func (x *dBReverseIteratorServer) Send(m *Iterator) error { + return x.ServerStream.SendMsg(m) +} + +func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Nothing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Stats(ctx, req.(*Nothing)) + } + return interceptor(ctx, in, info, handler) +} + +var _DB_serviceDesc = grpc.ServiceDesc{ + ServiceName: "protodb.DB", + HandlerType: (*DBServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "init", + Handler: _DB_Init_Handler, + }, + { + MethodName: "get", + Handler: _DB_Get_Handler, + }, + { + MethodName: "has", + Handler: _DB_Has_Handler, + }, + { + MethodName: "set", + Handler: _DB_Set_Handler, + }, + { + MethodName: "setSync", + Handler: _DB_SetSync_Handler, + }, + { + MethodName: "delete", + Handler: _DB_Delete_Handler, + }, + { + MethodName: "deleteSync", + Handler: _DB_DeleteSync_Handler, + }, + { + MethodName: "stats", + Handler: _DB_Stats_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "getStream", + Handler: _DB_GetStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "iterator", + Handler: _DB_Iterator_Handler, + ServerStreams: true, + }, + { + StreamName: "reverseIterator", + Handler: _DB_ReverseIterator_Handler, + ServerStreams: true, + }, + }, + Metadata: "defs.proto", +} + +func init() { 
proto.RegisterFile("defs.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x72, 0xd3, 0x4c, + 0x10, 0xf4, 0x4a, 0xb2, 0x64, 0x4d, 0xbe, 0x2f, 0x09, 0x5b, 0x14, 0x6c, 0xf9, 0xa4, 0xd2, 0x49, + 0xfc, 0xb9, 0x12, 0xe7, 0xc0, 0xcf, 0x09, 0x28, 0xe7, 0xe0, 0x4b, 0x0e, 0x32, 0x77, 0x6a, 0x83, + 0x06, 0x67, 0x8b, 0x58, 0x72, 0xed, 0x0e, 0x29, 0xf4, 0x04, 0x3c, 0x00, 0x4f, 0xc4, 0x9b, 0x51, + 0xbb, 0xfa, 0xb1, 0x43, 0x7c, 0x10, 0x27, 0x4d, 0xef, 0x76, 0xf7, 0x8c, 0x5a, 0x23, 0x80, 0x02, + 0xbf, 0x9a, 0xd9, 0x56, 0x57, 0x54, 0xf1, 0xc8, 0x3d, 0x8a, 0xeb, 0xf4, 0x37, 0x83, 0xf0, 0xb2, + 0x24, 0x45, 0x35, 0x3f, 0x06, 0x4f, 0x15, 0x82, 0x25, 0x2c, 0x1b, 0xe7, 0x9e, 0x2a, 0xf8, 0x29, + 0xf8, 0xdf, 0xb0, 0x16, 0x5e, 0xc2, 0xb2, 0xff, 0x72, 0x5b, 0xf2, 0xc7, 0x30, 0xbe, 0x93, 0xb7, + 0xdf, 0x51, 0xf8, 0xee, 0xac, 0x01, 0xfc, 0x09, 0x84, 0xf8, 0x43, 0x19, 0x32, 0x22, 0x48, 0x58, + 0x36, 0xc9, 0x5b, 0x64, 0xd9, 0x86, 0xa4, 0x26, 0x31, 0x6e, 0xd8, 0x0e, 0x58, 0x57, 0x2c, 0x0b, + 0x11, 0x36, 0xae, 0x58, 0xba, 0x3e, 0xa8, 0xb5, 0x88, 0x12, 0x96, 0xc5, 0xb9, 0x2d, 0xad, 0x72, + 0xab, 0x55, 0x49, 0x62, 0xe2, 0xce, 0x1a, 0xc0, 0x9f, 0x42, 0x44, 0x6a, 0x83, 0x9f, 0x25, 0x89, + 0x38, 0x61, 0x99, 0x9f, 0x87, 0x16, 0x7e, 0xa0, 0x34, 0x86, 0xe8, 0xaa, 0xa2, 0x1b, 0x55, 0xae, + 0xd3, 0x73, 0x88, 0x16, 0x8b, 0x6a, 0x23, 0x55, 0xb9, 0x6b, 0xcf, 0x0e, 0xb4, 0xf7, 0xfa, 0xf6, + 0xa9, 0x86, 0xc9, 0x92, 0x50, 0x4b, 0xaa, 0x34, 0xcf, 0x20, 0x2c, 0x9c, 0xda, 0x89, 0x8e, 0xe6, + 0xa7, 0xb3, 0x36, 0xa7, 0x59, 0xeb, 0x9a, 0xb7, 0xf7, 0x6d, 0x14, 0xaa, 0x71, 0x9a, 0xe4, 0x0d, + 0xe8, 0x22, 0xf3, 0x0f, 0x44, 0x16, 0xec, 0x45, 0x96, 0xfe, 0x64, 0x30, 0x5e, 0x91, 0x24, 0xc3, + 0x5f, 0x42, 0x50, 0x48, 0x92, 0x82, 0x25, 0x7e, 0x76, 0x34, 0x17, 0x7d, 0x3f, 0x77, 0x3b, 0x5b, + 0x48, 0x92, 0x97, 0x25, 0xe9, 0x3a, 0x77, 0xac, 0xfd, 0x08, 0xbc, 0xfd, 0x08, 0xa6, 0xaf, 0x21, + 
0xee, 0xb9, 0xdd, 0x14, 0xac, 0x09, 0xf4, 0xde, 0x14, 0x5e, 0x13, 0xa8, 0x03, 0xef, 0xbc, 0x37, + 0x2c, 0x7d, 0x0f, 0xc1, 0xb2, 0x54, 0xc4, 0x39, 0x04, 0x9f, 0xea, 0x2d, 0xb6, 0x22, 0x57, 0xdb, + 0xb3, 0x2b, 0xb9, 0xe9, 0x44, 0xae, 0xb6, 0xde, 0x0b, 0xa5, 0xdd, 0x1b, 0xc6, 0xb9, 0x2d, 0xe7, + 0xbf, 0x02, 0xf0, 0x16, 0x1f, 0x79, 0x06, 0x81, 0xb2, 0x46, 0xff, 0xf7, 0xaf, 0x60, 0x7d, 0xa7, + 0x27, 0x3d, 0x6c, 0xb6, 0x2c, 0x1d, 0xf1, 0x67, 0xe0, 0xaf, 0x91, 0xf8, 0xdf, 0x37, 0x87, 0xa8, + 0x17, 0x10, 0xaf, 0x91, 0x56, 0xa4, 0x51, 0x6e, 0x86, 0x08, 0x32, 0x76, 0xc6, 0xac, 0xff, 0x8d, + 0x34, 0x83, 0xfc, 0x9f, 0x83, 0x6f, 0x0e, 0x8d, 0xb2, 0xfb, 0xee, 0xdd, 0x62, 0x8d, 0xf8, 0x0c, + 0x22, 0x83, 0xb4, 0xaa, 0xcb, 0x2f, 0xc3, 0xf8, 0xaf, 0x20, 0x2c, 0xf0, 0x16, 0x09, 0x87, 0xd1, + 0xcf, 0xed, 0xff, 0x69, 0xe9, 0xc3, 0x3b, 0xcc, 0x61, 0xa2, 0xba, 0xcd, 0x7d, 0x20, 0x78, 0xb4, + 0xfb, 0x0e, 0x2d, 0x27, 0x1d, 0x9d, 0x31, 0xfe, 0x16, 0x4e, 0x34, 0xde, 0xa1, 0x36, 0xb8, 0xfc, + 0x57, 0xe9, 0x0b, 0xf7, 0x43, 0x91, 0xe1, 0x0f, 0x66, 0x99, 0x1e, 0xdf, 0xdf, 0xdb, 0x74, 0x74, + 0x1d, 0xba, 0x83, 0x8b, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x2e, 0x77, 0x07, 0x75, 0x04, + 0x00, 0x00, +} diff --git a/proto/defs.proto b/proto/defs.proto new file mode 100644 index 000000000..c203fd1eb --- /dev/null +++ b/proto/defs.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package protodb; + +message Entity { + int32 id = 1; + bytes key = 2; + bytes value = 3; + bool exists = 4; + bytes start = 5; + bytes end = 6; + string err = 7; + string print = 8; + int64 time_at = 9; +} + +message Nothing { +} + +message DDomain { + bytes start = 1; + bytes end = 2; +} + +message Iterator { + DDomain domain = 1; + bool valid = 2; + bytes key = 3; + bytes value = 4; +} + +message Stats { + map data = 1; + int64 time_at = 2; +} + +message Init { + string Type = 1; + string Name = 2; + string Dir = 3; +} + +service DB { + rpc init(Init) returns (Entity) {} + rpc get(Entity) returns (Entity) {} + rpc 
getStream(stream Entity) returns (stream Entity) {} + + rpc has(Entity) returns (Entity) {} + rpc set(Entity) returns (Nothing) {} + rpc setSync(Entity) returns (Nothing) {} + rpc delete(Entity) returns (Nothing) {} + rpc deleteSync(Entity) returns (Nothing) {} + rpc iterator(Entity) returns (stream Iterator) {} + rpc reverseIterator(Entity) returns (stream Iterator) {} + // rpc print(Nothing) returns (Entity) {} + rpc stats(Nothing) returns (Stats) {} +} From 1260b75f6341088a1253c18059f0bb34527179dd Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 16 Mar 2018 14:54:15 -0700 Subject: [PATCH 432/515] grpcdb: Better readability for docs and constructor names * Added some docs for NewClient, BindServer, *server.Init * Security level clarified, whether "secure" for https or "insecure" for non-https gRPC connections. --- grpcdb/client.go | 14 ++++++++++++-- grpcdb/example_test.go | 6 +++--- grpcdb/server.go | 23 ++++++++++++++++++++--- 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/grpcdb/client.go b/grpcdb/client.go index 45409a1f9..a09720abc 100644 --- a/grpcdb/client.go +++ b/grpcdb/client.go @@ -6,9 +6,19 @@ import ( protodb "github.com/tendermint/tmlibs/proto" ) -func NewClient(serverAddr string, secure bool) (protodb.DBClient, error) { +// Security defines how the client will talk to the gRPC server. +type Security uint + +const ( + Insecure Security = iota + Secure +) + +// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr. +// Use kind to set the level of security to either Secure or Insecure. +func NewClient(serverAddr string, kind Security) (protodb.DBClient, error) { var opts []grpc.DialOption - if !secure { + if kind == Insecure { opts = append(opts, grpc.WithInsecure()) } cc, err := grpc.Dial(serverAddr, opts...) 
diff --git a/grpcdb/example_test.go b/grpcdb/example_test.go index 653180113..451428b97 100644 --- a/grpcdb/example_test.go +++ b/grpcdb/example_test.go @@ -12,12 +12,12 @@ import ( func Example() { addr := ":8998" go func() { - if err := grpcdb.BindRemoteDBServer(addr); err != nil { - log.Fatalf("BindRemoteDBServer: %v", err) + if err := grpcdb.BindServer(addr); err != nil { + log.Fatalf("BindServer: %v", err) } }() - client, err := grpcdb.NewClient(addr, false) + client, err := grpcdb.NewClient(addr, grpcdb.Insecure) if err != nil { log.Fatalf("Failed to create grpcDB client: %v", err) } diff --git a/grpcdb/server.go b/grpcdb/server.go index 26d0ffa9e..c4d115bd7 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -1,8 +1,8 @@ package grpcdb import ( -"log" "context" + "log" "net" "sync" "time" @@ -13,7 +13,10 @@ import ( protodb "github.com/tendermint/tmlibs/proto" ) -func BindRemoteDBServer(addr string, opts ...grpc.ServerOption) error { +// BindServer is a blocking function that sets up a gRPC based +// server at the address supplied, with the gRPC options passed in. +// Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. +func BindServer(addr string, opts ...grpc.ServerOption) error { ln, err := net.Listen("tcp", addr) if err != nil { return err @@ -30,11 +33,24 @@ type server struct { var _ protodb.DBServer = (*server)(nil) +// Init initializes the server's database. Only one type of database +// can be initialized per server. 
+// +// Dir is the directory on the file system in which the DB will be stored(if backed by disk) (TODO: remove) +// +// Name is representative filesystem entry's basepath +// +// Type can be either one of: +// * cleveldb (if built with gcc enabled) +// * fsdb +// * memdB +// * leveldb +// See https://godoc.org/github.com/tendermint/tmlibs/db#DBBackendType func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) { s.mu.Lock() defer s.mu.Unlock() -log.Printf("in: %+v\n", in) + log.Printf("in: %+v\n", in) s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir) return &protodb.Entity{TimeAt: time.Now().Unix()}, nil } @@ -45,6 +61,7 @@ func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothi } var nothing = new(protodb.Nothing) + func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { s.db.DeleteSync(in.Key) return nothing, nil From 5d12e1eb46ef00868fbf74bc2e990d96592b5635 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sat, 17 Mar 2018 16:58:49 -0700 Subject: [PATCH 433/515] remotedb: a client package implementing the db.DB interface Simplified the abstractions to remotedb, a package that allows clients to use whatever database they can in client code without having to switch out their code e.g ```go client, err := remotedb.NewInsecure(":9888") ... // Just like they'd initialize locally in := &remotedb.Init{ Name: "test-remote-db", Type: "leveldb", Dir: "/tmp/dbs", } if err := client.InitRemote(in); err != nil { log.Fatalf("Failed to initialize the database") } v1 := client.Get(k1) client.Set(k9, dog) for itr := client.Iterator(a1, z1); itr.Valid(); itr.Next() { k, v := itr.Key(), itr.Value() dom := itr.Domain() ... 
} ``` --- grpcdb/doc.go | 30 +++++ grpcdb/example_test.go | 2 +- grpcdb/server.go | 13 ++- remotedb/doc.go | 37 +++++++ remotedb/remotedb.go | 226 ++++++++++++++++++++++++++++++++++++++ remotedb/remotedb_test.go | 41 +++++++ 6 files changed, 343 insertions(+), 6 deletions(-) create mode 100644 grpcdb/doc.go create mode 100644 remotedb/doc.go create mode 100644 remotedb/remotedb.go create mode 100644 remotedb/remotedb_test.go diff --git a/grpcdb/doc.go b/grpcdb/doc.go new file mode 100644 index 000000000..a54cab207 --- /dev/null +++ b/grpcdb/doc.go @@ -0,0 +1,30 @@ +/* +grpcdb is the distribution of Tendermint's db.DB instances using +the gRPC transport to decouple local db.DB usages from applications, +to using them over a network in a highly performant manner. + +grpcdb allows users to initialize a database's server like +they would locally and invoke the respective methods of db.DB. + +Most users shouldn't use this package, but should instead use +remotedb. Only the lower level users and database server deployers +should use it, for functionality such as: + + ln, err := net.Listen("tcp", "0.0.0.0:0") + srv := grpcdb.NewServer() + defer srv.Stop() + go func() { + if err := srv.Serve(ln); err != nil { + t.Fatalf("BindServer: %v", err) + } + }() + +or + addr := ":8998" + go func() { + if err := grpcdb.ListenAndServe(addr); err != nil { + log.Fatalf("BindServer: %v", err) + } + }() +*/ +package grpcdb diff --git a/grpcdb/example_test.go b/grpcdb/example_test.go index 451428b97..cbe1abf92 100644 --- a/grpcdb/example_test.go +++ b/grpcdb/example_test.go @@ -12,7 +12,7 @@ import ( func Example() { addr := ":8998" go func() { - if err := grpcdb.BindServer(addr); err != nil { + if err := grpcdb.ListenAndServe(addr); err != nil { log.Fatalf("BindServer: %v", err) } }() diff --git a/grpcdb/server.go b/grpcdb/server.go index c4d115bd7..301f43f23 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -2,7 +2,6 @@ package grpcdb import ( "context" - "log" "net" "sync" "time" 
@@ -13,17 +12,22 @@ import ( protodb "github.com/tendermint/tmlibs/proto" ) -// BindServer is a blocking function that sets up a gRPC based +// ListenAndServe is a blocking function that sets up a gRPC based // server at the address supplied, with the gRPC options passed in. // Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. -func BindServer(addr string, opts ...grpc.ServerOption) error { +func ListenAndServe(addr string, opts ...grpc.ServerOption) error { ln, err := net.Listen("tcp", addr) if err != nil { return err } + srv := NewServer(opts...) + return srv.Serve(ln) +} + +func NewServer(opts ...grpc.ServerOption) *grpc.Server { srv := grpc.NewServer(opts...) protodb.RegisterDBServer(srv, new(server)) - return srv.Serve(ln) + return srv } type server struct { @@ -50,7 +54,6 @@ func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, e s.mu.Lock() defer s.mu.Unlock() - log.Printf("in: %+v\n", in) s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir) return &protodb.Entity{TimeAt: time.Now().Unix()}, nil } diff --git a/remotedb/doc.go b/remotedb/doc.go new file mode 100644 index 000000000..07c95a56a --- /dev/null +++ b/remotedb/doc.go @@ -0,0 +1,37 @@ +/* +remotedb is a package for connecting to distributed Tendermint db.DB +instances. The purpose is to detach difficult deployments such as +CLevelDB that requires gcc or perhaps for databases that require +custom configurations such as extra disk space. It also eases +the burden and cost of deployment of dependencies for databases +to be used by Tendermint developers. Most importantly it is built +over the high performant gRPC transport. + +remotedb's RemoteDB implements db.DB so can be used normally +like other databases. One just has to explicitly connect to the +remote database with a client setup such as: + + client, err := remotedb.NewInsecure(addr) + // Make sure to invoke InitRemote! 
+ if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil { + log.Fatalf("Failed to initialize the remote db") + } + + client.Set(key1, value) + gv1 := client.SetSync(k2, v2) + + client.Delete(k1) + gv2 := client.Get(k1) + + for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() { + ik, iv := itr.Key(), itr.Value() + ds, de := itr.Domain() + } + + stats := client.Stats() + + if !client.Has(dk1) { + client.SetSync(dk1, dv1) + } +*/ +package remotedb diff --git a/remotedb/remotedb.go b/remotedb/remotedb.go new file mode 100644 index 000000000..a110e816c --- /dev/null +++ b/remotedb/remotedb.go @@ -0,0 +1,226 @@ +package remotedb + +import ( + "context" + "fmt" + + "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/grpcdb" + protodb "github.com/tendermint/tmlibs/proto" +) + +type RemoteDB struct { + ctx context.Context + dc protodb.DBClient +} + +func NewSecure(serverAddr string) (*RemoteDB, error) { + return newRemoteDB(grpcdb.NewClient(serverAddr, grpcdb.Secure)) +} + +func NewInsecure(serverAddr string) (*RemoteDB, error) { + return newRemoteDB(grpcdb.NewClient(serverAddr, grpcdb.Insecure)) +} + +func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) { + if err != nil { + return nil, err + } + return &RemoteDB{dc: gdc, ctx: context.Background()}, nil +} + +type Init struct { + Dir string + Name string + Type string +} + +func (rd *RemoteDB) InitRemote(in *Init) error { + _, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name}) + return err +} + +var _ db.DB = (*RemoteDB)(nil) + +// Close is a noop currently +func (rd *RemoteDB) Close() { +} + +func (rd *RemoteDB) Delete(key []byte) { + if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil { + panic(fmt.Sprintf("RemoteDB.Delete: %v", err)) + } +} + +func (rd *RemoteDB) DeleteSync(key []byte) { + if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil { + 
panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err)) + } +} + +func (rd *RemoteDB) Set(key, value []byte) { + if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil { + panic(fmt.Sprintf("RemoteDB.Set: %v", err)) + } +} + +func (rd *RemoteDB) SetSync(key, value []byte) { + if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil { + panic(fmt.Sprintf("RemoteDB.SetSync: %v", err)) + } +} + +func (rd *RemoteDB) Get(key []byte) []byte { + res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Get error: %v", err)) + } + return res.Value +} + +func (rd *RemoteDB) Has(key []byte) bool { + res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Has error: %v", err)) + } + return res.Exists +} + +func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator { + dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err)) + } + return makeReverseIterator(dic) +} + +// TODO: Implement NewBatch +func (rd *RemoteDB) NewBatch() db.Batch { + panic("Unimplemented") +} + +// TODO: Implement Print when db.DB implements a method +// to print to a string and not db.Print to stdout. 
+func (rd *RemoteDB) Print() { + panic("Unimplemented") +} + +func (rd *RemoteDB) Stats() map[string]string { + stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Stats error: %v", err)) + } + if stats == nil { + return nil + } + return stats.Data +} + +func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator { + dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end}) + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err)) + } + return makeIterator(dic) +} + +func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { + return &iterator{dic: dic} +} + +func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { + return &reverseIterator{dric: dric} +} + +type reverseIterator struct { + dric protodb.DB_ReverseIteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +func (rItr *reverseIterator) Valid() bool { + return rItr.cur != nil && rItr.cur.Valid +} + +func (rItr *reverseIterator) Domain() (start, end []byte) { + if rItr.cur == nil || rItr.cur.Domain == nil { + return nil, nil + } + return rItr.cur.Domain.Start, rItr.cur.Domain.End +} + +// Next advances the current reverseIterator +func (rItr *reverseIterator) Next() { + var err error + rItr.cur, err = rItr.dric.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err)) + } +} + +func (rItr *reverseIterator) Key() []byte { + if rItr.cur == nil { + return nil + } + return rItr.cur.Key +} + +func (rItr *reverseIterator) Value() []byte { + if rItr.cur == nil { + return nil + } + return rItr.cur.Value +} + +func (rItr *reverseIterator) Close() { +} + +// iterator implements the db.Iterator by retrieving +// streamed iterators from the remote backend as +// needed. It is NOT safe for concurrent usage, +// matching the behavior of other iterators. 
+type iterator struct { + dic protodb.DB_IteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +func (itr *iterator) Valid() bool { + return itr.cur != nil && itr.cur.Valid +} + +func (itr *iterator) Domain() (start, end []byte) { + if itr.cur == nil || itr.cur.Domain == nil { + return nil, nil + } + return itr.cur.Domain.Start, itr.cur.Domain.End +} + +// Next advances the current iterator +func (itr *iterator) Next() { + var err error + itr.cur, err = itr.dic.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err)) + } +} + +func (itr *iterator) Key() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Key +} + +func (itr *iterator) Value() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Value +} + +func (itr *iterator) Close() { + // TODO: Shut down the iterator +} diff --git a/remotedb/remotedb_test.go b/remotedb/remotedb_test.go new file mode 100644 index 000000000..37ce0c59a --- /dev/null +++ b/remotedb/remotedb_test.go @@ -0,0 +1,41 @@ +package remotedb_test + +import ( + "net" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tmlibs/grpcdb" + "github.com/tendermint/tmlibs/remotedb" +) + +func TestRemoteDB(t *testing.T) { + ln, err := net.Listen("tcp", "0.0.0.0:0") + require.Nil(t, err, "expecting a port to have been assigned on which we can listen") + srv := grpcdb.NewServer() + defer srv.Stop() + go func() { + if err := srv.Serve(ln); err != nil { + t.Fatalf("BindServer: %v", err) + } + }() + + client, err := remotedb.NewInsecure(ln.Addr().String()) + require.Nil(t, err, "expecting a successful client creation") + require.Nil(t, client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"})) + + k1 := []byte("key-1") + v1 := client.Get(k1) + require.Equal(t, 0, len(v1), "expecting no key1 to have been stored") + vv1 := []byte("value-1") + client.Set(k1, vv1) + gv1 := client.Get(k1) + require.Equal(t, gv1, vv1) + + // Deletion 
+ client.Delete(k1) + gv2 := client.Get(k1) + require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") + require.NotEqual(t, len(gv1), len(gv2), "after deletion, not expecting the key to exist anymore") +} From bf16d6453c64c553c224d9334e53ef7a60d298b0 Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Mon, 7 May 2018 22:12:26 +0200 Subject: [PATCH 434/515] Address PR comments --- Gopkg.lock | 74 ++++++++++++++++++++++++++++- Makefile | 2 +- grpcdb/server.go | 4 +- proto/defs.pb.go | 118 ++++++++++++++++++++++------------------------- proto/defs.proto | 7 ++- 5 files changed, 133 insertions(+), 72 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 45b4d2887..32669c198 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -51,6 +51,18 @@ revision = "1adfc126b41513cc696b209667c8656ea7aac67c" version = "v1.0.0" +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + [[projects]] branch = "master" name = "github.com/golang/snappy" @@ -189,6 +201,20 @@ packages = ["ripemd160"] revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686" +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace" + ] + revision = "d11bb6cd8e3c4e60239c9cb20ef68586d74500d0" + [[projects]] name = "golang.org/x/sys" packages = ["unix"] @@ -197,15 +223,59 @@ [[projects]] name = "golang.org/x/text" packages = [ + "collate", + "collate/build", + "internal/colltab", "internal/gen", + "internal/tag", "internal/triegen", "internal/ucd", + "language", + "secure/bidirule", "transform", + "unicode/bidi", "unicode/cldr", - "unicode/norm" + "unicode/norm", + "unicode/rangetable" ] revision = "c01e4764d870b77f8abe5096ee19ad20d80e8075" +[[projects]] + branch = "master" + name = 
"google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "86e600f69ee4704c6efbf6a2a40a5c10700e76c2" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport" + ] + revision = "d11072e7ca9811b1100b80ca0269ac831f06d024" + version = "v1.11.3" + [[projects]] name = "gopkg.in/yaml.v2" packages = ["."] @@ -215,6 +285,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "c33ff784e40965e1cd0ec6232b43e379c6608cb41a9c5c707247742b68c906fb" + inputs-digest = "8b1ff7eb1a874905f0d7772407cfacd3fca77a2214530e633c0f4d7e468a6f92" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Makefile b/Makefile index 0236c480b..0e715ef16 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ GOTOOLS = \ GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-gogo INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf -all: check get_vendor_deps protoc build test install metalinter +all: check get_vendor_deps protoc grpc_dbserver build test install metalinter check: check_tools diff --git a/grpcdb/server.go b/grpcdb/server.go index 301f43f23..1e8495300 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -55,7 +55,7 @@ func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, e defer s.mu.Unlock() s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir) - return &protodb.Entity{TimeAt: time.Now().Unix()}, nil + return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil } func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { @@ -136,7 +136,7 @@ func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) for it.Valid() { start, end := it.Domain() out := &protodb.Iterator{ - Domain: &protodb.DDomain{Start: start, End: end}, + Domain: &protodb.Domain{Start: start, End: end}, Valid: it.Valid(), Key: it.Key(), Value: it.Value(), diff --git a/proto/defs.pb.go b/proto/defs.pb.go index 61f687504..c65b28e08 100644 --- a/proto/defs.pb.go +++ b/proto/defs.pb.go @@ -10,7 +10,7 @@ It is generated from these files: It has these top-level messages: Entity Nothing - DDomain + Domain Iterator Stats Init @@ -38,15 +38,14 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Entity struct { - Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` - Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` - Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` 
- Print string `protobuf:"bytes,8,opt,name=print" json:"print,omitempty"` - TimeAt int64 `protobuf:"varint,9,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` } func (m *Entity) Reset() { *m = Entity{} } @@ -103,16 +102,9 @@ func (m *Entity) GetErr() string { return "" } -func (m *Entity) GetPrint() string { +func (m *Entity) GetCreatedAt() int64 { if m != nil { - return m.Print - } - return "" -} - -func (m *Entity) GetTimeAt() int64 { - if m != nil { - return m.TimeAt + return m.CreatedAt } return 0 } @@ -125,24 +117,24 @@ func (m *Nothing) String() string { return proto.CompactTextString(m) func (*Nothing) ProtoMessage() {} func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -type DDomain struct { +type Domain struct { Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` } -func (m *DDomain) Reset() { *m = DDomain{} } -func (m *DDomain) String() string { return proto.CompactTextString(m) } -func (*DDomain) ProtoMessage() {} -func (*DDomain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Domain) Reset() { *m = Domain{} } +func (m *Domain) String() string { return proto.CompactTextString(m) } +func (*Domain) ProtoMessage() {} +func (*Domain) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{2} } -func (m *DDomain) GetStart() []byte { +func (m *Domain) GetStart() []byte { if m != nil { return m.Start } return nil } -func (m *DDomain) GetEnd() []byte { +func (m *Domain) GetEnd() []byte { if m != nil { return m.End } @@ -150,10 +142,10 @@ func (m *DDomain) GetEnd() []byte { } type Iterator struct { - Domain *DDomain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` - Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` } func (m *Iterator) Reset() { *m = Iterator{} } @@ -161,7 +153,7 @@ func (m *Iterator) String() string { return proto.CompactTextString(m func (*Iterator) ProtoMessage() {} func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } -func (m *Iterator) GetDomain() *DDomain { +func (m *Iterator) GetDomain() *Domain { if m != nil { return m.Domain } @@ -248,7 +240,7 @@ func (m *Init) GetDir() string { func init() { proto.RegisterType((*Entity)(nil), "protodb.Entity") proto.RegisterType((*Nothing)(nil), "protodb.Nothing") - proto.RegisterType((*DDomain)(nil), "protodb.DDomain") + proto.RegisterType((*Domain)(nil), "protodb.Domain") proto.RegisterType((*Iterator)(nil), "protodb.Iterator") proto.RegisterType((*Stats)(nil), "protodb.Stats") proto.RegisterType((*Init)(nil), "protodb.Init") @@ -749,36 +741,36 @@ func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 
0x53, 0xcd, 0x72, 0xd3, 0x4c, - 0x10, 0xf4, 0x4a, 0xb2, 0x64, 0x4d, 0xbe, 0x2f, 0x09, 0x5b, 0x14, 0x6c, 0xf9, 0xa4, 0xd2, 0x49, - 0xfc, 0xb9, 0x12, 0xe7, 0xc0, 0xcf, 0x09, 0x28, 0xe7, 0xe0, 0x4b, 0x0e, 0x32, 0x77, 0x6a, 0x83, - 0x06, 0x67, 0x8b, 0x58, 0x72, 0xed, 0x0e, 0x29, 0xf4, 0x04, 0x3c, 0x00, 0x4f, 0xc4, 0x9b, 0x51, - 0xbb, 0xfa, 0xb1, 0x43, 0x7c, 0x10, 0x27, 0x4d, 0xef, 0x76, 0xf7, 0x8c, 0x5a, 0x23, 0x80, 0x02, - 0xbf, 0x9a, 0xd9, 0x56, 0x57, 0x54, 0xf1, 0xc8, 0x3d, 0x8a, 0xeb, 0xf4, 0x37, 0x83, 0xf0, 0xb2, - 0x24, 0x45, 0x35, 0x3f, 0x06, 0x4f, 0x15, 0x82, 0x25, 0x2c, 0x1b, 0xe7, 0x9e, 0x2a, 0xf8, 0x29, - 0xf8, 0xdf, 0xb0, 0x16, 0x5e, 0xc2, 0xb2, 0xff, 0x72, 0x5b, 0xf2, 0xc7, 0x30, 0xbe, 0x93, 0xb7, - 0xdf, 0x51, 0xf8, 0xee, 0xac, 0x01, 0xfc, 0x09, 0x84, 0xf8, 0x43, 0x19, 0x32, 0x22, 0x48, 0x58, - 0x36, 0xc9, 0x5b, 0x64, 0xd9, 0x86, 0xa4, 0x26, 0x31, 0x6e, 0xd8, 0x0e, 0x58, 0x57, 0x2c, 0x0b, - 0x11, 0x36, 0xae, 0x58, 0xba, 0x3e, 0xa8, 0xb5, 0x88, 0x12, 0x96, 0xc5, 0xb9, 0x2d, 0xad, 0x72, - 0xab, 0x55, 0x49, 0x62, 0xe2, 0xce, 0x1a, 0xc0, 0x9f, 0x42, 0x44, 0x6a, 0x83, 0x9f, 0x25, 0x89, - 0x38, 0x61, 0x99, 0x9f, 0x87, 0x16, 0x7e, 0xa0, 0x34, 0x86, 0xe8, 0xaa, 0xa2, 0x1b, 0x55, 0xae, - 0xd3, 0x73, 0x88, 0x16, 0x8b, 0x6a, 0x23, 0x55, 0xb9, 0x6b, 0xcf, 0x0e, 0xb4, 0xf7, 0xfa, 0xf6, - 0xa9, 0x86, 0xc9, 0x92, 0x50, 0x4b, 0xaa, 0x34, 0xcf, 0x20, 0x2c, 0x9c, 0xda, 0x89, 0x8e, 0xe6, - 0xa7, 0xb3, 0x36, 0xa7, 0x59, 0xeb, 0x9a, 0xb7, 0xf7, 0x6d, 0x14, 0xaa, 0x71, 0x9a, 0xe4, 0x0d, - 0xe8, 0x22, 0xf3, 0x0f, 0x44, 0x16, 0xec, 0x45, 0x96, 0xfe, 0x64, 0x30, 0x5e, 0x91, 0x24, 0xc3, - 0x5f, 0x42, 0x50, 0x48, 0x92, 0x82, 0x25, 0x7e, 0x76, 0x34, 0x17, 0x7d, 0x3f, 0x77, 0x3b, 0x5b, - 0x48, 0x92, 0x97, 0x25, 0xe9, 0x3a, 0x77, 0xac, 0xfd, 0x08, 0xbc, 0xfd, 0x08, 0xa6, 0xaf, 0x21, - 0xee, 0xb9, 0xdd, 0x14, 0xac, 0x09, 0xf4, 0xde, 0x14, 0x5e, 0x13, 0xa8, 0x03, 0xef, 0xbc, 0x37, - 0x2c, 0x7d, 0x0f, 0xc1, 0xb2, 0x54, 0xc4, 0x39, 0x04, 0x9f, 0xea, 0x2d, 0xb6, 0x22, 0x57, 0xdb, - 0xb3, 
0x2b, 0xb9, 0xe9, 0x44, 0xae, 0xb6, 0xde, 0x0b, 0xa5, 0xdd, 0x1b, 0xc6, 0xb9, 0x2d, 0xe7, - 0xbf, 0x02, 0xf0, 0x16, 0x1f, 0x79, 0x06, 0x81, 0xb2, 0x46, 0xff, 0xf7, 0xaf, 0x60, 0x7d, 0xa7, - 0x27, 0x3d, 0x6c, 0xb6, 0x2c, 0x1d, 0xf1, 0x67, 0xe0, 0xaf, 0x91, 0xf8, 0xdf, 0x37, 0x87, 0xa8, - 0x17, 0x10, 0xaf, 0x91, 0x56, 0xa4, 0x51, 0x6e, 0x86, 0x08, 0x32, 0x76, 0xc6, 0xac, 0xff, 0x8d, - 0x34, 0x83, 0xfc, 0x9f, 0x83, 0x6f, 0x0e, 0x8d, 0xb2, 0xfb, 0xee, 0xdd, 0x62, 0x8d, 0xf8, 0x0c, - 0x22, 0x83, 0xb4, 0xaa, 0xcb, 0x2f, 0xc3, 0xf8, 0xaf, 0x20, 0x2c, 0xf0, 0x16, 0x09, 0x87, 0xd1, - 0xcf, 0xed, 0xff, 0x69, 0xe9, 0xc3, 0x3b, 0xcc, 0x61, 0xa2, 0xba, 0xcd, 0x7d, 0x20, 0x78, 0xb4, - 0xfb, 0x0e, 0x2d, 0x27, 0x1d, 0x9d, 0x31, 0xfe, 0x16, 0x4e, 0x34, 0xde, 0xa1, 0x36, 0xb8, 0xfc, - 0x57, 0xe9, 0x0b, 0xf7, 0x43, 0x91, 0xe1, 0x0f, 0x66, 0x99, 0x1e, 0xdf, 0xdf, 0xdb, 0x74, 0x74, - 0x1d, 0xba, 0x83, 0x8b, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x2e, 0x77, 0x07, 0x75, 0x04, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xce, 0xda, 0x8e, 0x13, 0x4f, 0xa1, 0x2d, 0x2b, 0x04, 0xab, 0x4a, 0x48, 0x96, 0x2f, 0x98, + 0x3f, 0x2b, 0xa4, 0x07, 0x7e, 0x4e, 0x14, 0xa5, 0x87, 0x5c, 0x7a, 0x70, 0xb8, 0xa3, 0x6d, 0x3d, + 0xa4, 0x2b, 0x1a, 0xbb, 0xec, 0x0e, 0x15, 0x7e, 0x02, 0x1e, 0x80, 0x27, 0xe1, 0x0d, 0xd1, 0xae, + 0x7f, 0x42, 0x69, 0x0e, 0xe6, 0xe4, 0x99, 0xd9, 0xef, 0xfb, 0x66, 0xf6, 0xf3, 0x2c, 0x40, 0x81, + 0x5f, 0x4c, 0x76, 0xad, 0x2b, 0xaa, 0xf8, 0xc4, 0x7d, 0x8a, 0xf3, 0xe4, 0x37, 0x83, 0xf0, 0xb4, + 0x24, 0x45, 0x35, 0xdf, 0x07, 0x4f, 0x15, 0x82, 0xc5, 0x2c, 0x1d, 0xe7, 0x9e, 0x2a, 0xf8, 0x21, + 0xf8, 0x5f, 0xb1, 0x16, 0x5e, 0xcc, 0xd2, 0x7b, 0xb9, 0x0d, 0xf9, 0x43, 0x18, 0xdf, 0xc8, 0xab, + 0xef, 0x28, 0x7c, 0x57, 0x6b, 0x12, 0xfe, 0x08, 0x42, 0xfc, 0xa1, 0x0c, 0x19, 0x11, 0xc4, 0x2c, + 0x9d, 0xe6, 0x6d, 0x66, 0xd1, 0x86, 0xa4, 0x26, 0x31, 0x6e, 0xd0, 0x2e, 0xb1, 0xaa, 0x58, 0x16, + 0x22, 0x6c, 0x54, 0xb1, 0x74, 0x7d, 0x50, 
0x6b, 0x31, 0x89, 0x59, 0x1a, 0xe5, 0x36, 0xe4, 0x4f, + 0x00, 0x2e, 0x34, 0x4a, 0xc2, 0xe2, 0xb3, 0x24, 0x31, 0x8d, 0x59, 0xea, 0xe7, 0x51, 0x5b, 0x39, + 0xa1, 0x24, 0x82, 0xc9, 0x59, 0x45, 0x97, 0xaa, 0x5c, 0x27, 0x33, 0x08, 0x17, 0xd5, 0x46, 0xaa, + 0x72, 0xdb, 0x8d, 0xed, 0xe8, 0xe6, 0xf5, 0xdd, 0x92, 0x6f, 0x30, 0x5d, 0x12, 0x6a, 0x49, 0x95, + 0xe6, 0x4f, 0x21, 0x2c, 0x1c, 0xdb, 0x91, 0xf6, 0xe6, 0x07, 0x59, 0x6b, 0x4b, 0xd6, 0x88, 0xe6, + 0xed, 0x71, 0x7b, 0x71, 0xd5, 0x08, 0x4d, 0xf3, 0x26, 0xe9, 0x0c, 0xf2, 0x77, 0x18, 0x14, 0xfc, + 0x65, 0x50, 0xf2, 0x93, 0xc1, 0x78, 0x45, 0x92, 0x0c, 0x7f, 0x09, 0x41, 0x21, 0x49, 0x0a, 0x16, + 0xfb, 0xe9, 0xde, 0x5c, 0xf4, 0xed, 0xdc, 0x69, 0xb6, 0x90, 0x24, 0x4f, 0x4b, 0xd2, 0x75, 0xee, + 0x50, 0xfc, 0x31, 0x4c, 0x48, 0x6d, 0xd0, 0x7a, 0xe0, 0x39, 0x0f, 0x42, 0x9b, 0x9e, 0xd0, 0xd1, + 0x1b, 0x88, 0x7a, 0x6c, 0x37, 0x05, 0x6b, 0xec, 0xbb, 0x35, 0x85, 0xe7, 0x6a, 0x4d, 0xf2, 0xde, + 0x7b, 0xcb, 0x92, 0x0f, 0x10, 0x2c, 0x4b, 0x45, 0x9c, 0x43, 0xf0, 0xa9, 0xbe, 0xc6, 0x96, 0xe4, + 0x62, 0x5b, 0x3b, 0x93, 0x9b, 0x8e, 0xe4, 0x62, 0xab, 0xbd, 0x50, 0xda, 0xdd, 0x30, 0xca, 0x6d, + 0x38, 0xff, 0x15, 0x80, 0xb7, 0xf8, 0xc8, 0x53, 0x08, 0x94, 0x15, 0xba, 0xdf, 0x5f, 0xc1, 0xea, + 0x1e, 0x6d, 0x0d, 0x6c, 0x76, 0x2a, 0x19, 0xf1, 0x67, 0xe0, 0xaf, 0x91, 0xf8, 0xbf, 0x27, 0xbb, + 0xa0, 0xc7, 0x10, 0xad, 0x91, 0x56, 0xa4, 0x51, 0x6e, 0x86, 0x10, 0x52, 0x36, 0x63, 0x56, 0xff, + 0x52, 0x9a, 0x41, 0xfa, 0xcf, 0xc1, 0x37, 0xbb, 0x46, 0x39, 0xec, 0x0b, 0xdd, 0x5a, 0x8d, 0x78, + 0x06, 0x13, 0x83, 0xb4, 0xaa, 0xcb, 0x8b, 0x61, 0xf8, 0x57, 0x10, 0x16, 0x78, 0x85, 0x84, 0xc3, + 0xe0, 0xaf, 0xed, 0x6b, 0xb4, 0xf0, 0xe1, 0x1d, 0xe6, 0x30, 0x55, 0xdd, 0xe2, 0xde, 0x21, 0x3c, + 0xd8, 0xfe, 0x87, 0x16, 0x93, 0x8c, 0x66, 0x8c, 0xbf, 0x83, 0x03, 0x8d, 0x37, 0xa8, 0x0d, 0x2e, + 0xff, 0x97, 0xfa, 0xc2, 0xbd, 0x27, 0x32, 0xfc, 0xce, 0x2c, 0x47, 0xfb, 0xb7, 0xf7, 0x36, 0x19, + 0x9d, 0x87, 0xae, 0x70, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xfe, 0x6a, 
0xcc, 0x63, 0x04, 0x00, 0x00, } diff --git a/proto/defs.proto b/proto/defs.proto index c203fd1eb..4b52e9afa 100644 --- a/proto/defs.proto +++ b/proto/defs.proto @@ -10,20 +10,19 @@ message Entity { bytes start = 5; bytes end = 6; string err = 7; - string print = 8; - int64 time_at = 9; + int64 created_at = 8; } message Nothing { } -message DDomain { +message Domain { bytes start = 1; bytes end = 2; } message Iterator { - DDomain domain = 1; + Domain domain = 1; bool valid = 2; bytes key = 3; bytes value = 4; From 2cca5a7a4cb320c7ade7845af154b3116295124d Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Mon, 7 May 2018 23:16:06 +0200 Subject: [PATCH 435/515] Implement TLS/SSL --- Makefile | 20 +++++++++++++++++--- grpcdb/client.go | 11 ++++++----- grpcdb/doc.go | 4 +++- grpcdb/example_test.go | 6 ++++-- grpcdb/server.go | 17 +++++++++++++---- remotedb/remotedb.go | 8 ++------ remotedb/remotedb_test.go | 7 +++++-- 7 files changed, 50 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index 0e715ef16..93312024e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ GOTOOLS = \ github.com/golang/dep/cmd/dep \ github.com/gogo/protobuf/protoc-gen-gogo \ - github.com/gogo/protobuf/gogoproto + github.com/gogo/protobuf/gogoproto \ + github.com/square/certstrap # github.com/alecthomas/gometalinter.v2 \ GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-gogo @@ -66,8 +67,21 @@ get_vendor_deps: ######################################## ### Testing -test: +gen_certs: clean_certs + ## Generating certificates for TLS testing... + certstrap init --common-name "tendermint.com" --passphrase "" + certstrap request-cert -ip "::" --passphrase "" + certstrap sign "::" --CA "tendermint.com" --passphrase "" + mv out/{::.crt,::.key} remotedb + +clean_certs: + ## Cleaning TLS testing certificates... + rm -rf out + rm -f remotedb/{::.crt,::.key} + +test: gen_certs go test -tags gcc $(shell go list ./... 
| grep -v vendor) + make clean_certs test100: @for i in {1..100}; do make test; done @@ -118,7 +132,7 @@ metalinter_all: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all +.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all gen_certs clean_certs grpc_dbserver: protoc -I proto/ proto/defs.proto --go_out=plugins=grpc:proto diff --git a/grpcdb/client.go b/grpcdb/client.go index a09720abc..07fd461ec 100644 --- a/grpcdb/client.go +++ b/grpcdb/client.go @@ -2,6 +2,7 @@ package grpcdb import ( "google.golang.org/grpc" + "google.golang.org/grpc/credentials" protodb "github.com/tendermint/tmlibs/proto" ) @@ -16,12 +17,12 @@ const ( // NewClient creates a gRPC client connected to the bound gRPC server at serverAddr. // Use kind to set the level of security to either Secure or Insecure. -func NewClient(serverAddr string, kind Security) (protodb.DBClient, error) { - var opts []grpc.DialOption - if kind == Insecure { - opts = append(opts, grpc.WithInsecure()) +func NewClient(serverAddr string, serverCert string) (protodb.DBClient, error) { + creds, err := credentials.NewClientTLSFromFile(serverCert, "") + if err != nil { + return nil, err } - cc, err := grpc.Dial(serverAddr, opts...) 
+ cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) if err != nil { return nil, err } diff --git a/grpcdb/doc.go b/grpcdb/doc.go index a54cab207..c92de82d3 100644 --- a/grpcdb/doc.go +++ b/grpcdb/doc.go @@ -21,8 +21,10 @@ should use it, for functionality such as: or addr := ":8998" + cert := "server.crt" + key := "server.key" go func() { - if err := grpcdb.ListenAndServe(addr); err != nil { + if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { log.Fatalf("BindServer: %v", err) } }() diff --git a/grpcdb/example_test.go b/grpcdb/example_test.go index cbe1abf92..5a9c6eed9 100644 --- a/grpcdb/example_test.go +++ b/grpcdb/example_test.go @@ -11,13 +11,15 @@ import ( func Example() { addr := ":8998" + cert := "server.crt" + key := "server.key" go func() { - if err := grpcdb.ListenAndServe(addr); err != nil { + if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { log.Fatalf("BindServer: %v", err) } }() - client, err := grpcdb.NewClient(addr, grpcdb.Insecure) + client, err := grpcdb.NewClient(addr, cert) if err != nil { log.Fatalf("Failed to create grpcDB client: %v", err) } diff --git a/grpcdb/server.go b/grpcdb/server.go index 1e8495300..d8dc1581f 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -7,6 +7,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "github.com/tendermint/tmlibs/db" protodb "github.com/tendermint/tmlibs/proto" @@ -15,19 +16,27 @@ import ( // ListenAndServe is a blocking function that sets up a gRPC based // server at the address supplied, with the gRPC options passed in. // Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. -func ListenAndServe(addr string, opts ...grpc.ServerOption) error { +func ListenAndServe(addr string, cert string, key string, opts ...grpc.ServerOption) error { ln, err := net.Listen("tcp", addr) if err != nil { return err } - srv := NewServer(opts...) + srv, err := NewServer(cert, key, opts...) 
+ if err != nil { + return err + } return srv.Serve(ln) } -func NewServer(opts ...grpc.ServerOption) *grpc.Server { +func NewServer(cert string, key string, opts ...grpc.ServerOption) (*grpc.Server, error) { + creds, err := credentials.NewServerTLSFromFile(cert, key) + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(creds)) srv := grpc.NewServer(opts...) protodb.RegisterDBServer(srv, new(server)) - return srv + return srv, nil } type server struct { diff --git a/remotedb/remotedb.go b/remotedb/remotedb.go index a110e816c..b80cd3fdb 100644 --- a/remotedb/remotedb.go +++ b/remotedb/remotedb.go @@ -14,12 +14,8 @@ type RemoteDB struct { dc protodb.DBClient } -func NewSecure(serverAddr string) (*RemoteDB, error) { - return newRemoteDB(grpcdb.NewClient(serverAddr, grpcdb.Secure)) -} - -func NewInsecure(serverAddr string) (*RemoteDB, error) { - return newRemoteDB(grpcdb.NewClient(serverAddr, grpcdb.Insecure)) +func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) { + return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey)) } func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) { diff --git a/remotedb/remotedb_test.go b/remotedb/remotedb_test.go index 37ce0c59a..a5b77cf5a 100644 --- a/remotedb/remotedb_test.go +++ b/remotedb/remotedb_test.go @@ -11,9 +11,12 @@ import ( ) func TestRemoteDB(t *testing.T) { + cert := "::.crt" + key := "::.key" ln, err := net.Listen("tcp", "0.0.0.0:0") require.Nil(t, err, "expecting a port to have been assigned on which we can listen") - srv := grpcdb.NewServer() + srv, err := grpcdb.NewServer(cert, key) + require.Nil(t, err) defer srv.Stop() go func() { if err := srv.Serve(ln); err != nil { @@ -21,7 +24,7 @@ func TestRemoteDB(t *testing.T) { } }() - client, err := remotedb.NewInsecure(ln.Addr().String()) + client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) require.Nil(t, err, "expecting a successful client creation") require.Nil(t, client.InitRemote(&remotedb.Init{Name: 
"test-remote-db", Type: "leveldb"})) From 55f4ccd4fcdc0359ea70c2c7978313c6a3a17d78 Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Mon, 7 May 2018 23:23:15 +0200 Subject: [PATCH 436/515] CI fix --- Gopkg.lock | 2 +- Makefile | 4 ++-- test.sh | 6 ++++++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 32669c198..96df808a5 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -285,6 +285,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "8b1ff7eb1a874905f0d7772407cfacd3fca77a2214530e633c0f4d7e468a6f92" + inputs-digest = "8aa4ea7ef6d0ff170127eb5bca89c6c37c767d58047159cfd26a431c5cd5e7ad" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Makefile b/Makefile index 93312024e..41e5f1291 100644 --- a/Makefile +++ b/Makefile @@ -72,12 +72,12 @@ gen_certs: clean_certs certstrap init --common-name "tendermint.com" --passphrase "" certstrap request-cert -ip "::" --passphrase "" certstrap sign "::" --CA "tendermint.com" --passphrase "" - mv out/{::.crt,::.key} remotedb + mv out/::.crt out/::.key remotedb clean_certs: ## Cleaning TLS testing certificates... rm -rf out - rm -f remotedb/{::.crt,::.key} + rm -f remotedb/::.crt remotedb/::.key test: gen_certs go test -tags gcc $(shell go list ./... | grep -v vendor) diff --git a/test.sh b/test.sh index b3978d3fe..ecf17fc45 100755 --- a/test.sh +++ b/test.sh @@ -4,6 +4,9 @@ set -e # run the linter # make metalinter_test +# setup certs +make gen_certs + # run the unit tests with coverage echo "" > coverage.txt for d in $(go list ./... | grep -v vendor); do @@ -13,3 +16,6 @@ for d in $(go list ./... 
| grep -v vendor); do rm profile.out fi done + +# cleanup certs +make clean_certs From 39e1567d0ad3ef578bca645c914fc0c6b8f0fbed Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Tue, 8 May 2018 00:53:33 +0200 Subject: [PATCH 437/515] Add iterator tests --- remotedb/remotedb_test.go | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/remotedb/remotedb_test.go b/remotedb/remotedb_test.go index a5b77cf5a..c4014fe66 100644 --- a/remotedb/remotedb_test.go +++ b/remotedb/remotedb_test.go @@ -36,9 +36,38 @@ func TestRemoteDB(t *testing.T) { gv1 := client.Get(k1) require.Equal(t, gv1, vv1) + // Simple iteration + itr := client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + require.Panics(t, itr.Next) + itr.Close() + + // Set some more keys + k2 := []byte("key-2") + v2 := []byte("value-2") + client.Set(k2, v2) + gv2 := client.Get(k2) + require.Equal(t, gv2, v2) + + // More iteration + itr = client.Iterator(nil, nil) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-1")) + require.Equal(t, itr.Value(), []byte("value-1")) + itr.Next() + require.Equal(t, itr.Key(), []byte("key-2")) + require.Equal(t, itr.Value(), []byte("value-2")) + require.Panics(t, itr.Next) + // Deletion client.Delete(k1) - gv2 := client.Get(k1) + client.Delete(k2) + gv1 = client.Get(k1) + gv2 = client.Get(k2) require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") - require.NotEqual(t, len(gv1), len(gv2), "after deletion, not expecting the key to exist anymore") + require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore") + + // TODO Batch tests } From 45514a6013602659e2fa168440d10f46af1289fe Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Tue, 8 May 2018 15:45:49 +0200 Subject: [PATCH 438/515] Address PR comments --- grpcdb/client.go | 2 +- grpcdb/doc.go | 4 ++-- grpcdb/server.go | 4 ++-- 3 files 
changed, 5 insertions(+), 5 deletions(-) diff --git a/grpcdb/client.go b/grpcdb/client.go index 07fd461ec..bae38b1c5 100644 --- a/grpcdb/client.go +++ b/grpcdb/client.go @@ -17,7 +17,7 @@ const ( // NewClient creates a gRPC client connected to the bound gRPC server at serverAddr. // Use kind to set the level of security to either Secure or Insecure. -func NewClient(serverAddr string, serverCert string) (protodb.DBClient, error) { +func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) { creds, err := credentials.NewClientTLSFromFile(serverCert, "") if err != nil { return nil, err diff --git a/grpcdb/doc.go b/grpcdb/doc.go index c92de82d3..0d8e380ce 100644 --- a/grpcdb/doc.go +++ b/grpcdb/doc.go @@ -21,8 +21,8 @@ should use it, for functionality such as: or addr := ":8998" - cert := "server.crt" - key := "server.key" + cert := "server.crt" + key := "server.key" go func() { if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { log.Fatalf("BindServer: %v", err) diff --git a/grpcdb/server.go b/grpcdb/server.go index d8dc1581f..9b00be43f 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -16,7 +16,7 @@ import ( // ListenAndServe is a blocking function that sets up a gRPC based // server at the address supplied, with the gRPC options passed in. // Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. 
-func ListenAndServe(addr string, cert string, key string, opts ...grpc.ServerOption) error { +func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error { ln, err := net.Listen("tcp", addr) if err != nil { return err @@ -28,7 +28,7 @@ func ListenAndServe(addr string, cert string, key string, opts ...grpc.ServerOpt return srv.Serve(ln) } -func NewServer(cert string, key string, opts ...grpc.ServerOption) (*grpc.Server, error) { +func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) { creds, err := credentials.NewServerTLSFromFile(cert, key) if err != nil { return nil, err From 0b6d101c772579bff6c75d4611b9f8c2eae41a1e Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Tue, 8 May 2018 16:38:39 +0200 Subject: [PATCH 439/515] Implement batch operations --- grpcdb/server.go | 26 +++++ proto/defs.pb.go | 216 +++++++++++++++++++++++++++++++------- proto/defs.proto | 15 +++ remotedb/remotedb.go | 46 +++++++- remotedb/remotedb_test.go | 34 +++++- 5 files changed, 294 insertions(+), 43 deletions(-) diff --git a/grpcdb/server.go b/grpcdb/server.go index 9b00be43f..d4cfe4433 100644 --- a/grpcdb/server.go +++ b/grpcdb/server.go @@ -169,3 +169,29 @@ func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error stats := s.db.Stats() return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil } + +func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { + return s.batchWrite(c, b, false) +} + +func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { + return s.batchWrite(c, b, true) +} + +func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { + bat := s.db.NewBatch() + for _, op := range b.Ops { + switch op.Type { + case protodb.Operation_SET: + bat.Set(op.Entity.Key, op.Entity.Value) + case protodb.Operation_DELETE: + bat.Delete(op.Entity.Key) + } + } + if sync { + bat.WriteSync() + 
} else { + bat.Write() + } + return nothing, nil +} diff --git a/proto/defs.pb.go b/proto/defs.pb.go index c65b28e08..4d9f0b272 100644 --- a/proto/defs.pb.go +++ b/proto/defs.pb.go @@ -8,6 +8,8 @@ It is generated from these files: defs.proto It has these top-level messages: + Batch + Operation Entity Nothing Domain @@ -37,6 +39,67 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type Operation_Type int32 + +const ( + Operation_SET Operation_Type = 0 + Operation_DELETE Operation_Type = 1 +) + +var Operation_Type_name = map[int32]string{ + 0: "SET", + 1: "DELETE", +} +var Operation_Type_value = map[string]int32{ + "SET": 0, + "DELETE": 1, +} + +func (x Operation_Type) String() string { + return proto.EnumName(Operation_Type_name, int32(x)) +} +func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +type Batch struct { + Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` +} + +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Batch) GetOps() []*Operation { + if m != nil { + return m.Ops + } + return nil +} + +type Operation struct { + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Operation) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *Operation) GetType() Operation_Type { + if m != nil { + return 
m.Type + } + return Operation_SET +} + type Entity struct { Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` @@ -51,7 +114,7 @@ type Entity struct { func (m *Entity) Reset() { *m = Entity{} } func (m *Entity) String() string { return proto.CompactTextString(m) } func (*Entity) ProtoMessage() {} -func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *Entity) GetId() int32 { if m != nil { @@ -115,7 +178,7 @@ type Nothing struct { func (m *Nothing) Reset() { *m = Nothing{} } func (m *Nothing) String() string { return proto.CompactTextString(m) } func (*Nothing) ProtoMessage() {} -func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type Domain struct { Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` @@ -125,7 +188,7 @@ type Domain struct { func (m *Domain) Reset() { *m = Domain{} } func (m *Domain) String() string { return proto.CompactTextString(m) } func (*Domain) ProtoMessage() {} -func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *Domain) GetStart() []byte { if m != nil { @@ -151,7 +214,7 @@ type Iterator struct { func (m *Iterator) Reset() { *m = Iterator{} } func (m *Iterator) String() string { return proto.CompactTextString(m) } func (*Iterator) ProtoMessage() {} -func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *Iterator) GetDomain() *Domain { if m != nil { @@ -189,7 +252,7 @@ type Stats struct { func (m *Stats) Reset() { *m = Stats{} } func (m *Stats) String() string { return 
proto.CompactTextString(m) } func (*Stats) ProtoMessage() {} -func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *Stats) GetData() map[string]string { if m != nil { @@ -214,7 +277,7 @@ type Init struct { func (m *Init) Reset() { *m = Init{} } func (m *Init) String() string { return proto.CompactTextString(m) } func (*Init) ProtoMessage() {} -func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *Init) GetType() string { if m != nil { @@ -238,12 +301,15 @@ func (m *Init) GetDir() string { } func init() { + proto.RegisterType((*Batch)(nil), "protodb.Batch") + proto.RegisterType((*Operation)(nil), "protodb.Operation") proto.RegisterType((*Entity)(nil), "protodb.Entity") proto.RegisterType((*Nothing)(nil), "protodb.Nothing") proto.RegisterType((*Domain)(nil), "protodb.Domain") proto.RegisterType((*Iterator)(nil), "protodb.Iterator") proto.RegisterType((*Stats)(nil), "protodb.Stats") proto.RegisterType((*Init)(nil), "protodb.Init") + proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value) } // Reference imports to suppress errors if they are not otherwise used. 
@@ -269,6 +335,8 @@ type DBClient interface { ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) // rpc print(Nothing) returns (Entity) {} Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) + BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) + BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) } type dBClient struct { @@ -446,6 +514,24 @@ func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOpti return out, nil } +func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { + out := new(Nothing) + err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for DB service type DBServer interface { @@ -461,6 +547,8 @@ type DBServer interface { ReverseIterator(*Entity, DB_ReverseIteratorServer) error // rpc print(Nothing) returns (Entity) {} Stats(context.Context, *Nothing) (*Stats, error) + BatchWrite(context.Context, *Batch) (*Nothing, error) + BatchWriteSync(context.Context, *Batch) (*Nothing, error) } func RegisterDBServer(s *grpc.Server, srv DBServer) { @@ -679,6 +767,42 @@ func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{ return interceptor(ctx, in, info, handler) } +func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWrite(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Batch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).BatchWriteSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protodb.DB/BatchWriteSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch)) + } + return interceptor(ctx, in, info, handler) +} + var _DB_serviceDesc = grpc.ServiceDesc{ ServiceName: "protodb.DB", HandlerType: (*DBServer)(nil), @@ -715,6 +839,14 @@ var _DB_serviceDesc = grpc.ServiceDesc{ MethodName: 
"stats", Handler: _DB_Stats_Handler, }, + { + MethodName: "batchWrite", + Handler: _DB_BatchWrite_Handler, + }, + { + MethodName: "batchWriteSync", + Handler: _DB_BatchWriteSync_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -740,37 +872,43 @@ var _DB_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xce, 0xda, 0x8e, 0x13, 0x4f, 0xa1, 0x2d, 0x2b, 0x04, 0xab, 0x4a, 0x48, 0x96, 0x2f, 0x98, - 0x3f, 0x2b, 0xa4, 0x07, 0x7e, 0x4e, 0x14, 0xa5, 0x87, 0x5c, 0x7a, 0x70, 0xb8, 0xa3, 0x6d, 0x3d, - 0xa4, 0x2b, 0x1a, 0xbb, 0xec, 0x0e, 0x15, 0x7e, 0x02, 0x1e, 0x80, 0x27, 0xe1, 0x0d, 0xd1, 0xae, - 0x7f, 0x42, 0x69, 0x0e, 0xe6, 0xe4, 0x99, 0xd9, 0xef, 0xfb, 0x66, 0xf6, 0xf3, 0x2c, 0x40, 0x81, - 0x5f, 0x4c, 0x76, 0xad, 0x2b, 0xaa, 0xf8, 0xc4, 0x7d, 0x8a, 0xf3, 0xe4, 0x37, 0x83, 0xf0, 0xb4, - 0x24, 0x45, 0x35, 0xdf, 0x07, 0x4f, 0x15, 0x82, 0xc5, 0x2c, 0x1d, 0xe7, 0x9e, 0x2a, 0xf8, 0x21, - 0xf8, 0x5f, 0xb1, 0x16, 0x5e, 0xcc, 0xd2, 0x7b, 0xb9, 0x0d, 0xf9, 0x43, 0x18, 0xdf, 0xc8, 0xab, - 0xef, 0x28, 0x7c, 0x57, 0x6b, 0x12, 0xfe, 0x08, 0x42, 0xfc, 0xa1, 0x0c, 0x19, 0x11, 0xc4, 0x2c, - 0x9d, 0xe6, 0x6d, 0x66, 0xd1, 0x86, 0xa4, 0x26, 0x31, 0x6e, 0xd0, 0x2e, 0xb1, 0xaa, 0x58, 0x16, - 0x22, 0x6c, 0x54, 0xb1, 0x74, 0x7d, 0x50, 0x6b, 0x31, 0x89, 0x59, 0x1a, 0xe5, 0x36, 0xe4, 0x4f, - 0x00, 0x2e, 0x34, 0x4a, 0xc2, 0xe2, 0xb3, 0x24, 0x31, 0x8d, 0x59, 0xea, 0xe7, 0x51, 0x5b, 0x39, - 0xa1, 0x24, 0x82, 0xc9, 0x59, 0x45, 0x97, 0xaa, 0x5c, 0x27, 0x33, 0x08, 0x17, 0xd5, 0x46, 0xaa, - 0x72, 0xdb, 0x8d, 0xed, 0xe8, 0xe6, 0xf5, 0xdd, 0x92, 0x6f, 0x30, 0x5d, 0x12, 0x6a, 0x49, 0x95, - 0xe6, 0x4f, 0x21, 0x2c, 0x1c, 0xdb, 0x91, 0xf6, 0xe6, 0x07, 0x59, 0x6b, 0x4b, 0xd6, 0x88, 0xe6, - 0xed, 0x71, 0x7b, 0x71, 0xd5, 0x08, 0x4d, 0xf3, 0x26, 0xe9, 0x0c, 0xf2, 0x77, 0x18, 0x14, 0xfc, 
- 0x65, 0x50, 0xf2, 0x93, 0xc1, 0x78, 0x45, 0x92, 0x0c, 0x7f, 0x09, 0x41, 0x21, 0x49, 0x0a, 0x16, - 0xfb, 0xe9, 0xde, 0x5c, 0xf4, 0xed, 0xdc, 0x69, 0xb6, 0x90, 0x24, 0x4f, 0x4b, 0xd2, 0x75, 0xee, - 0x50, 0xfc, 0x31, 0x4c, 0x48, 0x6d, 0xd0, 0x7a, 0xe0, 0x39, 0x0f, 0x42, 0x9b, 0x9e, 0xd0, 0xd1, - 0x1b, 0x88, 0x7a, 0x6c, 0x37, 0x05, 0x6b, 0xec, 0xbb, 0x35, 0x85, 0xe7, 0x6a, 0x4d, 0xf2, 0xde, - 0x7b, 0xcb, 0x92, 0x0f, 0x10, 0x2c, 0x4b, 0x45, 0x9c, 0x43, 0xf0, 0xa9, 0xbe, 0xc6, 0x96, 0xe4, - 0x62, 0x5b, 0x3b, 0x93, 0x9b, 0x8e, 0xe4, 0x62, 0xab, 0xbd, 0x50, 0xda, 0xdd, 0x30, 0xca, 0x6d, - 0x38, 0xff, 0x15, 0x80, 0xb7, 0xf8, 0xc8, 0x53, 0x08, 0x94, 0x15, 0xba, 0xdf, 0x5f, 0xc1, 0xea, - 0x1e, 0x6d, 0x0d, 0x6c, 0x76, 0x2a, 0x19, 0xf1, 0x67, 0xe0, 0xaf, 0x91, 0xf8, 0xbf, 0x27, 0xbb, - 0xa0, 0xc7, 0x10, 0xad, 0x91, 0x56, 0xa4, 0x51, 0x6e, 0x86, 0x10, 0x52, 0x36, 0x63, 0x56, 0xff, - 0x52, 0x9a, 0x41, 0xfa, 0xcf, 0xc1, 0x37, 0xbb, 0x46, 0x39, 0xec, 0x0b, 0xdd, 0x5a, 0x8d, 0x78, - 0x06, 0x13, 0x83, 0xb4, 0xaa, 0xcb, 0x8b, 0x61, 0xf8, 0x57, 0x10, 0x16, 0x78, 0x85, 0x84, 0xc3, - 0xe0, 0xaf, 0xed, 0x6b, 0xb4, 0xf0, 0xe1, 0x1d, 0xe6, 0x30, 0x55, 0xdd, 0xe2, 0xde, 0x21, 0x3c, - 0xd8, 0xfe, 0x87, 0x16, 0x93, 0x8c, 0x66, 0x8c, 0xbf, 0x83, 0x03, 0x8d, 0x37, 0xa8, 0x0d, 0x2e, - 0xff, 0x97, 0xfa, 0xc2, 0xbd, 0x27, 0x32, 0xfc, 0xce, 0x2c, 0x47, 0xfb, 0xb7, 0xf7, 0x36, 0x19, - 0x9d, 0x87, 0xae, 0x70, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xfe, 0x6a, 0xcc, 0x63, 0x04, - 0x00, 0x00, + // 606 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, + 0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95, + 0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8, + 0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 
0xde, 0x7b, 0xb3, 0xf3, + 0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2, + 0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef, + 0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01, + 0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b, + 0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b, + 0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6, + 0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44, + 0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17, + 0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3, + 0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7, + 0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43, + 0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08, + 0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e, + 0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a, + 0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b, + 0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b, + 0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58, + 0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92, + 0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a, + 0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e, + 0xe1, 0xd8, 
0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72, + 0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a, + 0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0, + 0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde, + 0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03, + 0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72, + 0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9, + 0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c, + 0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 0xd8, 0xd5, 0x74, + 0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7, + 0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb, + 0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75, + 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00, } diff --git a/proto/defs.proto b/proto/defs.proto index 4b52e9afa..70471f234 100644 --- a/proto/defs.proto +++ b/proto/defs.proto @@ -2,6 +2,19 @@ syntax = "proto3"; package protodb; +message Batch { + repeated Operation ops = 1; +} + +message Operation { + Entity entity = 1; + enum Type { + SET = 0; + DELETE = 1; + } + Type type = 2; +} + message Entity { int32 id = 1; bytes key = 2; @@ -53,4 +66,6 @@ service DB { rpc reverseIterator(Entity) returns (stream Iterator) {} // rpc print(Nothing) returns (Entity) {} rpc stats(Nothing) returns (Stats) {} + rpc batchWrite(Batch) returns (Nothing) {} + rpc batchWriteSync(Batch) returns (Nothing) {} } diff --git a/remotedb/remotedb.go b/remotedb/remotedb.go index b80cd3fdb..f6e4d9c17 100644 --- 
a/remotedb/remotedb.go +++ b/remotedb/remotedb.go @@ -90,9 +90,11 @@ func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator { return makeReverseIterator(dic) } -// TODO: Implement NewBatch func (rd *RemoteDB) NewBatch() db.Batch { - panic("Unimplemented") + return &batch{ + db: rd, + ops: nil, + } } // TODO: Implement Print when db.DB implements a method @@ -218,5 +220,43 @@ func (itr *iterator) Value() []byte { } func (itr *iterator) Close() { - // TODO: Shut down the iterator + err := itr.dic.CloseSend() + if err != nil { + panic(fmt.Sprintf("Error closing iterator: %v", err)) + } +} + +type batch struct { + db *RemoteDB + ops []*protodb.Operation +} + +var _ db.Batch = (*batch)(nil) + +func (bat *batch) Set(key, value []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key, Value: value}, + Type: protodb.Operation_SET, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Delete(key []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key}, + Type: protodb.Operation_DELETE, + } + bat.ops = append(bat.ops, op) +} + +func (bat *batch) Write() { + if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err)) + } +} + +func (bat *batch) WriteSync() { + if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { + panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) + } } diff --git a/remotedb/remotedb_test.go b/remotedb/remotedb_test.go index c4014fe66..6bc0c77bd 100644 --- a/remotedb/remotedb_test.go +++ b/remotedb/remotedb_test.go @@ -60,6 +60,7 @@ func TestRemoteDB(t *testing.T) { require.Equal(t, itr.Key(), []byte("key-2")) require.Equal(t, itr.Value(), []byte("value-2")) require.Panics(t, itr.Next) + itr.Close() // Deletion client.Delete(k1) @@ -69,5 +70,36 @@ func TestRemoteDB(t *testing.T) { require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") require.Equal(t, 
len(gv1), 0, "after deletion, not expecting the key to exist anymore") - // TODO Batch tests + // Batch tests - set + k3 := []byte("key-3") + k4 := []byte("key-4") + k5 := []byte("key-5") + v3 := []byte("value-3") + v4 := []byte("value-4") + v5 := []byte("value-5") + bat := client.NewBatch() + bat.Set(k3, v3) + bat.Set(k4, v4) + rv3 := client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored") + rv4 := client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored") + bat.Write() + rv3 = client.Get(k3) + require.Equal(t, rv3, v3, "expecting k3 to have been stored") + rv4 = client.Get(k4) + require.Equal(t, rv4, v4, "expecting k4 to have been stored") + + // Batch tests - set and delete + bat = client.NewBatch() + bat.Delete(k4) + bat.Set(k5, v5) + bat.Delete(k3) + bat.WriteSync() + rv3 = client.Get(k3) + require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + rv5 := client.Get(k5) + require.Equal(t, rv5, v5, "expecting k5 to have been stored") } From 20be8c75e53523b022f22e4e306ba8a4da034e80 Mon Sep 17 00:00:00 2001 From: Christopher Goes Date: Tue, 8 May 2018 17:13:13 +0200 Subject: [PATCH 440/515] Tweak testcases --- remotedb/remotedb_test.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/remotedb/remotedb_test.go b/remotedb/remotedb_test.go index 6bc0c77bd..cbe9d9095 100644 --- a/remotedb/remotedb_test.go +++ b/remotedb/remotedb_test.go @@ -47,7 +47,9 @@ func TestRemoteDB(t *testing.T) { // Set some more keys k2 := []byte("key-2") v2 := []byte("value-2") - client.Set(k2, v2) + client.SetSync(k2, v2) + has := client.Has(k2) + require.True(t, has) gv2 := client.Get(k2) require.Equal(t, gv2, v2) @@ -64,7 +66,7 @@ func TestRemoteDB(t *testing.T) { // Deletion client.Delete(k1) - client.Delete(k2) + client.DeleteSync(k2) gv1 = client.Get(k1) gv2 = client.Get(k2) require.Equal(t, 
len(gv2), 0, "after deletion, not expecting the key to exist anymore") @@ -90,16 +92,24 @@ func TestRemoteDB(t *testing.T) { rv4 = client.Get(k4) require.Equal(t, rv4, v4, "expecting k4 to have been stored") - // Batch tests - set and delete + // Batch tests - deletion bat = client.NewBatch() bat.Delete(k4) - bat.Set(k5, v5) bat.Delete(k3) bat.WriteSync() rv3 = client.Get(k3) require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") rv4 = client.Get(k4) require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") + + // Batch tests - set and delete + bat = client.NewBatch() + bat.Set(k4, v4) + bat.Set(k5, v5) + bat.Delete(k4) + bat.WriteSync() + rv4 = client.Get(k4) + require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") rv5 := client.Get(k5) require.Equal(t, rv5, v5, "expecting k5 to have been stored") } From 2e41756b55929c5fbf2a57e2f1a6172224141be5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 10 May 2018 20:42:10 -0700 Subject: [PATCH 441/515] Add logjack command --- CHANGELOG.md | 1 + autofile/cmd/logjack.go | 108 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 autofile/cmd/logjack.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d1cfceb9..9db04f131 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES: + - [autofile] logjack command for piping stdin to a rotating file - [common] ASCIITrim() ## 0.8.2 (April 23rd, 2018) diff --git a/autofile/cmd/logjack.go b/autofile/cmd/logjack.go new file mode 100644 index 000000000..d475397bb --- /dev/null +++ b/autofile/cmd/logjack.go @@ -0,0 +1,108 @@ +package main + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + + auto "github.com/tendermint/tmlibs/autofile" + cmn "github.com/tendermint/tmlibs/common" +) + +const Version = "0.0.1" +const sleepSeconds = 1 // Every second +const readBufferSize = 1024 // 1KB at a time + +// Parse command-line options +func parseFlags() (headPath string, chopSize int64, 
limitSize int64, version bool) { + var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + var chopSizeStr, limitSizeStr string + flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") + flagSet.StringVar(&chopSizeStr, "chop", "1M", "Move file if greater than this") + flagSet.StringVar(&limitSizeStr, "limit", "1G", "Only keep this much (for each specified file). Remove old files.") + flagSet.BoolVar(&version, "version", false, "Version") + flagSet.Parse(os.Args[1:]) + chopSize = parseBytesize(chopSizeStr) + limitSize = parseBytesize(limitSizeStr) + return +} + +func main() { + + // Read options + headPath, chopSize, limitSize, version := parseFlags() + if version { + fmt.Printf("logjack version %v\n", Version) + return + } + + // Open Group + group, err := auto.OpenGroup(headPath) + if err != nil { + fmt.Printf("logjack couldn't create output file %v\n", headPath) + os.Exit(1) + } + group.SetHeadSizeLimit(chopSize) + group.SetTotalSizeLimit(limitSize) + err = group.Start() + if err != nil { + fmt.Printf("logjack couldn't start with file %v\n", headPath) + os.Exit(1) + } + + go func() { + // Forever, read from stdin and write to AutoFile. 
+		buf := make([]byte, readBufferSize)
+		for {
+			n, err := os.Stdin.Read(buf)
+			group.Write(buf[:n])
+			group.Flush()
+			if err != nil {
+				group.Stop()
+				if err == io.EOF {
+					os.Exit(0)
+				} else {
+					fmt.Println("logjack errored")
+					os.Exit(1)
+				}
+			}
+		}
+	}()
+
+	// Trap signal
+	cmn.TrapSignal(func() {
+		fmt.Println("logjack shutting down")
+	})
+}
+
+func parseBytesize(chopSize string) int64 {
+	// Handle suffix multiplier
+	var multiplier int64 = 1
+	if strings.HasSuffix(chopSize, "T") {
+		multiplier = 1024 * 1024 * 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "G") {
+		multiplier = 1024 * 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "M") {
+		multiplier = 1024 * 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+	if strings.HasSuffix(chopSize, "K") {
+		multiplier = 1024
+		chopSize = chopSize[:len(chopSize)-1]
+	}
+
+	// Parse the numeric part
+	chopSizeInt, err := strconv.Atoi(chopSize)
+	if err != nil {
+		panic(err)
+	}
+
+	return int64(chopSizeInt) * multiplier
+}

From a8fcf45624613ba0a60ca7640363e0fd074fbaef Mon Sep 17 00:00:00 2001
From: Jae Kwon
Date: Thu, 10 May 2018 20:58:28 -0700
Subject: [PATCH 442/515] Change defaults to 100M and 10G respectively

---
 autofile/cmd/logjack.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/autofile/cmd/logjack.go b/autofile/cmd/logjack.go
index d475397bb..f2739a7e5 100644
--- a/autofile/cmd/logjack.go
+++ b/autofile/cmd/logjack.go
@@ -21,8 +21,8 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo
 	var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
 	var chopSizeStr, limitSizeStr string
 	flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.")
-	flagSet.StringVar(&chopSizeStr, "chop", "1M", "Move file if greater than this")
-	flagSet.StringVar(&limitSizeStr, "limit", "1G", "Only keep this much (for each specified file). 
Remove old files.") + flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") + flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.") flagSet.BoolVar(&version, "version", false, "Version") flagSet.Parse(os.Args[1:]) chopSize = parseBytesize(chopSizeStr) From 45caff1a20857c7557d1cff5d552e5b8bec8ded5 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 14 May 2018 15:21:29 -0400 Subject: [PATCH 443/515] changelog and version --- CHANGELOG.md | 2 +- version/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9db04f131..d87f094b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 0.8.3 (develop branch) +## 0.8.3 (May 14th, 2018) FEATURES: diff --git a/version/version.go b/version/version.go index 107f5cf3a..40472c9a9 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.2" +const Version = "0.8.3" From 468be0f8d6115c1c710a2a50cf19a7b3bc6a7040 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 14 May 2018 15:49:00 -0400 Subject: [PATCH 444/515] mv remotedb, proto, grpcdb all under db/remotedb --- CHANGELOG.md | 2 + Makefile | 6 +- {remotedb => db/remotedb}/doc.go | 0 {grpcdb => db/remotedb/grpcdb}/client.go | 2 +- {grpcdb => db/remotedb/grpcdb}/doc.go | 0 .../remotedb/grpcdb}/example_test.go | 4 +- {grpcdb => db/remotedb/grpcdb}/server.go | 2 +- {proto => db/remotedb/proto}/defs.pb.go | 340 +++++++++++++----- {proto => db/remotedb/proto}/defs.proto | 0 {remotedb => db/remotedb}/remotedb.go | 4 +- {remotedb => db/remotedb}/remotedb_test.go | 14 +- 11 files changed, 274 insertions(+), 100 deletions(-) rename {remotedb => db/remotedb}/doc.go (100%) rename {grpcdb => db/remotedb/grpcdb}/client.go (92%) rename {grpcdb => db/remotedb/grpcdb}/doc.go (100%) rename {grpcdb => db/remotedb/grpcdb}/example_test.go (90%) rename {grpcdb => 
db/remotedb/grpcdb}/server.go (98%) rename {proto => db/remotedb/proto}/defs.pb.go (66%) rename {proto => db/remotedb/proto}/defs.proto (100%) rename {remotedb => db/remotedb}/remotedb.go (98%) rename {remotedb => db/remotedb}/remotedb_test.go (90%) diff --git a/CHANGELOG.md b/CHANGELOG.md index d87f094b0..06f9f08e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ FEATURES: + - [db/remotedb] New DB type using an external CLevelDB process via + GRPC - [autofile] logjack command for piping stdin to a rotating file - [common] ASCIITrim() diff --git a/Makefile b/Makefile index 41e5f1291..efef45996 100644 --- a/Makefile +++ b/Makefile @@ -72,12 +72,12 @@ gen_certs: clean_certs certstrap init --common-name "tendermint.com" --passphrase "" certstrap request-cert -ip "::" --passphrase "" certstrap sign "::" --CA "tendermint.com" --passphrase "" - mv out/::.crt out/::.key remotedb + mv out/::.crt out/::.key db/remotedb clean_certs: ## Cleaning TLS testing certificates... rm -rf out - rm -f remotedb/::.crt remotedb/::.key + rm -f db/remotedb/::.crt db/remotedb/::.key test: gen_certs go test -tags gcc $(shell go list ./... 
| grep -v vendor) @@ -135,4 +135,4 @@ metalinter_all: .PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all gen_certs clean_certs grpc_dbserver: - protoc -I proto/ proto/defs.proto --go_out=plugins=grpc:proto + protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto diff --git a/remotedb/doc.go b/db/remotedb/doc.go similarity index 100% rename from remotedb/doc.go rename to db/remotedb/doc.go diff --git a/grpcdb/client.go b/db/remotedb/grpcdb/client.go similarity index 92% rename from grpcdb/client.go rename to db/remotedb/grpcdb/client.go index bae38b1c5..86aa12c7f 100644 --- a/grpcdb/client.go +++ b/db/remotedb/grpcdb/client.go @@ -4,7 +4,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - protodb "github.com/tendermint/tmlibs/proto" + protodb "github.com/tendermint/tmlibs/db/remotedb/proto" ) // Security defines how the client will talk to the gRPC server. 
diff --git a/grpcdb/doc.go b/db/remotedb/grpcdb/doc.go similarity index 100% rename from grpcdb/doc.go rename to db/remotedb/grpcdb/doc.go diff --git a/grpcdb/example_test.go b/db/remotedb/grpcdb/example_test.go similarity index 90% rename from grpcdb/example_test.go rename to db/remotedb/grpcdb/example_test.go index 5a9c6eed9..827a1cf36 100644 --- a/grpcdb/example_test.go +++ b/db/remotedb/grpcdb/example_test.go @@ -5,8 +5,8 @@ import ( "context" "log" - grpcdb "github.com/tendermint/tmlibs/grpcdb" - protodb "github.com/tendermint/tmlibs/proto" + grpcdb "github.com/tendermint/tmlibs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tmlibs/db/remotedb/proto" ) func Example() { diff --git a/grpcdb/server.go b/db/remotedb/grpcdb/server.go similarity index 98% rename from grpcdb/server.go rename to db/remotedb/grpcdb/server.go index d4cfe4433..8320c0517 100644 --- a/grpcdb/server.go +++ b/db/remotedb/grpcdb/server.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc/credentials" "github.com/tendermint/tmlibs/db" - protodb "github.com/tendermint/tmlibs/proto" + protodb "github.com/tendermint/tmlibs/db/remotedb/proto" ) // ListenAndServe is a blocking function that sets up a gRPC based diff --git a/proto/defs.pb.go b/db/remotedb/proto/defs.pb.go similarity index 66% rename from proto/defs.pb.go rename to db/remotedb/proto/defs.pb.go index 4d9f0b272..86b8f9b8d 100644 --- a/proto/defs.pb.go +++ b/db/remotedb/proto/defs.pb.go @@ -1,22 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: defs.proto -/* -Package protodb is a generated protocol buffer package. 
- -It is generated from these files: - defs.proto - -It has these top-level messages: - Batch - Operation - Entity - Nothing - Domain - Iterator - Stats - Init -*/ package protodb import proto "github.com/golang/protobuf/proto" @@ -58,16 +42,40 @@ var Operation_Type_value = map[string]int32{ func (x Operation_Type) String() string { return proto.EnumName(Operation_Type_name, int32(x)) } -func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } +func (Operation_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{1, 0} +} type Batch struct { - Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` + Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{0} +} +func (m *Batch) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Batch.Unmarshal(m, b) +} +func (m *Batch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Batch.Marshal(b, m, deterministic) +} +func (dst *Batch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Batch.Merge(dst, src) +} +func (m *Batch) XXX_Size() int { + return xxx_messageInfo_Batch.Size(m) +} +func (m *Batch) XXX_DiscardUnknown() { + xxx_messageInfo_Batch.DiscardUnknown(m) } -func (m *Batch) Reset() { *m = Batch{} } -func (m *Batch) String() string { return proto.CompactTextString(m) } -func (*Batch) ProtoMessage() {} -func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Batch proto.InternalMessageInfo func (m *Batch) GetOps() []*Operation { if m != nil { @@ -77,14 +85,36 @@ func (m 
*Batch) GetOps() []*Operation { } type Operation struct { - Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` - Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{1} +} +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (dst *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(dst, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) } -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *Operation) GetEntity() *Entity { if m != nil { @@ -101,20 +131,42 @@ func (m *Operation) GetType() Operation_Type { } type Entity struct { - Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" 
json:"value,omitempty"` - Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` - Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` - Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` - CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Entity) Reset() { *m = Entity{} } -func (m *Entity) String() string { return proto.CompactTextString(m) } -func (*Entity) ProtoMessage() {} -func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{2} +} +func (m *Entity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Entity.Unmarshal(m, b) +} +func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Entity.Marshal(b, m, deterministic) +} +func (dst *Entity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entity.Merge(dst, src) +} +func 
(m *Entity) XXX_Size() int { + return xxx_messageInfo_Entity.Size(m) +} +func (m *Entity) XXX_DiscardUnknown() { + xxx_messageInfo_Entity.DiscardUnknown(m) +} + +var xxx_messageInfo_Entity proto.InternalMessageInfo func (m *Entity) GetId() int32 { if m != nil { @@ -173,22 +225,66 @@ func (m *Entity) GetCreatedAt() int64 { } type Nothing struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nothing) Reset() { *m = Nothing{} } +func (m *Nothing) String() string { return proto.CompactTextString(m) } +func (*Nothing) ProtoMessage() {} +func (*Nothing) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{3} +} +func (m *Nothing) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nothing.Unmarshal(m, b) +} +func (m *Nothing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nothing.Marshal(b, m, deterministic) +} +func (dst *Nothing) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nothing.Merge(dst, src) +} +func (m *Nothing) XXX_Size() int { + return xxx_messageInfo_Nothing.Size(m) +} +func (m *Nothing) XXX_DiscardUnknown() { + xxx_messageInfo_Nothing.DiscardUnknown(m) } -func (m *Nothing) Reset() { *m = Nothing{} } -func (m *Nothing) String() string { return proto.CompactTextString(m) } -func (*Nothing) ProtoMessage() {} -func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_Nothing proto.InternalMessageInfo type Domain struct { - Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Domain) 
Reset() { *m = Domain{} } -func (m *Domain) String() string { return proto.CompactTextString(m) } -func (*Domain) ProtoMessage() {} -func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *Domain) Reset() { *m = Domain{} } +func (m *Domain) String() string { return proto.CompactTextString(m) } +func (*Domain) ProtoMessage() {} +func (*Domain) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{4} +} +func (m *Domain) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Domain.Unmarshal(m, b) +} +func (m *Domain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Domain.Marshal(b, m, deterministic) +} +func (dst *Domain) XXX_Merge(src proto.Message) { + xxx_messageInfo_Domain.Merge(dst, src) +} +func (m *Domain) XXX_Size() int { + return xxx_messageInfo_Domain.Size(m) +} +func (m *Domain) XXX_DiscardUnknown() { + xxx_messageInfo_Domain.DiscardUnknown(m) +} + +var xxx_messageInfo_Domain proto.InternalMessageInfo func (m *Domain) GetStart() []byte { if m != nil { @@ -205,16 +301,38 @@ func (m *Domain) GetEnd() []byte { } type Iterator struct { - Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` - Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Iterator) Reset() { *m = Iterator{} } -func (m *Iterator) String() string { return 
proto.CompactTextString(m) } -func (*Iterator) ProtoMessage() {} -func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *Iterator) Reset() { *m = Iterator{} } +func (m *Iterator) String() string { return proto.CompactTextString(m) } +func (*Iterator) ProtoMessage() {} +func (*Iterator) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{5} +} +func (m *Iterator) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Iterator.Unmarshal(m, b) +} +func (m *Iterator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Iterator.Marshal(b, m, deterministic) +} +func (dst *Iterator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Iterator.Merge(dst, src) +} +func (m *Iterator) XXX_Size() int { + return xxx_messageInfo_Iterator.Size(m) +} +func (m *Iterator) XXX_DiscardUnknown() { + xxx_messageInfo_Iterator.DiscardUnknown(m) +} + +var xxx_messageInfo_Iterator proto.InternalMessageInfo func (m *Iterator) GetDomain() *Domain { if m != nil { @@ -245,14 +363,36 @@ func (m *Iterator) GetValue() []byte { } type Stats struct { - Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` + Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Stats) Reset() { *m = Stats{} } -func (m *Stats) String() string { return proto.CompactTextString(m) } -func (*Stats) ProtoMessage() {} -func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *Stats) Reset() { *m 
= Stats{} } +func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{6} +} +func (m *Stats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stats.Unmarshal(m, b) +} +func (m *Stats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stats.Marshal(b, m, deterministic) +} +func (dst *Stats) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stats.Merge(dst, src) +} +func (m *Stats) XXX_Size() int { + return xxx_messageInfo_Stats.Size(m) +} +func (m *Stats) XXX_DiscardUnknown() { + xxx_messageInfo_Stats.DiscardUnknown(m) +} + +var xxx_messageInfo_Stats proto.InternalMessageInfo func (m *Stats) GetData() map[string]string { if m != nil { @@ -269,15 +409,37 @@ func (m *Stats) GetTimeAt() int64 { } type Init struct { - Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` - Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` + Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Init) Reset() { *m = Init{} } +func (m *Init) String() string { return proto.CompactTextString(m) } +func (*Init) ProtoMessage() {} +func (*Init) Descriptor() ([]byte, []int) { + return fileDescriptor_defs_7303098f1c775f7f, []int{7} +} +func (m *Init) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Init.Unmarshal(m, b) +} +func (m *Init) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Init.Marshal(b, m, deterministic) +} +func (dst *Init) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Init.Merge(dst, src) +} +func (m *Init) XXX_Size() int { + return xxx_messageInfo_Init.Size(m) +} +func (m *Init) XXX_DiscardUnknown() { + xxx_messageInfo_Init.DiscardUnknown(m) } -func (m *Init) Reset() { *m = Init{} } -func (m *Init) String() string { return proto.CompactTextString(m) } -func (*Init) ProtoMessage() {} -func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +var xxx_messageInfo_Init proto.InternalMessageInfo func (m *Init) GetType() string { if m != nil { @@ -308,6 +470,7 @@ func init() { proto.RegisterType((*Domain)(nil), "protodb.Domain") proto.RegisterType((*Iterator)(nil), "protodb.Iterator") proto.RegisterType((*Stats)(nil), "protodb.Stats") + proto.RegisterMapType((map[string]string)(nil), "protodb.Stats.DataEntry") proto.RegisterType((*Init)(nil), "protodb.Init") proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value) } @@ -320,8 +483,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for DB service - +// DBClient is the client API for DB service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type DBClient interface { Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) @@ -349,7 +513,7 @@ func NewDBClient(cc *grpc.ClientConn) DBClient { func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/init", in, out, opts...) 
if err != nil { return nil, err } @@ -358,7 +522,7 @@ func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/get", in, out, opts...) if err != nil { return nil, err } @@ -366,7 +530,7 @@ func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) } func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { - stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) + stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[0], "/protodb.DB/getStream", opts...) if err != nil { return nil, err } @@ -398,7 +562,7 @@ func (x *dBGetStreamClient) Recv() (*Entity, error) { func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/has", in, out, opts...) if err != nil { return nil, err } @@ -407,7 +571,7 @@ func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/set", in, out, opts...) if err != nil { return nil, err } @@ -416,7 +580,7 @@ func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/setSync", in, out, opts...) 
if err != nil { return nil, err } @@ -425,7 +589,7 @@ func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOpt func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/delete", in, out, opts...) if err != nil { return nil, err } @@ -434,7 +598,7 @@ func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOpti func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, opts...) if err != nil { return nil, err } @@ -442,7 +606,7 @@ func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.Call } func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { - stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) + stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[1], "/protodb.DB/iterator", opts...) if err != nil { return nil, err } @@ -474,7 +638,7 @@ func (x *dBIteratorClient) Recv() (*Iterator, error) { } func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { - stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) + stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[2], "/protodb.DB/reverseIterator", opts...) 
if err != nil { return nil, err } @@ -507,7 +671,7 @@ func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { out := new(Stats) - err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/stats", in, out, opts...) if err != nil { return nil, err } @@ -516,7 +680,7 @@ func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOpti func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, opts...) if err != nil { return nil, err } @@ -525,7 +689,7 @@ func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallO func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, opts...) 
if err != nil { return nil, err } @@ -869,9 +1033,9 @@ var _DB_serviceDesc = grpc.ServiceDesc{ Metadata: "defs.proto", } -func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } +func init() { proto.RegisterFile("defs.proto", fileDescriptor_defs_7303098f1c775f7f) } -var fileDescriptor0 = []byte{ +var fileDescriptor_defs_7303098f1c775f7f = []byte{ // 606 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, 0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, diff --git a/proto/defs.proto b/db/remotedb/proto/defs.proto similarity index 100% rename from proto/defs.proto rename to db/remotedb/proto/defs.proto diff --git a/remotedb/remotedb.go b/db/remotedb/remotedb.go similarity index 98% rename from remotedb/remotedb.go rename to db/remotedb/remotedb.go index f6e4d9c17..5332bd68e 100644 --- a/remotedb/remotedb.go +++ b/db/remotedb/remotedb.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/grpcdb" - protodb "github.com/tendermint/tmlibs/proto" + "github.com/tendermint/tmlibs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tmlibs/db/remotedb/proto" ) type RemoteDB struct { diff --git a/remotedb/remotedb_test.go b/db/remotedb/remotedb_test.go similarity index 90% rename from remotedb/remotedb_test.go rename to db/remotedb/remotedb_test.go index cbe9d9095..b126a9012 100644 --- a/remotedb/remotedb_test.go +++ b/db/remotedb/remotedb_test.go @@ -2,12 +2,13 @@ package remotedb_test import ( "net" + "os" "testing" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/grpcdb" - "github.com/tendermint/tmlibs/remotedb" + "github.com/tendermint/tmlibs/db/remotedb" + "github.com/tendermint/tmlibs/db/remotedb/grpcdb" ) func TestRemoteDB(t *testing.T) { @@ -26,7 +27,14 @@ func TestRemoteDB(t *testing.T) { client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) require.Nil(t, 
err, "expecting a successful client creation") - require.Nil(t, client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"})) + dbName := "test-remote-db" + require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"})) + defer func() { + err := os.RemoveAll(dbName + ".db") + if err != nil { + panic(err) + } + }() k1 := []byte("key-1") v1 := client.Get(k1) From 4a77eda368f70d25697483e6589439ecda78130d Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 16 May 2018 12:57:08 +0400 Subject: [PATCH 445/515] events and pubsub were moved to tendermint core Refs https://github.com/tendermint/tendermint/issues/847 --- events/Makefile | 9 - events/README.md | 175 ---- events/event_cache.go | 37 - events/event_cache_test.go | 35 - events/events.go | 226 ----- events/events_test.go | 380 -------- pubsub/example_test.go | 27 - pubsub/pubsub.go | 342 ------- pubsub/pubsub_test.go | 252 ------ pubsub/query/Makefile | 11 - pubsub/query/empty.go | 16 - pubsub/query/empty_test.go | 17 - pubsub/query/fuzz_test/main.go | 30 - pubsub/query/parser_test.go | 91 -- pubsub/query/query.go | 345 ------- pubsub/query/query.peg | 33 - pubsub/query/query.peg.go | 1553 -------------------------------- pubsub/query/query_test.go | 86 -- 18 files changed, 3665 deletions(-) delete mode 100644 events/Makefile delete mode 100644 events/README.md delete mode 100644 events/event_cache.go delete mode 100644 events/event_cache_test.go delete mode 100644 events/events.go delete mode 100644 events/events_test.go delete mode 100644 pubsub/example_test.go delete mode 100644 pubsub/pubsub.go delete mode 100644 pubsub/pubsub_test.go delete mode 100644 pubsub/query/Makefile delete mode 100644 pubsub/query/empty.go delete mode 100644 pubsub/query/empty_test.go delete mode 100644 pubsub/query/fuzz_test/main.go delete mode 100644 pubsub/query/parser_test.go delete mode 100644 pubsub/query/query.go delete mode 100644 pubsub/query/query.peg delete mode 100644 
pubsub/query/query.peg.go delete mode 100644 pubsub/query/query_test.go diff --git a/events/Makefile b/events/Makefile deleted file mode 100644 index c425ee5a6..000000000 --- a/events/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: docs -REPO:=github.com/tendermint/tmlibs/events - -docs: - @go get github.com/davecheney/godoc2md - godoc2md $(REPO) > README.md - -test: - go test -v ./... diff --git a/events/README.md b/events/README.md deleted file mode 100644 index d7469515e..000000000 --- a/events/README.md +++ /dev/null @@ -1,175 +0,0 @@ - - -# events -`import "github.com/tendermint/tmlibs/events"` - -* [Overview](#pkg-overview) -* [Index](#pkg-index) - -## Overview -Pub-Sub in go with event caching - - - - -## Index -* [type EventCache](#EventCache) - * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) - * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) - * [func (evc *EventCache) Flush()](#EventCache.Flush) -* [type EventCallback](#EventCallback) -* [type EventData](#EventData) -* [type EventSwitch](#EventSwitch) - * [func NewEventSwitch() EventSwitch](#NewEventSwitch) -* [type Eventable](#Eventable) -* [type Fireable](#Fireable) - - -#### Package files -[event_cache.go](/src/github.com/tendermint/tmlibs/events/event_cache.go) [events.go](/src/github.com/tendermint/tmlibs/events/events.go) [log.go](/src/github.com/tendermint/tmlibs/events/log.go) - - - - - - -## type [EventCache](/src/target/event_cache.go?s=152:215#L1) -``` go -type EventCache struct { - // contains filtered or unexported fields -} -``` -An EventCache buffers events for a Fireable -All events are cached. 
Filtering happens on Flush - - - - - - - -### func [NewEventCache](/src/target/event_cache.go?s=275:320#L5) -``` go -func NewEventCache(evsw Fireable) *EventCache -``` -Create a new EventCache with an EventSwitch as backend - - - - - -### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=534:596#L19) -``` go -func (evc *EventCache) FireEvent(event string, data EventData) -``` -Cache an event to be fired upon finality. - - - - -### func (\*EventCache) [Flush](/src/target/event_cache.go?s=773:803#L26) -``` go -func (evc *EventCache) Flush() -``` -Fire events by running evsw.FireEvent on all cached events. Blocks. -Clears cached events - - - - -## type [EventCallback](/src/target/events.go?s=4182:4221#L175) -``` go -type EventCallback func(data EventData) -``` - - - - - - - - - -## type [EventData](/src/target/events.go?s=236:287#L4) -``` go -type EventData interface { -} -``` -Generic event data can be typed and registered with tendermint/go-amino -via concrete implementation of this interface - - - - - - - - - - -## type [EventSwitch](/src/target/events.go?s=553:760#L19) -``` go -type EventSwitch interface { - Service - Fireable - - AddListenerForEvent(listenerID, event string, cb EventCallback) - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) -} -``` - - - - - - -### func [NewEventSwitch](/src/target/events.go?s=902:935#L36) -``` go -func NewEventSwitch() EventSwitch -``` - - - - -## type [Eventable](/src/target/events.go?s=371:433#L10) -``` go -type Eventable interface { - SetEventSwitch(evsw EventSwitch) -} -``` -reactors and other modules should export -this interface to become eventable - - - - - - - - - - -## type [Fireable](/src/target/events.go?s=483:551#L15) -``` go -type Fireable interface { - FireEvent(event string, data EventData) -} -``` -an event switch or cache implements fireable - - - - - - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff 
--git a/events/event_cache.go b/events/event_cache.go deleted file mode 100644 index f508e873d..000000000 --- a/events/event_cache.go +++ /dev/null @@ -1,37 +0,0 @@ -package events - -// An EventCache buffers events for a Fireable -// All events are cached. Filtering happens on Flush -type EventCache struct { - evsw Fireable - events []eventInfo -} - -// Create a new EventCache with an EventSwitch as backend -func NewEventCache(evsw Fireable) *EventCache { - return &EventCache{ - evsw: evsw, - } -} - -// a cached event -type eventInfo struct { - event string - data EventData -} - -// Cache an event to be fired upon finality. -func (evc *EventCache) FireEvent(event string, data EventData) { - // append to list (go will grow our backing array exponentially) - evc.events = append(evc.events, eventInfo{event, data}) -} - -// Fire events by running evsw.FireEvent on all cached events. Blocks. -// Clears cached events -func (evc *EventCache) Flush() { - for _, ei := range evc.events { - evc.evsw.FireEvent(ei.event, ei.data) - } - // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation - evc.events = nil -} diff --git a/events/event_cache_test.go b/events/event_cache_test.go deleted file mode 100644 index ab321da3a..000000000 --- a/events/event_cache_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package events - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEventCache_Flush(t *testing.T) { - evsw := NewEventSwitch() - evsw.Start() - evsw.AddListenerForEvent("nothingness", "", func(data EventData) { - // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache - require.FailNow(t, "We should never receive a message on this switch since none are fired") - }) - evc := NewEventCache(evsw) - evc.Flush() - // Check after reset - evc.Flush() - fail := true - pass := false - evsw.AddListenerForEvent("somethingness", 
"something", func(data EventData) { - if fail { - require.FailNow(t, "Shouldn't see a message until flushed") - } - pass = true - }) - evc.FireEvent("something", struct{ int }{1}) - evc.FireEvent("something", struct{ int }{2}) - evc.FireEvent("something", struct{ int }{3}) - fail = false - evc.Flush() - assert.True(t, pass) -} diff --git a/events/events.go b/events/events.go deleted file mode 100644 index f1b2a754e..000000000 --- a/events/events.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Pub-Sub in go with event caching -*/ -package events - -import ( - "sync" - - cmn "github.com/tendermint/tmlibs/common" -) - -// Generic event data can be typed and registered with tendermint/go-amino -// via concrete implementation of this interface -type EventData interface { - //AssertIsEventData() -} - -// reactors and other modules should export -// this interface to become eventable -type Eventable interface { - SetEventSwitch(evsw EventSwitch) -} - -// an event switch or cache implements fireable -type Fireable interface { - FireEvent(event string, data EventData) -} - -type EventSwitch interface { - cmn.Service - Fireable - - AddListenerForEvent(listenerID, event string, cb EventCallback) - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) -} - -type eventSwitch struct { - cmn.BaseService - - mtx sync.RWMutex - eventCells map[string]*eventCell - listeners map[string]*eventListener -} - -func NewEventSwitch() EventSwitch { - evsw := &eventSwitch{} - evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw) - return evsw -} - -func (evsw *eventSwitch) OnStart() error { - evsw.BaseService.OnStart() - evsw.eventCells = make(map[string]*eventCell) - evsw.listeners = make(map[string]*eventListener) - return nil -} - -func (evsw *eventSwitch) OnStop() { - evsw.mtx.Lock() - defer evsw.mtx.Unlock() - evsw.BaseService.OnStop() - evsw.eventCells = nil - evsw.listeners = nil -} - -func (evsw *eventSwitch) AddListenerForEvent(listenerID, 
event string, cb EventCallback) { - // Get/Create eventCell and listener - evsw.mtx.Lock() - eventCell := evsw.eventCells[event] - if eventCell == nil { - eventCell = newEventCell() - evsw.eventCells[event] = eventCell - } - listener := evsw.listeners[listenerID] - if listener == nil { - listener = newEventListener(listenerID) - evsw.listeners[listenerID] = listener - } - evsw.mtx.Unlock() - - // Add event and listener - eventCell.AddListener(listenerID, cb) - listener.AddEvent(event) -} - -func (evsw *eventSwitch) RemoveListener(listenerID string) { - // Get and remove listener - evsw.mtx.RLock() - listener := evsw.listeners[listenerID] - evsw.mtx.RUnlock() - if listener == nil { - return - } - - evsw.mtx.Lock() - delete(evsw.listeners, listenerID) - evsw.mtx.Unlock() - - // Remove callback for each event. - listener.SetRemoved() - for _, event := range listener.GetEvents() { - evsw.RemoveListenerForEvent(event, listenerID) - } -} - -func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { - // Get eventCell - evsw.mtx.Lock() - eventCell := evsw.eventCells[event] - evsw.mtx.Unlock() - - if eventCell == nil { - return - } - - // Remove listenerID from eventCell - numListeners := eventCell.RemoveListener(listenerID) - - // Maybe garbage collect eventCell. - if numListeners == 0 { - // Lock again and double check. 
- evsw.mtx.Lock() // OUTER LOCK - eventCell.mtx.Lock() // INNER LOCK - if len(eventCell.listeners) == 0 { - delete(evsw.eventCells, event) - } - eventCell.mtx.Unlock() // INNER LOCK - evsw.mtx.Unlock() // OUTER LOCK - } -} - -func (evsw *eventSwitch) FireEvent(event string, data EventData) { - // Get the eventCell - evsw.mtx.RLock() - eventCell := evsw.eventCells[event] - evsw.mtx.RUnlock() - - if eventCell == nil { - return - } - - // Fire event for all listeners in eventCell - eventCell.FireEvent(data) -} - -//----------------------------------------------------------------------------- - -// eventCell handles keeping track of listener callbacks for a given event. -type eventCell struct { - mtx sync.RWMutex - listeners map[string]EventCallback -} - -func newEventCell() *eventCell { - return &eventCell{ - listeners: make(map[string]EventCallback), - } -} - -func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { - cell.mtx.Lock() - cell.listeners[listenerID] = cb - cell.mtx.Unlock() -} - -func (cell *eventCell) RemoveListener(listenerID string) int { - cell.mtx.Lock() - delete(cell.listeners, listenerID) - numListeners := len(cell.listeners) - cell.mtx.Unlock() - return numListeners -} - -func (cell *eventCell) FireEvent(data EventData) { - cell.mtx.RLock() - for _, listener := range cell.listeners { - listener(data) - } - cell.mtx.RUnlock() -} - -//----------------------------------------------------------------------------- - -type EventCallback func(data EventData) - -type eventListener struct { - id string - - mtx sync.RWMutex - removed bool - events []string -} - -func newEventListener(id string) *eventListener { - return &eventListener{ - id: id, - removed: false, - events: nil, - } -} - -func (evl *eventListener) AddEvent(event string) { - evl.mtx.Lock() - defer evl.mtx.Unlock() - - if evl.removed { - return - } - evl.events = append(evl.events, event) -} - -func (evl *eventListener) GetEvents() []string { - evl.mtx.RLock() - defer 
evl.mtx.RUnlock() - - events := make([]string, len(evl.events)) - copy(events, evl.events) - return events -} - -func (evl *eventListener) SetRemoved() { - evl.mtx.Lock() - defer evl.mtx.Unlock() - evl.removed = true -} diff --git a/events/events_test.go b/events/events_test.go deleted file mode 100644 index 4995ae730..000000000 --- a/events/events_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package events - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single -// listener to an event, and sends a string "data". -func TestAddListenerForEventFireOnce(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - messages := make(chan EventData) - evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - messages <- data - }) - go evsw.FireEvent("event", "data") - received := <-messages - if received != "data" { - t.Errorf("Message received does not match: %v", received) - } -} - -// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single -// listener to an event, and sends a thousand integers. 
-func TestAddListenerForEventFireMany(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum := make(chan uint64) - doneSending := make(chan uint64) - numbers := make(chan uint64, 4) - // subscribe one listener for one event - evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - numbers <- data.(uint64) - }) - // collect received events - go sumReceivedNumbers(numbers, doneSum) - // go fire events - go fireEvents(evsw, "event", doneSending, uint64(1)) - checkSum := <-doneSending - close(numbers) - eventSum := <-doneSum - if checkSum != eventSum { - t.Errorf("Not all messages sent were received.\n") - } -} - -// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single -// listener to three different events and sends a thousand integers for each -// of the three events. -func TestAddListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers := make(chan uint64, 4) - // subscribe one listener to three events - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event3", - func(data EventData) { - numbers <- data.(uint64) - }) - // collect received events - go sumReceivedNumbers(numbers, doneSum) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1)) - go fireEvents(evsw, "event3", doneSending3, uint64(1)) - var checkSum uint64 = 0 - checkSum += <-doneSending1 - checkSum += <-doneSending2 - checkSum += 
<-doneSending3 - close(numbers) - eventSum := <-doneSum - if checkSum != eventSum { - t.Errorf("Not all messages sent were received.\n") - } -} - -// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch, -// subscribes a first listener to three events, and subscribes a second -// listener to two of those three events, and then sends a thousand integers -// for each of the three events. -func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for listener1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for listener2 - go sumReceivedNumbers(numbers2, doneSum2) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) - checkSumEvent1 := <-doneSending1 - checkSumEvent2 := <-doneSending2 - checkSumEvent3 := <-doneSending3 - checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 - checkSum2 := checkSumEvent2 + 
checkSumEvent3 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSum1 != eventSum1 || - checkSum2 != eventSum2 { - t.Errorf("Not all messages sent were received for different listeners to different events.\n") - } -} - -// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to -// two events, fires a thousand integers for the first event, then unsubscribes -// the listener and fires a thousand integers for the second event. -func TestAddAndRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for event1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for event2 - go sumReceivedNumbers(numbers2, doneSum2) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - checkSumEvent1 := <-doneSending1 - // after sending all event1, unsubscribe for all events - evsw.RemoveListener("listener") - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - checkSumEvent2 := <-doneSending2 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSumEvent1 != eventSum1 || - // correct value asserted by preceding tests, suffices to be non-zero - checkSumEvent2 == uint64(0) || - eventSum2 != uint64(0) { - t.Errorf("Not all messages sent were received or unsubscription did not register.\n") - } -} - -// TestRemoveListener does basic tests on 
adding and removing -func TestRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - count := 10 - sum1, sum2 := 0, 0 - // add some listeners and make sure they work - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - sum1++ - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - sum2++ - }) - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count, sum1) - assert.Equal(t, count, sum2) - - // remove one by event and make sure it is gone - evsw.RemoveListenerForEvent("event2", "listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - assert.Equal(t, count, sum2) - - // remove the listener entirely and make sure both gone - evsw.RemoveListener("listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - assert.Equal(t, count, sum2) -} - -// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two -// listeners to three events, and fires a thousand integers for each event. -// These two listeners serve as the baseline validation while other listeners -// are randomly subscribed and unsubscribed. -// More precisely it randomly subscribes new listeners (different from the first -// two listeners) to one of these three events. At the same time it starts -// randomly unsubscribing these additional listeners from all events they are -// at that point subscribed to. -// NOTE: it is important to run this test with race conditions tracking on, -// `go test -race`, to examine for possible race conditions. 
-func TestRemoveListenersAsync(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event1", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for event1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for event2 - go sumReceivedNumbers(numbers2, doneSum2) - addListenersStress := func() { - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) - for k := uint16(0); k < 400; k++ { - listenerNumber := r1.Intn(100) + 3 - eventNumber := r1.Intn(3) + 1 - go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), - fmt.Sprintf("event%v", eventNumber), - func(_ EventData) {}) - } - } - removeListenersStress := func() { - s2 := rand.NewSource(time.Now().UnixNano()) - r2 := rand.New(s2) - for k := uint16(0); k < 80; k++ { - listenerNumber := r2.Intn(100) + 3 - go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) - } - } - addListenersStress() - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - 
removeListenersStress() - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) - checkSumEvent1 := <-doneSending1 - checkSumEvent2 := <-doneSending2 - checkSumEvent3 := <-doneSending3 - checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSum != eventSum1 || - checkSum != eventSum2 { - t.Errorf("Not all messages sent were received.\n") - } -} - -//------------------------------------------------------------------------------ -// Helper functions - -// sumReceivedNumbers takes two channels and adds all numbers received -// until the receiving channel `numbers` is closed; it then sends the sum -// on `doneSum` and closes that channel. Expected to be run in a go-routine. -func sumReceivedNumbers(numbers, doneSum chan uint64) { - var sum uint64 = 0 - for { - j, more := <-numbers - sum += j - if !more { - doneSum <- sum - close(doneSum) - return - } - } -} - -// fireEvents takes an EventSwitch and fires a thousand integers under -// a given `event` with the integers mootonically increasing from `offset` -// to `offset` + 999. It additionally returns the addition of all integers -// sent on `doneChan` for assertion that all events have been sent, and enabling -// the test to assert all events have also been received. 
-func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, - offset uint64) { - var sentSum uint64 = 0 - for i := offset; i <= offset+uint64(999); i++ { - sentSum += i - evsw.FireEvent(event, i) - } - doneChan <- sentSum - close(doneChan) -} diff --git a/pubsub/example_test.go b/pubsub/example_test.go deleted file mode 100644 index 71f1b9cd5..000000000 --- a/pubsub/example_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package pubsub_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tmlibs/log" - "github.com/tendermint/tmlibs/pubsub" - "github.com/tendermint/tmlibs/pubsub/query" -) - -func TestExample(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}, 1) - err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]interface{}{"abci.account.name": "John"})) - require.NoError(t, err) - assertReceive(t, "Tombstone", ch) -} diff --git a/pubsub/pubsub.go b/pubsub/pubsub.go deleted file mode 100644 index 67f264ace..000000000 --- a/pubsub/pubsub.go +++ /dev/null @@ -1,342 +0,0 @@ -// Package pubsub implements a pub-sub model with a single publisher (Server) -// and multiple subscribers (clients). -// -// Though you can have multiple publishers by sharing a pointer to a server or -// by giving the same channel to each publisher and publishing messages from -// that channel (fan-in). -// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. 
-package pubsub - -import ( - "context" - "errors" - "sync" - - cmn "github.com/tendermint/tmlibs/common" -) - -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - -var ( - // ErrSubscriptionNotFound is returned when a client tries to unsubscribe - // from not existing subscription. - ErrSubscriptionNotFound = errors.New("subscription not found") - - // ErrAlreadySubscribed is returned when a client tries to subscribe twice or - // more using the same query. - ErrAlreadySubscribed = errors.New("already subscribed") -) - -// TagMap is used to associate tags to a message. -// They can be queried by subscribers to choose messages they will received. -type TagMap interface { - // Get returns the value for a key, or nil if no value is present. - // The ok result indicates whether value was found in the tags. - Get(key string) (value interface{}, ok bool) - // Len returns the number of tags. - Len() int -} - -type tagMap map[string]interface{} - -type cmd struct { - op operation - query Query - ch chan<- interface{} - clientID string - msg interface{} - tags TagMap -} - -// Query defines an interface for a query to be used for subscribing. -type Query interface { - Matches(tags TagMap) bool - String() string -} - -// Server allows clients to subscribe/unsubscribe for messages, publishing -// messages with or without tags, and manages internal state. -type Server struct { - cmn.BaseService - - cmds chan cmd - cmdsCap int - - mtx sync.RWMutex - subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query -} - -// Option sets a parameter for the server. -type Option func(*Server) - -// NewTagMap constructs a new immutable tag set from a map. -func NewTagMap(data map[string]interface{}) TagMap { - return tagMap(data) -} - -// Get returns the value for a key, or nil if no value is present. -// The ok result indicates whether value was found in the tags. 
-func (ts tagMap) Get(key string) (value interface{}, ok bool) { - value, ok = ts[key] - return -} - -// Len returns the number of tags. -func (ts tagMap) Len() int { - return len(ts) -} - -// NewServer returns a new server. See the commentary on the Option functions -// for a detailed description of how to configure buffering. If no options are -// provided, the resulting server's queue is unbuffered. -func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]Query), - } - s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) - - for _, option := range options { - option(s) - } - - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) - - return s -} - -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). -func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } - } -} - -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} - -// Subscribe creates a subscription for the given client. It accepts a channel -// on which messages matching the given query can be received. An error will be -// returned to the caller if the context is canceled or if subscription already -// exist for pair clientID and query. 
-func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { - return ErrAlreadySubscribed - } - - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]Query) - } - s.subscriptions[clientID][query.String()] = query - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. -func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { - var origQuery Query - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - origQuery, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - // original query is used here because we're using pointers as map keys - select { - case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}: - s.mtx.Lock() - delete(clientSubscriptions, query.String()) - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. 
-func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - delete(s.subscriptions, clientID) - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Publish publishes the given message. An error will be returned to the caller -// if the context is canceled. -func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]interface{}))) -} - -// PublishWithTags publishes the given message with the set of tags. The set is -// matched with clients queries. If there is a match, the message is sent to -// the client. -func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} - -// NOTE: not goroutine safe -type state struct { - // query -> client -> ch - queries map[Query]map[string]chan<- interface{} - // client -> query -> struct{} - clients map[string]map[Query]struct{} -} - -// OnStart implements Service.OnStart by starting the server. 
-func (s *Server) OnStart() error { - go s.loop(state{ - queries: make(map[Query]map[string]chan<- interface{}), - clients: make(map[string]map[Query]struct{}), - }) - return nil -} - -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} - -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query) - } else { - state.removeAll(cmd.clientID) - } - case shutdown: - for clientID := range state.clients { - state.removeAll(clientID) - } - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.ch) - case pub: - state.send(cmd.msg, cmd.tags) - } - } -} - -func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed - if _, ok := state.queries[q]; !ok { - state.queries[q] = make(map[string]chan<- interface{}) - } - - // create subscription - state.queries[q][clientID] = ch - - // add client if needed - if _, ok := state.clients[clientID]; !ok { - state.clients[clientID] = make(map[Query]struct{}) - } - state.clients[clientID][q] = struct{}{} -} - -func (state *state) remove(clientID string, q Query) { - clientToChannelMap, ok := state.queries[q] - if !ok { - return - } - - ch, ok := clientToChannelMap[clientID] - if ok { - close(ch) - - delete(state.clients[clientID], q) - - // if it not subscribed to anything else, remove the client - if len(state.clients[clientID]) == 0 { - delete(state.clients, clientID) - } - - delete(state.queries[q], clientID) - } -} - -func (state *state) removeAll(clientID string) { - queryMap, ok := state.clients[clientID] - if !ok { - return - } - - for q := range queryMap { - ch := state.queries[q][clientID] - close(ch) - - delete(state.queries[q], clientID) - } - - delete(state.clients, clientID) -} - -func (state *state) send(msg interface{}, tags TagMap) { - for q, clientToChannelMap := range state.queries { - if q.Matches(tags) { - for _, ch := range 
clientToChannelMap { - ch <- msg - } - } - } -} diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go deleted file mode 100644 index f853d163b..000000000 --- a/pubsub/pubsub_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package pubsub_test - -import ( - "context" - "fmt" - "runtime/debug" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tmlibs/log" - "github.com/tendermint/tmlibs/pubsub" - "github.com/tendermint/tmlibs/pubsub/query" -) - -const ( - clientID = "test-client" -) - -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.Empty{}, ch) - require.NoError(t, err) - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", ch) - - err = s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - assertReceive(t, "Quicksilver", ch) -} - -func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch1 := make(chan interface{}, 1) - err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Iceman", ch1) - - ch2 := make(chan interface{}, 1) - err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})) - require.NoError(t, err) - assertReceive(t, "Ultimo", ch1) - assertReceive(t, "Ultimo", ch2) - - ch3 := make(chan interface{}, 1) - err = 
s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewRoundStep"})) - require.NoError(t, err) - assert.Zero(t, len(ch3)) -} - -func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - q := query.MustParse("tm.events.type='NewBlock'") - - ch1 := make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, q, ch1) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Goblin Queen", ch1) - - ch2 := make(chan interface{}, 1) - err = s.Subscribe(ctx, clientID, q, ch2) - require.Error(t, err) - - err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Spider-Man", ch1) -} - -func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}) - err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch) - require.NoError(t, err) - err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") - - _, ok := <-ch - assert.False(t, ok) -} - -func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}) - err := s.Subscribe(ctx, clientID, query.Empty{}, 
ch) - require.NoError(t, err) - err = s.Unsubscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - ch = make(chan interface{}) - err = s.Subscribe(ctx, clientID, query.Empty{}, ch) - require.NoError(t, err) - - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", ch) -} - -func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1) - require.NoError(t, err) - err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2) - require.NoError(t, err) - - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") - assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") - - _, ok := <-ch1 - assert.False(t, ok) - _, ok = <-ch2 - assert.False(t, ok) -} - -func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - s.SetLogger(log.TestingLogger()) - - assert.Equal(t, 2, s.BufferCapacity()) - - ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - assert.Equal(t, context.DeadlineExceeded, err) - } -} - -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } - -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func 
Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - s.Start() - defer s.Stop() - - ctx := context.Background() - for i := 0; i < n; i++ { - ch := make(chan interface{}) - go func() { - for range ch { - } - }() - s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i})) - } -} - -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - s.Start() - defer s.Stop() - - ctx := context.Background() - q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - ch := make(chan interface{}) - go func() { - for range ch { - } - }() - s.Subscribe(ctx, clientID, q, ch) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1})) - } -} - -/////////////////////////////////////////////////////////////////////////////// -/// HELPERS -/////////////////////////////////////////////////////////////////////////////// - -func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - if actual != nil { - assert.Equal(t, expected, actual, msgAndArgs...) 
- } - case <-time.After(1 * time.Second): - t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() - } -} diff --git a/pubsub/query/Makefile b/pubsub/query/Makefile deleted file mode 100644 index ca3ff5b56..000000000 --- a/pubsub/query/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -gen_query_parser: - @go get github.com/pointlander/peg - peg -inline -switch query.peg - -fuzzy_test: - @go get github.com/dvyukov/go-fuzz/go-fuzz - @go get github.com/dvyukov/go-fuzz/go-fuzz-build - go-fuzz-build github.com/tendermint/tmlibs/pubsub/query/fuzz_test - go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output - -.PHONY: gen_query_parser fuzzy_test diff --git a/pubsub/query/empty.go b/pubsub/query/empty.go deleted file mode 100644 index cefdace4a..000000000 --- a/pubsub/query/empty.go +++ /dev/null @@ -1,16 +0,0 @@ -package query - -import "github.com/tendermint/tmlibs/pubsub" - -// Empty query matches any set of tags. -type Empty struct { -} - -// Matches always returns true. 
-func (Empty) Matches(tags pubsub.TagMap) bool { - return true -} - -func (Empty) String() string { - return "empty" -} diff --git a/pubsub/query/empty_test.go b/pubsub/query/empty_test.go deleted file mode 100644 index b5e8a3001..000000000 --- a/pubsub/query/empty_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/pubsub" - "github.com/tendermint/tmlibs/pubsub/query" -) - -func TestEmptyQueryMatchesAnything(t *testing.T) { - q := query.Empty{} - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Asher": "Roth"}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66, "Billy": "Blue"}))) -} diff --git a/pubsub/query/fuzz_test/main.go b/pubsub/query/fuzz_test/main.go deleted file mode 100644 index 3b0ef1473..000000000 --- a/pubsub/query/fuzz_test/main.go +++ /dev/null @@ -1,30 +0,0 @@ -package fuzz_test - -import ( - "fmt" - - "github.com/tendermint/tmlibs/pubsub/query" -) - -func Fuzz(data []byte) int { - sdata := string(data) - q0, err := query.New(sdata) - if err != nil { - return 0 - } - - sdata1 := q0.String() - q1, err := query.New(sdata1) - if err != nil { - panic(err) - } - - sdata2 := q1.String() - if sdata1 != sdata2 { - fmt.Printf("q0: %q\n", sdata1) - fmt.Printf("q1: %q\n", sdata2) - panic("query changed") - } - - return 1 -} diff --git a/pubsub/query/parser_test.go b/pubsub/query/parser_test.go deleted file mode 100644 index e31079b43..000000000 --- a/pubsub/query/parser_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/pubsub/query" -) - -// TODO: fuzzy testing? 
-func TestParser(t *testing.T) { - cases := []struct { - query string - valid bool - }{ - {"tm.events.type='NewBlock'", true}, - {"tm.events.type = 'NewBlock'", true}, - {"tm.events.name = ''", true}, - {"tm.events.type='TIME'", true}, - {"tm.events.type='DATE'", true}, - {"tm.events.type='='", true}, - {"tm.events.type='TIME", false}, - {"tm.events.type=TIME'", false}, - {"tm.events.type==", false}, - {"tm.events.type=NewBlock", false}, - {">==", false}, - {"tm.events.type 'NewBlock' =", false}, - {"tm.events.type>'NewBlock'", false}, - {"", false}, - {"=", false}, - {"='NewBlock'", false}, - {"tm.events.type=", false}, - - {"tm.events.typeNewBlock", false}, - {"tm.events.type'NewBlock'", false}, - {"'NewBlock'", false}, - {"NewBlock", false}, - {"", false}, - - {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, - {"tm.events.type='NewBlock' AND", false}, - {"tm.events.type='NewBlock' AN", false}, - {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, - {"AND tm.events.type='NewBlock' ", false}, - - {"abci.account.name CONTAINS 'Igor'", true}, - - {"tx.date > DATE 2013-05-03", true}, - {"tx.date < DATE 2013-05-03", true}, - {"tx.date <= DATE 2013-05-03", true}, - {"tx.date >= DATE 2013-05-03", true}, - {"tx.date >= DAT 2013-05-03", false}, - {"tx.date <= DATE2013-05-03", false}, - {"tx.date <= DATE -05-03", false}, - {"tx.date >= DATE 20130503", false}, - {"tx.date >= DATE 2013+01-03", false}, - // incorrect year, month, day - {"tx.date >= DATE 0013-01-03", false}, - {"tx.date >= DATE 2013-31-03", false}, - {"tx.date >= DATE 2013-01-83", false}, - - {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, - {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, - {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME2013-05-03T14:45:00Z", false}, - {"tx.date = IME 2013-05-03T14:45:00Z", false}, - {"tx.date = TIME 2013-05-:45:00Z", false}, - {"tx.date >= TIME 
2013-05-03T14:45:00", false}, - {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, - {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, - - {"account.balance=100", true}, - {"account.balance >= 200", true}, - {"account.balance >= -300", false}, - {"account.balance >>= 400", false}, - {"account.balance=33.22.1", false}, - - {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, - {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, - } - - for _, c := range cases { - _, err := query.New(c.query) - if c.valid { - assert.NoErrorf(t, err, "Query was '%s'", c.query) - } else { - assert.Errorf(t, err, "Query was '%s'", c.query) - } - } -} diff --git a/pubsub/query/query.go b/pubsub/query/query.go deleted file mode 100644 index 84c3aa180..000000000 --- a/pubsub/query/query.go +++ /dev/null @@ -1,345 +0,0 @@ -// Package query provides a parser for a custom query format: -// -// abci.invoice.number=22 AND abci.invoice.owner=Ivan -// -// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. -// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics -// -// It has a support for numbers (integer and floating point), dates and times. -package query - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "github.com/tendermint/tmlibs/pubsub" -) - -// Query holds the query string and the query parser. -type Query struct { - str string - parser *QueryParser -} - -// Condition represents a single condition within a query and consists of tag -// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7"). -type Condition struct { - Tag string - Op Operator - Operand interface{} -} - -// New parses the given string and returns a query or error if the string is -// invalid. 
-func New(s string) (*Query, error) { - p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} - p.Init() - if err := p.Parse(); err != nil { - return nil, err - } - return &Query{str: s, parser: p}, nil -} - -// MustParse turns the given string into a query or panics; for tests or others -// cases where you know the string is valid. -func MustParse(s string) *Query { - q, err := New(s) - if err != nil { - panic(fmt.Sprintf("failed to parse %s: %v", s, err)) - } - return q -} - -// String returns the original string. -func (q *Query) String() string { - return q.str -} - -// Operator is an operator that defines some kind of relation between tag and -// operand (equality, etc.). -type Operator uint8 - -const ( - // "<=" - OpLessEqual Operator = iota - // ">=" - OpGreaterEqual - // "<" - OpLess - // ">" - OpGreater - // "=" - OpEqual - // "CONTAINS"; used to check if a string contains a certain sub string. - OpContains -) - -// Conditions returns a list of conditions. -func (q *Query) Conditions() []Condition { - conditions := make([]Condition, 0) - - buffer, begin, end := q.parser.Buffer, 0, 0 - - var tag string - var op Operator - - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") - for _, token := range q.parser.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - case ruletag: - tag = buffer[begin:end] - case rulele: - op = OpLessEqual - case rulege: - op = OpGreaterEqual - case rulel: - op = OpLess - case ruleg: - op = OpGreater - case ruleequal: - op = OpEqual - case rulecontains: - op = OpContains - case rulevalue: - // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes}) - case rulenumber: - number := buffer[begin:end] - if strings.Contains(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) - } - conditions = append(conditions, Condition{tag, op, value}) - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) - } - conditions = append(conditions, Condition{tag, op, value}) - } - case ruletime: - value, err := time.Parse(time.RFC3339, buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - conditions = append(conditions, Condition{tag, op, value}) - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - conditions = append(conditions, Condition{tag, op, value}) - } - } - - return conditions -} - -// Matches returns true if the query matches the given set of tags, false otherwise. -// -// For example, query "name=John" matches tags = {"name": "John"}. More -// examples could be found in parser_test.go and query_test.go. 
-func (q *Query) Matches(tags pubsub.TagMap) bool { - if tags.Len() == 0 { - return false - } - - buffer, begin, end := q.parser.Buffer, 0, 0 - - var tag string - var op Operator - - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") - for _, token := range q.parser.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - case ruletag: - tag = buffer[begin:end] - case rulele: - op = OpLessEqual - case rulege: - op = OpGreaterEqual - case rulel: - op = OpLess - case ruleg: - op = OpGreater - case ruleequal: - op = OpEqual - case rulecontains: - op = OpContains - case rulevalue: - // strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - - // see if the triplet (tag, operator, operand) matches any tag - // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { - return false - } - case rulenumber: - number := buffer[begin:end] - if strings.Contains(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } - case ruletime: - value, err := time.Parse(time.RFC3339, buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - if !match(tag, op, 
reflect.ValueOf(value), tags) { - return false - } - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } - } - - return true -} - -// match returns true if the given triplet (tag, operator, operand) matches any tag. -// -// First, it looks up the tag in tags and if it finds one, tries to compare the -// value from it to the operand using the operator. -// -// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } -func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool { - // look up the tag from the query in tags - value, ok := tags.Get(tag) - if !ok { - return false - } - switch operand.Kind() { - case reflect.Struct: // time - operandAsTime := operand.Interface().(time.Time) - v, ok := value.(time.Time) - if !ok { // if value from tags is not time.Time - return false - } - switch op { - case OpLessEqual: - return v.Before(operandAsTime) || v.Equal(operandAsTime) - case OpGreaterEqual: - return v.Equal(operandAsTime) || v.After(operandAsTime) - case OpLess: - return v.Before(operandAsTime) - case OpGreater: - return v.After(operandAsTime) - case OpEqual: - return v.Equal(operandAsTime) - } - case reflect.Float64: - operandFloat64 := operand.Interface().(float64) - var v float64 - // try our best to convert value from tags to float64 - switch vt := value.(type) { - case float64: - v = vt - case float32: - v = float64(vt) - case int: - v = float64(vt) - case int8: - v = float64(vt) - case int16: - v = float64(vt) - case int32: - v = float64(vt) - case int64: - v = float64(vt) - default: // fail for all other types - panic(fmt.Sprintf("Incomparable types: %T (%v) vs float64 (%v)", value, value, operandFloat64)) - } - switch op { - case OpLessEqual: - return v 
<= operandFloat64 - case OpGreaterEqual: - return v >= operandFloat64 - case OpLess: - return v < operandFloat64 - case OpGreater: - return v > operandFloat64 - case OpEqual: - return v == operandFloat64 - } - case reflect.Int64: - operandInt := operand.Interface().(int64) - var v int64 - // try our best to convert value from tags to int64 - switch vt := value.(type) { - case int64: - v = vt - case int8: - v = int64(vt) - case int16: - v = int64(vt) - case int32: - v = int64(vt) - case int: - v = int64(vt) - case float64: - v = int64(vt) - case float32: - v = int64(vt) - default: // fail for all other types - panic(fmt.Sprintf("Incomparable types: %T (%v) vs int64 (%v)", value, value, operandInt)) - } - switch op { - case OpLessEqual: - return v <= operandInt - case OpGreaterEqual: - return v >= operandInt - case OpLess: - return v < operandInt - case OpGreater: - return v > operandInt - case OpEqual: - return v == operandInt - } - case reflect.String: - v, ok := value.(string) - if !ok { // if value from tags is not string - return false - } - switch op { - case OpEqual: - return v == operand.String() - case OpContains: - return strings.Contains(v, operand.String()) - } - default: - panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) - } - - return false -} diff --git a/pubsub/query/query.peg b/pubsub/query/query.peg deleted file mode 100644 index 739892e4f..000000000 --- a/pubsub/query/query.peg +++ /dev/null @@ -1,33 +0,0 @@ -package query - -type QueryParser Peg { -} - -e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. - -condition <- tag ' '* (le ' '* (number / time / date) - / ge ' '* (number / time / date) - / l ' '* (number / time / date) - / g ' '* (number / time / date) - / equal ' '* (number / time / date / value) - / contains ' '* value - ) - -tag <- < (![ \t\n\r\\()"'=><] .)+ > -value <- < '\'' (!["'] .)* '\''> -number <- < ('0' - / [1-9] digit* ('.' digit*)?) 
> -digit <- [0-9] -time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > -date <- "DATE " < year '-' month '-' day > -year <- ('1' / '2') digit digit digit -month <- ('0' / '1') digit -day <- ('0' / '1' / '2' / '3') digit -and <- "AND" - -equal <- "=" -contains <- "CONTAINS" -le <- "<=" -ge <- ">=" -l <- "<" -g <- ">" diff --git a/pubsub/query/query.peg.go b/pubsub/query/query.peg.go deleted file mode 100644 index c86e4a47f..000000000 --- a/pubsub/query/query.peg.go +++ /dev/null @@ -1,1553 +0,0 @@ -// nolint -package query - -import ( - "fmt" - "math" - "sort" - "strconv" -) - -const endSymbol rune = 1114112 - -/* The rule types inferred from the grammar are below. */ -type pegRule uint8 - -const ( - ruleUnknown pegRule = iota - rulee - rulecondition - ruletag - rulevalue - rulenumber - ruledigit - ruletime - ruledate - ruleyear - rulemonth - ruleday - ruleand - ruleequal - rulecontains - rulele - rulege - rulel - ruleg - rulePegText -) - -var rul3s = [...]string{ - "Unknown", - "e", - "condition", - "tag", - "value", - "number", - "digit", - "time", - "date", - "year", - "month", - "day", - "and", - "equal", - "contains", - "le", - "ge", - "l", - "g", - "PegText", -} - -type token32 struct { - pegRule - begin, end uint32 -} - -func (t *token32) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) -} - -type node32 struct { - token32 - up, next *node32 -} - -func (node *node32) print(pretty bool, buffer string) { - var print func(node *node32, depth int) - print = func(node *node32, depth int) { - for node != nil { - for c := 0; c < depth; c++ { - fmt.Printf(" ") - } - rule := rul3s[node.pegRule] - quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) - if !pretty { - fmt.Printf("%v %v\n", rule, quote) - } else { - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) - } - if node.up != nil { - print(node.up, depth+1) - } - 
node = node.next - } - } - print(node, 0) -} - -func (node *node32) Print(buffer string) { - node.print(false, buffer) -} - -func (node *node32) PrettyPrint(buffer string) { - node.print(true, buffer) -} - -type tokens32 struct { - tree []token32 -} - -func (t *tokens32) Trim(length uint32) { - t.tree = t.tree[:length] -} - -func (t *tokens32) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens32) AST() *node32 { - type element struct { - node *node32 - down *element - } - tokens := t.Tokens() - var stack *element - for _, token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - if stack != nil { - return stack.node - } - return nil -} - -func (t *tokens32) PrintSyntaxTree(buffer string) { - t.AST().Print(buffer) -} - -func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { - t.AST().PrettyPrint(buffer) -} - -func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { - if tree := t.tree; int(index) >= len(tree) { - expanded := make([]token32, 2*len(tree)) - copy(expanded, tree) - t.tree = expanded - } - t.tree[index] = token32{ - pegRule: rule, - begin: begin, - end: end, - } -} - -func (t *tokens32) Tokens() []token32 { - return t.tree -} - -type QueryParser struct { - Buffer string - buffer []rune - rules [20]func() bool - parse func(rule ...int) error - reset func() - Pretty bool - tokens32 -} - -func (p *QueryParser) Parse(rule ...int) error { - return p.parse(rule...) 
-} - -func (p *QueryParser) Reset() { - p.reset() -} - -type textPosition struct { - line, symbol int -} - -type textPositionMap map[int]textPosition - -func translatePositions(buffer []rune, positions []int) textPositionMap { - length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 - sort.Ints(positions) - -search: - for i, c := range buffer { - if c == '\n' { - line, symbol = line+1, 0 - } else { - symbol++ - } - if i == positions[j] { - translations[positions[j]] = textPosition{line, symbol} - for j++; j < length; j++ { - if i != positions[j] { - continue search - } - } - break search - } - } - - return translations -} - -type parseError struct { - p *QueryParser - max token32 -} - -func (e *parseError) Error() string { - tokens, error := []token32{e.max}, "\n" - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - translations := translatePositions(e.p.buffer, positions) - format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" - if e.p.Pretty { - format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" - } - for _, token := range tokens { - begin, end := int(token.begin), int(token.end) - error += fmt.Sprintf(format, - rul3s[token.pegRule], - translations[begin].line, translations[begin].symbol, - translations[end].line, translations[end].symbol, - strconv.Quote(string(e.p.buffer[begin:end]))) - } - - return error -} - -func (p *QueryParser) PrintSyntaxTree() { - if p.Pretty { - p.tokens32.PrettyPrintSyntaxTree(p.Buffer) - } else { - p.tokens32.PrintSyntaxTree(p.Buffer) - } -} - -func (p *QueryParser) Init() { - var ( - max token32 - position, tokenIndex uint32 - buffer []rune - ) - p.reset = func() { - max = token32{} - position, tokenIndex = 0, 0 - - p.buffer = []rune(p.Buffer) - if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != 
endSymbol { - p.buffer = append(p.buffer, endSymbol) - } - buffer = p.buffer - } - p.reset() - - _rules := p.rules - tree := tokens32{tree: make([]token32, math.MaxInt16)} - p.parse = func(rule ...int) error { - r := 1 - if len(rule) > 0 { - r = rule[0] - } - matches := p.rules[r]() - p.tokens32 = tree - if matches { - p.Trim(tokenIndex) - return nil - } - return &parseError{p, max} - } - - add := func(rule pegRule, begin uint32) { - tree.Add(rule, begin, position, tokenIndex) - tokenIndex++ - if begin != position && position > max.end { - max = token32{rule, begin, position} - } - } - - matchDot := func() bool { - if buffer[position] != endSymbol { - position++ - return true - } - return false - } - - /*matchChar := func(c byte) bool { - if buffer[position] == c { - position++ - return true - } - return false - }*/ - - /*matchRange := func(lower byte, upper byte) bool { - if c := buffer[position]; c >= lower && c <= upper { - position++ - return true - } - return false - }*/ - - _rules = [...]func() bool{ - nil, - /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ - func() bool { - position0, tokenIndex0 := position, tokenIndex - { - position1 := position - if buffer[position] != rune('"') { - goto l0 - } - position++ - if !_rules[rulecondition]() { - goto l0 - } - l2: - { - position3, tokenIndex3 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l3 - } - position++ - l4: - { - position5, tokenIndex5 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l5 - } - position++ - goto l4 - l5: - position, tokenIndex = position5, tokenIndex5 - } - { - position6 := position - { - position7, tokenIndex7 := position, tokenIndex - if buffer[position] != rune('a') { - goto l8 - } - position++ - goto l7 - l8: - position, tokenIndex = position7, tokenIndex7 - if buffer[position] != rune('A') { - goto l3 - } - position++ - } - l7: - { - position9, tokenIndex9 := position, tokenIndex - if buffer[position] != rune('n') { - goto l10 - 
} - position++ - goto l9 - l10: - position, tokenIndex = position9, tokenIndex9 - if buffer[position] != rune('N') { - goto l3 - } - position++ - } - l9: - { - position11, tokenIndex11 := position, tokenIndex - if buffer[position] != rune('d') { - goto l12 - } - position++ - goto l11 - l12: - position, tokenIndex = position11, tokenIndex11 - if buffer[position] != rune('D') { - goto l3 - } - position++ - } - l11: - add(ruleand, position6) - } - if buffer[position] != rune(' ') { - goto l3 - } - position++ - l13: - { - position14, tokenIndex14 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l14 - } - position++ - goto l13 - l14: - position, tokenIndex = position14, tokenIndex14 - } - if !_rules[rulecondition]() { - goto l3 - } - goto l2 - l3: - position, tokenIndex = position3, tokenIndex3 - } - if buffer[position] != rune('"') { - goto l0 - } - position++ - { - position15, tokenIndex15 := position, tokenIndex - if !matchDot() { - goto l15 - } - goto l0 - l15: - position, tokenIndex = position15, tokenIndex15 - } - add(rulee, position1) - } - return true - l0: - position, tokenIndex = position0, tokenIndex0 - return false - }, - /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ - func() bool { - position16, tokenIndex16 := position, tokenIndex - { - 
position17 := position - { - position18 := position - { - position19 := position - { - position22, tokenIndex22 := position, tokenIndex - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l22 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l22 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l22 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l22 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l22 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l22 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l22 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l22 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l22 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l22 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l22 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l22 - } - position++ - break - } - } - - goto l16 - l22: - position, tokenIndex = position22, tokenIndex22 - } - if !matchDot() { - goto l16 - } - l20: - { - position21, tokenIndex21 := position, tokenIndex - { - position24, tokenIndex24 := position, tokenIndex - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l24 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l24 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l24 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l24 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l24 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l24 - } - position++ - break - case '(': - if 
buffer[position] != rune('(') { - goto l24 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l24 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l24 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l24 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l24 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l24 - } - position++ - break - } - } - - goto l21 - l24: - position, tokenIndex = position24, tokenIndex24 - } - if !matchDot() { - goto l21 - } - goto l20 - l21: - position, tokenIndex = position21, tokenIndex21 - } - add(rulePegText, position19) - } - add(ruletag, position18) - } - l26: - { - position27, tokenIndex27 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l27 - } - position++ - goto l26 - l27: - position, tokenIndex = position27, tokenIndex27 - } - { - position28, tokenIndex28 := position, tokenIndex - { - position30 := position - if buffer[position] != rune('<') { - goto l29 - } - position++ - if buffer[position] != rune('=') { - goto l29 - } - position++ - add(rulele, position30) - } - l31: - { - position32, tokenIndex32 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l32 - } - position++ - goto l31 - l32: - position, tokenIndex = position32, tokenIndex32 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l29 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l29 - } - break - default: - if !_rules[rulenumber]() { - goto l29 - } - break - } - } - - goto l28 - l29: - position, tokenIndex = position28, tokenIndex28 - { - position35 := position - if buffer[position] != rune('>') { - goto l34 - } - position++ - if buffer[position] != rune('=') { - goto l34 - } - position++ - add(rulege, position35) - } - l36: - { - position37, tokenIndex37 := position, tokenIndex - if buffer[position] != rune(' ') { - goto 
l37 - } - position++ - goto l36 - l37: - position, tokenIndex = position37, tokenIndex37 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l34 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l34 - } - break - default: - if !_rules[rulenumber]() { - goto l34 - } - break - } - } - - goto l28 - l34: - position, tokenIndex = position28, tokenIndex28 - { - switch buffer[position] { - case '=': - { - position40 := position - if buffer[position] != rune('=') { - goto l16 - } - position++ - add(ruleequal, position40) - } - l41: - { - position42, tokenIndex42 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l42 - } - position++ - goto l41 - l42: - position, tokenIndex = position42, tokenIndex42 - } - { - switch buffer[position] { - case '\'': - if !_rules[rulevalue]() { - goto l16 - } - break - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '>': - { - position44 := position - if buffer[position] != rune('>') { - goto l16 - } - position++ - add(ruleg, position44) - } - l45: - { - position46, tokenIndex46 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l46 - } - position++ - goto l45 - l46: - position, tokenIndex = position46, tokenIndex46 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '<': - { - position48 := position - if buffer[position] != rune('<') { - goto l16 - } - position++ - add(rulel, position48) - } - l49: - { - position50, tokenIndex50 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l50 - } - position++ - goto l49 - l50: - position, tokenIndex = position50, tokenIndex50 - } - { 
- switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - default: - { - position52 := position - { - position53, tokenIndex53 := position, tokenIndex - if buffer[position] != rune('c') { - goto l54 - } - position++ - goto l53 - l54: - position, tokenIndex = position53, tokenIndex53 - if buffer[position] != rune('C') { - goto l16 - } - position++ - } - l53: - { - position55, tokenIndex55 := position, tokenIndex - if buffer[position] != rune('o') { - goto l56 - } - position++ - goto l55 - l56: - position, tokenIndex = position55, tokenIndex55 - if buffer[position] != rune('O') { - goto l16 - } - position++ - } - l55: - { - position57, tokenIndex57 := position, tokenIndex - if buffer[position] != rune('n') { - goto l58 - } - position++ - goto l57 - l58: - position, tokenIndex = position57, tokenIndex57 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l57: - { - position59, tokenIndex59 := position, tokenIndex - if buffer[position] != rune('t') { - goto l60 - } - position++ - goto l59 - l60: - position, tokenIndex = position59, tokenIndex59 - if buffer[position] != rune('T') { - goto l16 - } - position++ - } - l59: - { - position61, tokenIndex61 := position, tokenIndex - if buffer[position] != rune('a') { - goto l62 - } - position++ - goto l61 - l62: - position, tokenIndex = position61, tokenIndex61 - if buffer[position] != rune('A') { - goto l16 - } - position++ - } - l61: - { - position63, tokenIndex63 := position, tokenIndex - if buffer[position] != rune('i') { - goto l64 - } - position++ - goto l63 - l64: - position, tokenIndex = position63, tokenIndex63 - if buffer[position] != rune('I') { - goto l16 - } - position++ - } - l63: - { - position65, tokenIndex65 := position, tokenIndex - if buffer[position] != rune('n') { - goto l66 - } - position++ - goto l65 - 
l66: - position, tokenIndex = position65, tokenIndex65 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l65: - { - position67, tokenIndex67 := position, tokenIndex - if buffer[position] != rune('s') { - goto l68 - } - position++ - goto l67 - l68: - position, tokenIndex = position67, tokenIndex67 - if buffer[position] != rune('S') { - goto l16 - } - position++ - } - l67: - add(rulecontains, position52) - } - l69: - { - position70, tokenIndex70 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l70 - } - position++ - goto l69 - l70: - position, tokenIndex = position70, tokenIndex70 - } - if !_rules[rulevalue]() { - goto l16 - } - break - } - } - - } - l28: - add(rulecondition, position17) - } - return true - l16: - position, tokenIndex = position16, tokenIndex16 - return false - }, - /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ - nil, - /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ - func() bool { - position72, tokenIndex72 := position, tokenIndex - { - position73 := position - { - position74 := position - if buffer[position] != rune('\'') { - goto l72 - } - position++ - l75: - { - position76, tokenIndex76 := position, tokenIndex - { - position77, tokenIndex77 := position, tokenIndex - { - position78, tokenIndex78 := position, tokenIndex - if buffer[position] != rune('"') { - goto l79 - } - position++ - goto l78 - l79: - position, tokenIndex = position78, tokenIndex78 - if buffer[position] != rune('\'') { - goto l77 - } - position++ - } - l78: - goto l76 - l77: - position, tokenIndex = position77, tokenIndex77 - } - if !matchDot() { - goto l76 - } - goto l75 - l76: - position, tokenIndex = position76, tokenIndex76 - } - if buffer[position] != rune('\'') { - goto l72 - } - position++ - add(rulePegText, position74) - } - add(rulevalue, position73) - } - 
return true - l72: - position, tokenIndex = position72, tokenIndex72 - return false - }, - /* 4 number <- <<('0' / ([1-9] digit* ('.' digit*)?))>> */ - func() bool { - position80, tokenIndex80 := position, tokenIndex - { - position81 := position - { - position82 := position - { - position83, tokenIndex83 := position, tokenIndex - if buffer[position] != rune('0') { - goto l84 - } - position++ - goto l83 - l84: - position, tokenIndex = position83, tokenIndex83 - if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l80 - } - position++ - l85: - { - position86, tokenIndex86 := position, tokenIndex - if !_rules[ruledigit]() { - goto l86 - } - goto l85 - l86: - position, tokenIndex = position86, tokenIndex86 - } - { - position87, tokenIndex87 := position, tokenIndex - if buffer[position] != rune('.') { - goto l87 - } - position++ - l89: - { - position90, tokenIndex90 := position, tokenIndex - if !_rules[ruledigit]() { - goto l90 - } - goto l89 - l90: - position, tokenIndex = position90, tokenIndex90 - } - goto l88 - l87: - position, tokenIndex = position87, tokenIndex87 - } - l88: - } - l83: - add(rulePegText, position82) - } - add(rulenumber, position81) - } - return true - l80: - position, tokenIndex = position80, tokenIndex80 - return false - }, - /* 5 digit <- <[0-9]> */ - func() bool { - position91, tokenIndex91 := position, tokenIndex - { - position92 := position - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l91 - } - position++ - add(ruledigit, position92) - } - return true - l91: - position, tokenIndex = position91, tokenIndex91 - return false - }, - /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ - func() bool { - position93, tokenIndex93 := position, tokenIndex - { - position94 := position - { - position95, tokenIndex95 := position, tokenIndex - if buffer[position] != rune('t') { 
- goto l96 - } - position++ - goto l95 - l96: - position, tokenIndex = position95, tokenIndex95 - if buffer[position] != rune('T') { - goto l93 - } - position++ - } - l95: - { - position97, tokenIndex97 := position, tokenIndex - if buffer[position] != rune('i') { - goto l98 - } - position++ - goto l97 - l98: - position, tokenIndex = position97, tokenIndex97 - if buffer[position] != rune('I') { - goto l93 - } - position++ - } - l97: - { - position99, tokenIndex99 := position, tokenIndex - if buffer[position] != rune('m') { - goto l100 - } - position++ - goto l99 - l100: - position, tokenIndex = position99, tokenIndex99 - if buffer[position] != rune('M') { - goto l93 - } - position++ - } - l99: - { - position101, tokenIndex101 := position, tokenIndex - if buffer[position] != rune('e') { - goto l102 - } - position++ - goto l101 - l102: - position, tokenIndex = position101, tokenIndex101 - if buffer[position] != rune('E') { - goto l93 - } - position++ - } - l101: - if buffer[position] != rune(' ') { - goto l93 - } - position++ - { - position103 := position - if !_rules[ruleyear]() { - goto l93 - } - if buffer[position] != rune('-') { - goto l93 - } - position++ - if !_rules[rulemonth]() { - goto l93 - } - if buffer[position] != rune('-') { - goto l93 - } - position++ - if !_rules[ruleday]() { - goto l93 - } - if buffer[position] != rune('T') { - goto l93 - } - position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - if buffer[position] != rune(':') { - goto l93 - } - position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - if buffer[position] != rune(':') { - goto l93 - } - position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - { - position104, tokenIndex104 := position, tokenIndex - { - position106, tokenIndex106 := position, tokenIndex - if buffer[position] != rune('-') { - goto l107 - } - position++ - goto l106 - l107: - position, 
tokenIndex = position106, tokenIndex106 - if buffer[position] != rune('+') { - goto l105 - } - position++ - } - l106: - if !_rules[ruledigit]() { - goto l105 - } - if !_rules[ruledigit]() { - goto l105 - } - if buffer[position] != rune(':') { - goto l105 - } - position++ - if !_rules[ruledigit]() { - goto l105 - } - if !_rules[ruledigit]() { - goto l105 - } - goto l104 - l105: - position, tokenIndex = position104, tokenIndex104 - if buffer[position] != rune('Z') { - goto l93 - } - position++ - } - l104: - add(rulePegText, position103) - } - add(ruletime, position94) - } - return true - l93: - position, tokenIndex = position93, tokenIndex93 - return false - }, - /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ - func() bool { - position108, tokenIndex108 := position, tokenIndex - { - position109 := position - { - position110, tokenIndex110 := position, tokenIndex - if buffer[position] != rune('d') { - goto l111 - } - position++ - goto l110 - l111: - position, tokenIndex = position110, tokenIndex110 - if buffer[position] != rune('D') { - goto l108 - } - position++ - } - l110: - { - position112, tokenIndex112 := position, tokenIndex - if buffer[position] != rune('a') { - goto l113 - } - position++ - goto l112 - l113: - position, tokenIndex = position112, tokenIndex112 - if buffer[position] != rune('A') { - goto l108 - } - position++ - } - l112: - { - position114, tokenIndex114 := position, tokenIndex - if buffer[position] != rune('t') { - goto l115 - } - position++ - goto l114 - l115: - position, tokenIndex = position114, tokenIndex114 - if buffer[position] != rune('T') { - goto l108 - } - position++ - } - l114: - { - position116, tokenIndex116 := position, tokenIndex - if buffer[position] != rune('e') { - goto l117 - } - position++ - goto l116 - l117: - position, tokenIndex = position116, tokenIndex116 - if buffer[position] != rune('E') { - goto l108 - } - position++ - } - l116: - if buffer[position] != rune(' ') { - 
goto l108 - } - position++ - { - position118 := position - if !_rules[ruleyear]() { - goto l108 - } - if buffer[position] != rune('-') { - goto l108 - } - position++ - if !_rules[rulemonth]() { - goto l108 - } - if buffer[position] != rune('-') { - goto l108 - } - position++ - if !_rules[ruleday]() { - goto l108 - } - add(rulePegText, position118) - } - add(ruledate, position109) - } - return true - l108: - position, tokenIndex = position108, tokenIndex108 - return false - }, - /* 8 year <- <(('1' / '2') digit digit digit)> */ - func() bool { - position119, tokenIndex119 := position, tokenIndex - { - position120 := position - { - position121, tokenIndex121 := position, tokenIndex - if buffer[position] != rune('1') { - goto l122 - } - position++ - goto l121 - l122: - position, tokenIndex = position121, tokenIndex121 - if buffer[position] != rune('2') { - goto l119 - } - position++ - } - l121: - if !_rules[ruledigit]() { - goto l119 - } - if !_rules[ruledigit]() { - goto l119 - } - if !_rules[ruledigit]() { - goto l119 - } - add(ruleyear, position120) - } - return true - l119: - position, tokenIndex = position119, tokenIndex119 - return false - }, - /* 9 month <- <(('0' / '1') digit)> */ - func() bool { - position123, tokenIndex123 := position, tokenIndex - { - position124 := position - { - position125, tokenIndex125 := position, tokenIndex - if buffer[position] != rune('0') { - goto l126 - } - position++ - goto l125 - l126: - position, tokenIndex = position125, tokenIndex125 - if buffer[position] != rune('1') { - goto l123 - } - position++ - } - l125: - if !_rules[ruledigit]() { - goto l123 - } - add(rulemonth, position124) - } - return true - l123: - position, tokenIndex = position123, tokenIndex123 - return false - }, - /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ - func() bool { - position127, tokenIndex127 := position, tokenIndex - { - position128 := position - { - switch buffer[position] { - case '3': - if 
buffer[position] != rune('3') { - goto l127 - } - position++ - break - case '2': - if buffer[position] != rune('2') { - goto l127 - } - position++ - break - case '1': - if buffer[position] != rune('1') { - goto l127 - } - position++ - break - default: - if buffer[position] != rune('0') { - goto l127 - } - position++ - break - } - } - - if !_rules[ruledigit]() { - goto l127 - } - add(ruleday, position128) - } - return true - l127: - position, tokenIndex = position127, tokenIndex127 - return false - }, - /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ - nil, - /* 12 equal <- <'='> */ - nil, - /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ - nil, - /* 14 le <- <('<' '=')> */ - nil, - /* 15 ge <- <('>' '=')> */ - nil, - /* 16 l <- <'<'> */ - nil, - /* 17 g <- <'>'> */ - nil, - nil, - } - p.rules = _rules -} diff --git a/pubsub/query/query_test.go b/pubsub/query/query_test.go deleted file mode 100644 index 7d3ac6ba4..000000000 --- a/pubsub/query/query_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package query_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/pubsub" - "github.com/tendermint/tmlibs/pubsub/query" -) - -func TestMatches(t *testing.T) { - const shortForm = "2006-Jan-02" - txDate, err := time.Parse(shortForm, "2017-Jan-01") - require.NoError(t, err) - txTime, err := time.Parse(time.RFC3339, "2018-05-03T14:45:00Z") - require.NoError(t, err) - - testCases := []struct { - s string - tags map[string]interface{} - err bool - matches bool - }{ - {"tm.events.type='NewBlock'", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true}, - - {"tx.gas > 7", map[string]interface{}{"tx.gas": 8}, false, true}, - {"tx.gas > 7 AND tx.gas < 9", map[string]interface{}{"tx.gas": 8}, false, true}, - {"body.weight >= 3.5", map[string]interface{}{"body.weight": 3.5}, false, true}, - 
{"account.balance < 1000.0", map[string]interface{}{"account.balance": 900}, false, true}, - {"apples.kg <= 4", map[string]interface{}{"apples.kg": 4.0}, false, true}, - {"body.weight >= 4.5", map[string]interface{}{"body.weight": float32(4.5)}, false, true}, - {"oranges.kg < 4 AND watermellons.kg > 10", map[string]interface{}{"oranges.kg": 3, "watermellons.kg": 12}, false, true}, - {"peaches.kg < 4", map[string]interface{}{"peaches.kg": 5}, false, false}, - - {"tx.date > DATE 2017-01-01", map[string]interface{}{"tx.date": time.Now()}, false, true}, - {"tx.date = DATE 2017-01-01", map[string]interface{}{"tx.date": txDate}, false, true}, - {"tx.date = DATE 2018-01-01", map[string]interface{}{"tx.date": txDate}, false, false}, - - {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": time.Now()}, false, true}, - {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": txTime}, false, false}, - - {"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true}, - {"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false}, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - if !tc.err { - require.Nil(t, err) - } - - if tc.matches { - assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags) - } else { - assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags) - } - } -} - -func TestMustParse(t *testing.T) { - assert.Panics(t, func() { query.MustParse("=") }) - assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) -} - -func TestConditions(t *testing.T) { - txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") - require.NoError(t, err) - - testCases := []struct { - s string - conditions []query.Condition - }{ - {s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", 
Op: query.OpEqual, Operand: "NewBlock"}}}, - {s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}}, - {s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}}, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - require.Nil(t, err) - - assert.Equal(t, tc.conditions, q.Conditions()) - } -} From f27c358a8a7bb82c3c5e12012775dec3e762853b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 21 May 2018 17:37:53 -0400 Subject: [PATCH 446/515] revert protobuf recompile --- db/remotedb/proto/defs.pb.go | 340 +++++++++-------------------------- 1 file changed, 88 insertions(+), 252 deletions(-) diff --git a/db/remotedb/proto/defs.pb.go b/db/remotedb/proto/defs.pb.go index 86b8f9b8d..4d9f0b272 100644 --- a/db/remotedb/proto/defs.pb.go +++ b/db/remotedb/proto/defs.pb.go @@ -1,6 +1,22 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: defs.proto +/* +Package protodb is a generated protocol buffer package. 
+ +It is generated from these files: + defs.proto + +It has these top-level messages: + Batch + Operation + Entity + Nothing + Domain + Iterator + Stats + Init +*/ package protodb import proto "github.com/golang/protobuf/proto" @@ -42,40 +58,16 @@ var Operation_Type_value = map[string]int32{ func (x Operation_Type) String() string { return proto.EnumName(Operation_Type_name, int32(x)) } -func (Operation_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{1, 0} -} +func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } type Batch struct { - Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Batch) Reset() { *m = Batch{} } -func (m *Batch) String() string { return proto.CompactTextString(m) } -func (*Batch) ProtoMessage() {} -func (*Batch) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{0} -} -func (m *Batch) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Batch.Unmarshal(m, b) -} -func (m *Batch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Batch.Marshal(b, m, deterministic) -} -func (dst *Batch) XXX_Merge(src proto.Message) { - xxx_messageInfo_Batch.Merge(dst, src) -} -func (m *Batch) XXX_Size() int { - return xxx_messageInfo_Batch.Size(m) -} -func (m *Batch) XXX_DiscardUnknown() { - xxx_messageInfo_Batch.DiscardUnknown(m) + Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` } -var xxx_messageInfo_Batch proto.InternalMessageInfo +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Batch) GetOps() []*Operation { if m != nil { @@ -85,36 +77,14 @@ func (m 
*Batch) GetOps() []*Operation { } type Operation struct { - Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` - Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{1} -} -func (m *Operation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Operation.Unmarshal(m, b) -} -func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Operation.Marshal(b, m, deterministic) -} -func (dst *Operation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Operation.Merge(dst, src) -} -func (m *Operation) XXX_Size() int { - return xxx_messageInfo_Operation.Size(m) -} -func (m *Operation) XXX_DiscardUnknown() { - xxx_messageInfo_Operation.DiscardUnknown(m) + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` } -var xxx_messageInfo_Operation proto.InternalMessageInfo +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *Operation) GetEntity() *Entity { if m != nil { @@ -131,42 +101,20 @@ func (m *Operation) GetType() Operation_Type { } type Entity struct { - Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" 
json:"value,omitempty"` - Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` - Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` - Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` - CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` + Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` + Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` } -func (m *Entity) Reset() { *m = Entity{} } -func (m *Entity) String() string { return proto.CompactTextString(m) } -func (*Entity) ProtoMessage() {} -func (*Entity) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{2} -} -func (m *Entity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Entity.Unmarshal(m, b) -} -func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Entity.Marshal(b, m, deterministic) -} -func (dst *Entity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Entity.Merge(dst, src) -} -func (m *Entity) XXX_Size() int { - return xxx_messageInfo_Entity.Size(m) -} -func (m *Entity) XXX_DiscardUnknown() { - xxx_messageInfo_Entity.DiscardUnknown(m) -} - -var xxx_messageInfo_Entity proto.InternalMessageInfo +func (m *Entity) 
Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *Entity) GetId() int32 { if m != nil { @@ -225,66 +173,22 @@ func (m *Entity) GetCreatedAt() int64 { } type Nothing struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Nothing) Reset() { *m = Nothing{} } -func (m *Nothing) String() string { return proto.CompactTextString(m) } -func (*Nothing) ProtoMessage() {} -func (*Nothing) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{3} -} -func (m *Nothing) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Nothing.Unmarshal(m, b) -} -func (m *Nothing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Nothing.Marshal(b, m, deterministic) -} -func (dst *Nothing) XXX_Merge(src proto.Message) { - xxx_messageInfo_Nothing.Merge(dst, src) -} -func (m *Nothing) XXX_Size() int { - return xxx_messageInfo_Nothing.Size(m) -} -func (m *Nothing) XXX_DiscardUnknown() { - xxx_messageInfo_Nothing.DiscardUnknown(m) } -var xxx_messageInfo_Nothing proto.InternalMessageInfo +func (m *Nothing) Reset() { *m = Nothing{} } +func (m *Nothing) String() string { return proto.CompactTextString(m) } +func (*Nothing) ProtoMessage() {} +func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type Domain struct { - Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` } -func (m *Domain) 
Reset() { *m = Domain{} } -func (m *Domain) String() string { return proto.CompactTextString(m) } -func (*Domain) ProtoMessage() {} -func (*Domain) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{4} -} -func (m *Domain) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Domain.Unmarshal(m, b) -} -func (m *Domain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Domain.Marshal(b, m, deterministic) -} -func (dst *Domain) XXX_Merge(src proto.Message) { - xxx_messageInfo_Domain.Merge(dst, src) -} -func (m *Domain) XXX_Size() int { - return xxx_messageInfo_Domain.Size(m) -} -func (m *Domain) XXX_DiscardUnknown() { - xxx_messageInfo_Domain.DiscardUnknown(m) -} - -var xxx_messageInfo_Domain proto.InternalMessageInfo +func (m *Domain) Reset() { *m = Domain{} } +func (m *Domain) String() string { return proto.CompactTextString(m) } +func (*Domain) ProtoMessage() {} +func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *Domain) GetStart() []byte { if m != nil { @@ -301,38 +205,16 @@ func (m *Domain) GetEnd() []byte { } type Iterator struct { - Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` - Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` } -func (m *Iterator) Reset() { *m = Iterator{} } -func (m *Iterator) String() string { return 
proto.CompactTextString(m) } -func (*Iterator) ProtoMessage() {} -func (*Iterator) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{5} -} -func (m *Iterator) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Iterator.Unmarshal(m, b) -} -func (m *Iterator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Iterator.Marshal(b, m, deterministic) -} -func (dst *Iterator) XXX_Merge(src proto.Message) { - xxx_messageInfo_Iterator.Merge(dst, src) -} -func (m *Iterator) XXX_Size() int { - return xxx_messageInfo_Iterator.Size(m) -} -func (m *Iterator) XXX_DiscardUnknown() { - xxx_messageInfo_Iterator.DiscardUnknown(m) -} - -var xxx_messageInfo_Iterator proto.InternalMessageInfo +func (m *Iterator) Reset() { *m = Iterator{} } +func (m *Iterator) String() string { return proto.CompactTextString(m) } +func (*Iterator) ProtoMessage() {} +func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *Iterator) GetDomain() *Domain { if m != nil { @@ -363,36 +245,14 @@ func (m *Iterator) GetValue() []byte { } type Stats struct { - Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` } -func (m *Stats) Reset() { *m = Stats{} } -func (m *Stats) String() string { return proto.CompactTextString(m) } -func (*Stats) ProtoMessage() {} -func (*Stats) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{6} -} -func 
(m *Stats) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stats.Unmarshal(m, b) -} -func (m *Stats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stats.Marshal(b, m, deterministic) -} -func (dst *Stats) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stats.Merge(dst, src) -} -func (m *Stats) XXX_Size() int { - return xxx_messageInfo_Stats.Size(m) -} -func (m *Stats) XXX_DiscardUnknown() { - xxx_messageInfo_Stats.DiscardUnknown(m) -} - -var xxx_messageInfo_Stats proto.InternalMessageInfo +func (m *Stats) Reset() { *m = Stats{} } +func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *Stats) GetData() map[string]string { if m != nil { @@ -409,37 +269,15 @@ func (m *Stats) GetTimeAt() int64 { } type Init struct { - Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` - Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Init) Reset() { *m = Init{} } -func (m *Init) String() string { return proto.CompactTextString(m) } -func (*Init) ProtoMessage() {} -func (*Init) Descriptor() ([]byte, []int) { - return fileDescriptor_defs_7303098f1c775f7f, []int{7} -} -func (m *Init) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Init.Unmarshal(m, b) -} -func (m *Init) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Init.Marshal(b, m, deterministic) -} -func (dst *Init) XXX_Merge(src proto.Message) { - xxx_messageInfo_Init.Merge(dst, src) -} -func (m *Init) XXX_Size() int { - return xxx_messageInfo_Init.Size(m) -} -func (m *Init) XXX_DiscardUnknown() { - xxx_messageInfo_Init.DiscardUnknown(m) + Type string 
`protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` } -var xxx_messageInfo_Init proto.InternalMessageInfo +func (m *Init) Reset() { *m = Init{} } +func (m *Init) String() string { return proto.CompactTextString(m) } +func (*Init) ProtoMessage() {} +func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *Init) GetType() string { if m != nil { @@ -470,7 +308,6 @@ func init() { proto.RegisterType((*Domain)(nil), "protodb.Domain") proto.RegisterType((*Iterator)(nil), "protodb.Iterator") proto.RegisterType((*Stats)(nil), "protodb.Stats") - proto.RegisterMapType((map[string]string)(nil), "protodb.Stats.DataEntry") proto.RegisterType((*Init)(nil), "protodb.Init") proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value) } @@ -483,9 +320,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// DBClient is the client API for DB service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for DB service + type DBClient interface { Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) @@ -513,7 +349,7 @@ func NewDBClient(cc *grpc.ClientConn) DBClient { func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := c.cc.Invoke(ctx, "/protodb.DB/init", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -522,7 +358,7 @@ func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := c.cc.Invoke(ctx, "/protodb.DB/get", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -530,7 +366,7 @@ func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) } func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[0], "/protodb.DB/getStream", opts...) + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) if err != nil { return nil, err } @@ -562,7 +398,7 @@ func (x *dBGetStreamClient) Recv() (*Entity, error) { func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { out := new(Entity) - err := c.cc.Invoke(ctx, "/protodb.DB/has", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -571,7 +407,7 @@ func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/set", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -580,7 +416,7 @@ func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/setSync", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -589,7 +425,7 @@ func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOpt func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/delete", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -598,7 +434,7 @@ func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOpti func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -606,7 +442,7 @@ func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.Call } func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { - stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[1], "/protodb.DB/iterator", opts...) + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) if err != nil { return nil, err } @@ -638,7 +474,7 @@ func (x *dBIteratorClient) Recv() (*Iterator, error) { } func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { - stream, err := c.cc.NewStream(ctx, &_DB_serviceDesc.Streams[2], "/protodb.DB/reverseIterator", opts...) + stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) 
if err != nil { return nil, err } @@ -671,7 +507,7 @@ func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { out := new(Stats) - err := c.cc.Invoke(ctx, "/protodb.DB/stats", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -680,7 +516,7 @@ func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOpti func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -689,7 +525,7 @@ func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallO func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { out := new(Nothing) - err := c.cc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, opts...) + err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -1033,9 +869,9 @@ var _DB_serviceDesc = grpc.ServiceDesc{ Metadata: "defs.proto", } -func init() { proto.RegisterFile("defs.proto", fileDescriptor_defs_7303098f1c775f7f) } +func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } -var fileDescriptor_defs_7303098f1c775f7f = []byte{ +var fileDescriptor0 = []byte{ // 606 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, 0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, From d82699bf438fe3c991f47df08ec07d8283f86b2a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 21 May 2018 20:15:32 -0400 Subject: [PATCH 447/515] tmhash --- merkle/simple_map.go | 4 ++-- merkle/simple_tree.go | 6 +++--- merkle/tmhash/hash.go | 41 ++++++++++++++++++++++++++++++++++++++ merkle/tmhash/hash_test.go | 23 +++++++++++++++++++++ 4 files changed, 69 insertions(+), 5 deletions(-) create mode 100644 merkle/tmhash/hash.go create mode 100644 merkle/tmhash/hash_test.go diff --git a/merkle/simple_map.go b/merkle/simple_map.go index cd38de761..bd5c88d85 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -2,7 +2,7 @@ package merkle import ( cmn "github.com/tendermint/tmlibs/common" - "golang.org/x/crypto/ripemd160" + "github.com/tendermint/tmlibs/merkle/tmhash" ) type SimpleMap struct { @@ -63,7 +63,7 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs { type KVPair cmn.KVPair func (kv KVPair) Hash() []byte { - hasher := ripemd160.New() + hasher := tmhash.New() err := encodeByteSlice(hasher, kv.Key) if err != nil { panic(err) diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 9bdf52cb2..6bd80f55f 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -25,11 +25,11 @@ For larger datasets, use IAVLTree. 
package merkle import ( - "golang.org/x/crypto/ripemd160" + "github.com/tendermint/tmlibs/merkle/tmhash" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { - var hasher = ripemd160.New() + var hasher = tmhash.New() err := encodeByteSlice(hasher, left) if err != nil { panic(err) @@ -68,7 +68,7 @@ func SimpleHashFromByteslices(bzs [][]byte) []byte { } func SimpleHashFromBytes(bz []byte) []byte { - hasher := ripemd160.New() + hasher := tmhash.New() hasher.Write(bz) return hasher.Sum(nil) } diff --git a/merkle/tmhash/hash.go b/merkle/tmhash/hash.go new file mode 100644 index 000000000..de69c406f --- /dev/null +++ b/merkle/tmhash/hash.go @@ -0,0 +1,41 @@ +package tmhash + +import ( + "crypto/sha256" + "hash" +) + +var ( + Size = 20 + BlockSize = sha256.BlockSize +) + +type sha256trunc struct { + sha256 hash.Hash +} + +func (h sha256trunc) Write(p []byte) (n int, err error) { + return h.sha256.Write(p) +} +func (h sha256trunc) Sum(b []byte) []byte { + shasum := h.sha256.Sum(b) + return shasum[:Size] +} + +func (h sha256trunc) Reset() { + h.sha256.Reset() +} + +func (h sha256trunc) Size() int { + return Size +} + +func (h sha256trunc) BlockSize() int { + return h.sha256.BlockSize() +} + +func New() hash.Hash { + return sha256trunc{ + sha256: sha256.New(), + } +} diff --git a/merkle/tmhash/hash_test.go b/merkle/tmhash/hash_test.go new file mode 100644 index 000000000..c9e80f2bc --- /dev/null +++ b/merkle/tmhash/hash_test.go @@ -0,0 +1,23 @@ +package tmhash_test + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tmlibs/merkle/tmhash" +) + +func TestHash(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.New() + hasher.Write(testVector) + bz := hasher.Sum(nil) + + hasher = sha256.New() + hasher.Write(testVector) + bz2 := hasher.Sum(nil) + bz2 = bz2[:20] + + assert.Equal(t, bz, bz2) +} From d72de8ba685b84d145c272fb8ecbad6973e73884 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 21 
May 2018 20:26:54 -0400 Subject: [PATCH 448/515] fix test --- merkle/simple_map_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index c9c871354..6e1004db2 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -17,37 +17,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", strHasher("value1")) - assert.Equal(t, "acdb4f121bc6f25041eb263ab463f1cd79236a32", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "3dafc06a52039d029be57c75c9d16356a4256ef4", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value2")) - assert.Equal(t, "b8cbf5adee8c524e14f531da9b49adbbbd66fffa", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "03eb5cfdff646bc4e80fec844e72fd248a1c6b2c", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value1")) db.Set("key2", strHasher("value2")) - assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", strHasher("value2")) // NOTE: out of order db.Set("key1", strHasher("value1")) - assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", strHasher("value1")) db.Set("key2", strHasher("value2")) db.Set("key3", strHasher("value3")) - assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() 
db.Set("key2", strHasher("value2")) // NOTE: out of order db.Set("key1", strHasher("value1")) db.Set("key3", strHasher("value3")) - assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } From 06cffa6acb95d61af02396e1ec33d24c4dae2fab Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 21 May 2018 21:38:02 -0400 Subject: [PATCH 449/515] changelog and version --- CHANGELOG.md | 4 +++- version/version.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d1cfceb9..1e41faced 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ # Changelog -## 0.8.3 (develop branch) +## 0.8.3 + +*May 21, 2018* FEATURES: diff --git a/version/version.go b/version/version.go index 107f5cf3a..40472c9a9 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.2" +const Version = "0.8.3" From d24a30858e23ace7aeee306e2ce652aba8021631 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 24 May 2018 00:10:35 -0400 Subject: [PATCH 450/515] no gogo proto --- Makefile | 9 ++++----- common/types.pb.go | 31 ++++++++++++++----------------- common/types.proto | 7 ------- 3 files changed, 18 insertions(+), 29 deletions(-) diff --git a/Makefile b/Makefile index efef45996..3c79e6803 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,11 @@ GOTOOLS = \ github.com/golang/dep/cmd/dep \ - github.com/gogo/protobuf/protoc-gen-gogo \ - github.com/gogo/protobuf/gogoproto \ + github.com/golang/protobuf/protoc-gen-go \ github.com/square/certstrap # github.com/alecthomas/gometalinter.v2 \ -GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-gogo -INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf +GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-go +INCLUDE = -I=. 
-I=${GOPATH}/src all: check get_vendor_deps protoc grpc_dbserver build test install metalinter @@ -19,7 +18,7 @@ protoc: ## If you get the following error, ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" ## See https://stackoverflow.com/a/25518702 - protoc $(INCLUDE) --gogo_out=plugins=grpc:. common/*.proto + protoc $(INCLUDE) --go_out=plugins=grpc:. common/*.proto @echo "--> adding nolint declarations to protobuf generated files" @awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new @mv common/types.pb.go.new common/types.pb.go diff --git a/common/types.pb.go b/common/types.pb.go index 047b7aee2..f6645602a 100644 --- a/common/types.pb.go +++ b/common/types.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: common/types.proto /* @@ -14,10 +14,9 @@ It has these top-level messages: //nolint: gas package common -import proto "github.com/gogo/protobuf/proto" +import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -28,7 +27,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Define these here for compatibility but use tmlibs/common.KVPair. 
type KVPair struct { @@ -39,7 +38,7 @@ type KVPair struct { func (m *KVPair) Reset() { *m = KVPair{} } func (m *KVPair) String() string { return proto.CompactTextString(m) } func (*KVPair) ProtoMessage() {} -func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *KVPair) GetKey() []byte { if m != nil { @@ -58,13 +57,13 @@ func (m *KVPair) GetValue() []byte { // Define these here for compatibility but use tmlibs/common.KI64Pair. type KI64Pair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` } func (m *KI64Pair) Reset() { *m = KI64Pair{} } func (m *KI64Pair) String() string { return proto.CompactTextString(m) } func (*KI64Pair) ProtoMessage() {} -func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *KI64Pair) GetKey() []byte { if m != nil { @@ -85,17 +84,15 @@ func init() { proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") } -func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) } -var fileDescriptorTypes = []byte{ - // 137 bytes of a gzipped FileDescriptorProto +var fileDescriptor0 = []byte{ + // 107 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, - 0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, - 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68, - 0x53, 0x32, 0xe0, 
0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e, - 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, - 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99, - 0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00, + 0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb, + 0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00, } diff --git a/common/types.proto b/common/types.proto index 94abcccc3..8406fcfdd 100644 --- a/common/types.proto +++ b/common/types.proto @@ -1,13 +1,6 @@ syntax = "proto3"; package common; -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -// NOTE: Try really hard not to use custom types, -// it's often complicated, broken, nor not worth it. 
-import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - - //---------------------------------------- // Abstract types From 44f1bdb0d55cc6527e38d0a7aab406e2580f56a4 Mon Sep 17 00:00:00 2001 From: Zaki Manian Date: Fri, 1 Jun 2018 11:56:00 +0200 Subject: [PATCH 451/515] Bech32 (#216) * Add support for regular bech32 to tmlibs * Add bech32 to gopkg.toml --- Gopkg.lock | 23 +++++++---------------- Gopkg.toml | 3 +++ bech32/bech32.go | 28 ++++++++++++++++++++++++++++ bech32/bech32_test.go | 31 +++++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+), 16 deletions(-) create mode 100644 bech32/bech32.go create mode 100644 bech32/bech32_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 96df808a5..f0eaee18d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,12 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + branch = "master" + name = "github.com/btcsuite/btcutil" + packages = ["bech32"] + revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" + [[projects]] name = "github.com/davecgh/go-spew" packages = ["spew"] @@ -41,16 +47,6 @@ revision = "817915b46b97fd7bb80e8ab6b69f01a53ac3eebf" version = "v1.6.0" -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "proto", - "protoc-gen-gogo/descriptor" - ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - [[projects]] name = "github.com/golang/protobuf" packages = [ @@ -196,11 +192,6 @@ ] revision = "b89cc31ef7977104127d34c1bd31ebd1a9db2199" -[[projects]] - name = "golang.org/x/crypto" - packages = ["ripemd160"] - revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686" - [[projects]] branch = "master" name = "golang.org/x/net" @@ -285,6 +276,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "8aa4ea7ef6d0ff170127eb5bca89c6c37c767d58047159cfd26a431c5cd5e7ad" + inputs-digest = "e0c0af880b57928787ea78a820abefd2759e6aee4cba18e67ab36b80e62ad581" solver-name = "gps-cdcl" 
solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index ef3f055a8..ff42087f8 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -61,6 +61,9 @@ name = "github.com/stretchr/testify" version = "1.2.1" +[[constraint]] + name = "github.com/btcsuite/btcutil" + branch ="master" [prune] go-tests = true unused-packages = true diff --git a/bech32/bech32.go b/bech32/bech32.go new file mode 100644 index 000000000..3c778309c --- /dev/null +++ b/bech32/bech32.go @@ -0,0 +1,28 @@ +package bech32 + +import ( + "github.com/btcsuite/btcutil/bech32" +) + +//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 +func ConvertAndEncode(hrp string, data []byte) (string, error) { + converted, err := bech32.ConvertBits(data, 8, 5, true) + if err != nil { + return "", err + } + return bech32.Encode(hrp, converted) + +} + +//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes +func DecodeAndConvert(bech string) (string, []byte, error) { + hrp, data, err := bech32.Decode(bech) + if err != nil { + return "", nil, err + } + converted, err := bech32.ConvertBits(data, 5, 8, false) + if err != nil { + return "", nil, err + } + return hrp, converted, nil +} diff --git a/bech32/bech32_test.go b/bech32/bech32_test.go new file mode 100644 index 000000000..7cdebba2b --- /dev/null +++ b/bech32/bech32_test.go @@ -0,0 +1,31 @@ +package bech32_test + +import ( + "bytes" + "crypto/sha256" + "testing" + + "github.com/tendermint/tmlibs/bech32" +) + +func TestEncodeAndDecode(t *testing.T) { + + sum := sha256.Sum256([]byte("hello world\n")) + + bech, err := bech32.ConvertAndEncode("shasum", sum[:]) + + if err != nil { + t.Error(err) + } + hrp, data, err := bech32.DecodeAndConvert(bech) + + if err != nil { + t.Error(err) + } + if hrp != "shasum" { + t.Error("Invalid hrp") + } + if bytes.Compare(data, sum[:]) != 0 { + t.Error("Invalid decode") + } +} From e0985bf56686e2182b07cfc8a11afde33bb855d1 Mon Sep 17 00:00:00 2001 From: 
Anton Kaliaev Date: Sun, 3 Jun 2018 11:56:57 +0400 Subject: [PATCH 452/515] flush on stop & function to close group as opposite to OpenGroup --- autofile/group.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/autofile/group.go b/autofile/group.go index 652c33310..1ae545032 100644 --- a/autofile/group.go +++ b/autofile/group.go @@ -71,8 +71,9 @@ type Group struct { // and their dependencies. } +// OpenGroup creates a new Group with head at headPath. It returns an error if +// it fails to open head file. func OpenGroup(headPath string) (g *Group, err error) { - dir := path.Dir(headPath) head, err := OpenAutoFile(headPath) if err != nil { @@ -98,16 +99,27 @@ func OpenGroup(headPath string) (g *Group, err error) { return } +// OnStart implements Service by starting the goroutine that checks file and +// group limits. func (g *Group) OnStart() error { - g.BaseService.OnStart() go g.processTicks() return nil } -// NOTE: g.Head must be closed separately +// OnStop implements Service by stopping the goroutine described above. +// NOTE: g.Head must be closed separately using Close. func (g *Group) OnStop() { - g.BaseService.OnStop() g.ticker.Stop() + g.Flush() // flush any uncommitted data +} + +// Close closes the head file. The group must be stopped by this moment. +func (g *Group) Close() { + g.Flush() // flush any uncommitted data + + g.mtx.Lock() + _ = g.Head.closeFile() + g.mtx.Unlock() } // SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. 
From 22541b881de70981a23fd2348e222979d2c89f0a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 5 Jun 2018 17:12:33 -0700 Subject: [PATCH 453/515] changelog and version --- CHANGELOG.md | 8 ++++++++ version/version.go | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e41faced..9f1ba808b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.8.4 + +*June 5, 2018* + +IMPROVEMENTS: + + - [autofile] Flush on Stop; Close() method to Flush and close file + ## 0.8.3 *May 21, 2018* diff --git a/version/version.go b/version/version.go index 40472c9a9..f36a2a474 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.8.3" +const Version = "0.8.4" From 8bbe43aa333e8637275afdbd9367fcbe1cf99bdf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 5 Jun 2018 21:50:03 -0700 Subject: [PATCH 454/515] update changelog --- CHANGELOG.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efd0c9e14..46dca65aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,18 @@ *TBD* BREAKING: + - [events, pubsub] Removed - moved to github.com/tendermint/tendermint + - [merkle] Use 20-bytes of SHA256 instead of RIPEMD160. NOTE: this package is + moving to github.com/tendermint/go-crypto ! + - [common] Remove gogoproto from KVPair types + +FEATURES: + + - [db/remotedb] New DB type using an external CLevelDB process via + GRPC + - [autofile] logjack command for piping stdin to a rotating file + - [bech32] New package. 
NOTE: should move out of here - it's just two small + functions ## 0.8.4 @@ -20,9 +32,6 @@ IMPROVEMENTS: FEATURES: - - [db/remotedb] New DB type using an external CLevelDB process via - GRPC - - [autofile] logjack command for piping stdin to a rotating file - [common] ASCIITrim() ## 0.8.2 (April 23rd, 2018) From f0c44d1bd0145b3f57e4dc5db98b8232fba73edb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 6 Jun 2018 11:52:59 +0400 Subject: [PATCH 455/515] [autofile] include call to group#Close in tests --- autofile/group_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/autofile/group_test.go b/autofile/group_test.go index 1a1111961..2ffedcc27 100644 --- a/autofile/group_test.go +++ b/autofile/group_test.go @@ -32,6 +32,7 @@ func createTestGroup(t *testing.T, headSizeLimit int64) *Group { } func destroyTestGroup(t *testing.T, g *Group) { + g.Close() err := os.RemoveAll(g.Dir) require.NoError(t, err, "Error removing test Group directory") } From 1d66e34dc8f9524e72139dba6eb0ec87cf059fc6 Mon Sep 17 00:00:00 2001 From: ValarDragon Date: Sat, 9 Jun 2018 16:22:52 -0700 Subject: [PATCH 456/515] Bech32: Wrap error messages --- bech32/bech32.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bech32/bech32.go b/bech32/bech32.go index 3c778309c..a4db86d5f 100644 --- a/bech32/bech32.go +++ b/bech32/bech32.go @@ -2,13 +2,14 @@ package bech32 import ( "github.com/btcsuite/btcutil/bech32" + "github.com/pkg/errors" ) //ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 func ConvertAndEncode(hrp string, data []byte) (string, error) { converted, err := bech32.ConvertBits(data, 8, 5, true) if err != nil { - return "", err + return "", errors.Wrap(err, "encoding bech32 failed") } return bech32.Encode(hrp, converted) @@ -18,11 +19,11 @@ func ConvertAndEncode(hrp string, data []byte) (string, error) { func DecodeAndConvert(bech string) (string, []byte, error) { hrp, data, err := 
bech32.Decode(bech) if err != nil { - return "", nil, err + return "", nil, errors.Wrap(err, "decoding bech32 failed") } converted, err := bech32.ConvertBits(data, 5, 8, false) if err != nil { - return "", nil, err + return "", nil, errors.Wrap(err, "decoding bech32 failed") } return hrp, converted, nil } From 1b1c4cd94d151ff11181524c9257fb3111e70f19 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 6 Jun 2018 21:24:28 -0700 Subject: [PATCH 457/515] Reduce Errors T/Cause/Message into single Data --- common/async_test.go | 2 +- common/errors.go | 173 +++++++++++++++++++----------------------- common/errors_test.go | 64 ++++++++-------- 3 files changed, 108 insertions(+), 131 deletions(-) diff --git a/common/async_test.go b/common/async_test.go index 037afcaaa..f565b4bd3 100644 --- a/common/async_test.go +++ b/common/async_test.go @@ -135,7 +135,7 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val inte if err != nil { assert.Equal(t, err, taskResult.Error, taskName) } else if pnk != nil { - assert.Equal(t, pnk, taskResult.Error.(Error).Cause(), taskName) + assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName) } else { assert.Nil(t, taskResult.Error, taskName) } diff --git a/common/errors.go b/common/errors.go index 5992b2346..7259b29dc 100644 --- a/common/errors.go +++ b/common/errors.go @@ -6,106 +6,81 @@ import ( ) //---------------------------------------- -// Convenience methods +// Convenience method. -// ErrorWrap will just call .TraceFrom(), or create a new *cmnError. func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { - msg := Fmt(format, args...) if causeCmnError, ok := cause.(*cmnError); ok { - return causeCmnError.TraceFrom(1, msg) + msg := Fmt(format, args...) + return causeCmnError.Stacktrace().Trace(1, msg) + } else if cause == nil { + return newCmnError(FmtError{format, args}).Stacktrace() + } else { + // NOTE: causeCmnError is a typed nil here. + msg := Fmt(format, args...) 
+ return newCmnError(cause).Stacktrace().Trace(1, msg) } - // NOTE: cause may be nil. - // NOTE: do not use causeCmnError here, not the same as nil. - return newError(msg, cause, cause).Stacktrace() } //---------------------------------------- // Error & cmnError /* -Usage: + +Usage with arbitrary error data: ```go // Error construction - var someT = errors.New("Some err type") - var err1 error = NewErrorWithT(someT, "my message") + type MyError struct{} + var err1 error = NewErrorWithData(MyError{}, "my message") ... // Wrapping var err2 error = ErrorWrap(err1, "another message") if (err1 != err2) { panic("should be the same") ... // Error handling - switch err2.T() { - case someT: ... + switch err2.Data().(type){ + case MyError: ... default: ... } ``` - */ type Error interface { Error() string - Message() string Stacktrace() Error - Trace(format string, args ...interface{}) Error - TraceFrom(offset int, format string, args ...interface{}) Error - Cause() interface{} - WithT(t interface{}) Error - T() interface{} - Format(s fmt.State, verb rune) + Trace(offset int, format string, args ...interface{}) Error + Data() interface{} } -// New Error with no cause where the type is the format string of the message.. +// New Error with formatted message. +// The Error's Data will be a FmtError type. func NewError(format string, args ...interface{}) Error { - msg := Fmt(format, args...) - return newError(msg, nil, format) - + err := FmtError{format, args} + return newCmnError(err) } -// New Error with specified type and message. -func NewErrorWithT(t interface{}, format string, args ...interface{}) Error { - msg := Fmt(format, args...) - return newError(msg, nil, t) -} - -// NOTE: The name of a function "NewErrorWithCause()" implies that you are -// creating a new Error, yet, if the cause is an Error, creating a new Error to -// hold a ref to the old Error is probably *not* what you want to do. -// So, use ErrorWrap(cause, format, a...) 
instead, which returns the same error -// if cause is an Error. -// IF you must set an Error as the cause of an Error, -// then you can use the WithCauser interface to do so manually. -// e.g. (error).(tmlibs.WithCauser).WithCause(causeError) - -type WithCauser interface { - WithCause(cause interface{}) Error +// New Error with specified data. +func NewErrorWithData(data interface{}) Error { + return newCmnError(data) } type cmnError struct { - msg string // first msg which also appears in msg - cause interface{} // underlying cause (or panic object) - t interface{} // for switching on error + data interface{} // associated data msgtraces []msgtraceItem // all messages traced stacktrace []uintptr // first stack trace } -var _ WithCauser = &cmnError{} var _ Error = &cmnError{} // NOTE: do not expose. -func newError(msg string, cause interface{}, t interface{}) *cmnError { +func newCmnError(data interface{}) *cmnError { return &cmnError{ - msg: msg, - cause: cause, - t: t, + data: data, msgtraces: nil, stacktrace: nil, } } -func (err *cmnError) Message() string { - return err.msg -} - +// Implements error. func (err *cmnError) Error() string { return fmt.Sprintf("%v", err) } @@ -121,42 +96,17 @@ func (err *cmnError) Stacktrace() Error { } // Add tracing information with msg. -func (err *cmnError) Trace(format string, args ...interface{}) Error { - msg := Fmt(format, args...) - return err.doTrace(msg, 0) -} - -// Same as Trace, but traces the line `offset` calls out. -// If n == 0, the behavior is identical to Trace(). -func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error { +// Set n=0 unless wrapped with some function, then n > 0. +func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error { msg := Fmt(format, args...) return err.doTrace(msg, offset) } -// Return last known cause. -// NOTE: The meaning of "cause" is left for the caller to define. -// There exists no "canonical" definition of "cause". 
-// Instead of blaming, try to handle it, or organize it. -func (err *cmnError) Cause() interface{} { - return err.cause -} - -// Overwrites the Error's cause. -func (err *cmnError) WithCause(cause interface{}) Error { - err.cause = cause - return err -} - -// Overwrites the Error's type. -func (err *cmnError) WithT(t interface{}) Error { - err.t = t - return err -} - -// Return the "type" of this message, primarily for switching -// to handle this Error. -func (err *cmnError) T() interface{} { - return err.t +// Return the "data" of this error. +// Data could be used for error handling/switching, +// or for holding general error/debug information. +func (err *cmnError) Data() interface{} { + return err.data } func (err *cmnError) doTrace(msg string, n int) Error { @@ -177,12 +127,8 @@ func (err *cmnError) Format(s fmt.State, verb rune) { default: if s.Flag('#') { s.Write([]byte("--= Error =--\n")) - // Write msg. - s.Write([]byte(fmt.Sprintf("Message: %s\n", err.msg))) - // Write cause. - s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause))) - // Write type. - s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t))) + // Write data. + s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data))) // Write msg trace items. s.Write([]byte(fmt.Sprintf("Msg Traces:\n"))) for i, msgtrace := range err.msgtraces { @@ -200,11 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) { s.Write([]byte("--= /Error =--\n")) } else { // Write msg. - if err.cause != nil { - s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc? - } else { - s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc? - } + s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc? } } } @@ -232,6 +174,45 @@ func (mti msgtraceItem) String() string { ) } +//---------------------------------------- +// fmt error + +/* + +FmtError is the data type for NewError() (e.g. 
NewError().Data().(FmtError)) +Theoretically it could be used to switch on the format string. + +```go + // Error construction + var err1 error = NewError("invalid username %v", "BOB") + var err2 error = NewError("another kind of error") + ... + // Error handling + switch err1.Data().(cmn.FmtError).Format { + case "invalid username %v": ... + case "another kind of error": ... + default: ... + } +``` +*/ +type FmtError struct { + format string + args []interface{} +} + +func (fe FmtError) Error() string { + return fmt.Sprintf(fe.format, fe.args...) +} + +func (fe FmtError) String() string { + return fmt.Sprintf("FmtError{format:%v,args:%v}", + fe.format, fe.args) +} + +func (fe FmtError) Format() string { + return fe.format +} + //---------------------------------------- // Panic wrappers // XXX DEPRECATED diff --git a/common/errors_test.go b/common/errors_test.go index 2c5234f9f..16aede225 100644 --- a/common/errors_test.go +++ b/common/errors_test.go @@ -25,11 +25,9 @@ func TestErrorPanic(t *testing.T) { var err = capturePanic() - assert.Equal(t, pnk{"something"}, err.Cause()) - assert.Equal(t, pnk{"something"}, err.T()) - assert.Equal(t, "This is the message in ErrorWrap(r, message).", err.Message()) - assert.Equal(t, "Error{`This is the message in ErrorWrap(r, message).` (cause: {something})}", fmt.Sprintf("%v", err)) - assert.Contains(t, fmt.Sprintf("%#v", err), "Message: This is the message in ErrorWrap(r, message).") + assert.Equal(t, pnk{"something"}, err.Data()) + assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -37,11 +35,9 @@ func TestErrorWrapSomething(t *testing.T) { var err = ErrorWrap("something", "formatter%v%v", 0, 1) - assert.Equal(t, "something", err.Cause()) - assert.Equal(t, "something", err.T()) - assert.Equal(t, "formatter01", err.Message()) - assert.Equal(t, 
"Error{`formatter01` (cause: something)}", fmt.Sprintf("%v", err)) - assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Equal(t, "something", err.Data()) + assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err)) + assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -49,11 +45,11 @@ func TestErrorWrapNothing(t *testing.T) { var err = ErrorWrap(nil, "formatter%v%v", 0, 1) - assert.Equal(t, nil, err.Cause()) - assert.Equal(t, nil, err.T()) - assert.Equal(t, "formatter01", err.Message()) - assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) - assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -61,11 +57,11 @@ func TestErrorNewError(t *testing.T) { var err = NewError("formatter%v%v", 0, 1) - assert.Equal(t, nil, err.Cause()) - assert.Equal(t, "formatter%v%v", err.T()) - assert.Equal(t, "formatter01", err.Message()) - assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) - assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") } @@ -73,26 +69,26 @@ func TestErrorNewErrorWithStacktrace(t *testing.T) { var err = NewError("formatter%v%v", 0, 1).Stacktrace() - assert.Equal(t, nil, err.Cause()) - assert.Equal(t, "formatter%v%v", err.T()) - assert.Equal(t, 
"formatter01", err.Message()) - assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) - assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } func TestErrorNewErrorWithTrace(t *testing.T) { var err = NewError("formatter%v%v", 0, 1) - err.Trace("trace %v", 1) - err.Trace("trace %v", 2) - err.Trace("trace %v", 3) - - assert.Equal(t, nil, err.Cause()) - assert.Equal(t, "formatter%v%v", err.T()) - assert.Equal(t, "formatter01", err.Message()) - assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err)) - assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err)) + err.Trace(0, "trace %v", 1) + err.Trace(0, "trace %v", 2) + err.Trace(0, "trace %v", 3) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) dump := fmt.Sprintf("%#v", err) assert.NotContains(t, dump, "Stack Trace") assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump) From 21726a6853873ae28e5a0a40bcd80fd96324d044 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 13 Jun 2018 23:33:39 -0700 Subject: [PATCH 458/515] Add ColoredBytes() and update DebugDB --- CHANGELOG.md | 3 +++ common/colors.go | 12 +++++++++++ db/debug_db.go | 56 ++++++++++++++++++++++++++++++++++-------------- 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46dca65aa..e017cc561 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ BREAKING: - [merkle] Use 20-bytes of SHA256 instead of RIPEMD160. 
NOTE: this package is moving to github.com/tendermint/go-crypto ! - [common] Remove gogoproto from KVPair types + - [common] Error simplification, #220 FEATURES: @@ -17,6 +18,8 @@ FEATURES: - [autofile] logjack command for piping stdin to a rotating file - [bech32] New package. NOTE: should move out of here - it's just two small functions + - [common] ColoredBytes([]byte) string for printing mixed ascii and bytes + - [db] DebugDB uses ColoredBytes() ## 0.8.4 diff --git a/common/colors.go b/common/colors.go index 85e592248..049ce7a50 100644 --- a/common/colors.go +++ b/common/colors.go @@ -81,3 +81,15 @@ func Cyan(args ...interface{}) string { func White(args ...interface{}) string { return treatAll(ANSIFgWhite, args...) } + +func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { + s := "" + for _, b := range data { + if 0x21 <= b && b < 0x7F { + s += textColor(string(b)) + } else { + s += bytesColor(Fmt("%02X", b)) + } + } + return s +} diff --git a/db/debug_db.go b/db/debug_db.go index 7666ed9fd..a3e785c24 100644 --- a/db/debug_db.go +++ b/db/debug_db.go @@ -33,7 +33,9 @@ func (ddb debugDB) Mutex() *sync.Mutex { return nil } // Implements DB. func (ddb debugDB) Get(key []byte) (value []byte) { defer func() { - fmt.Printf("%v.Get(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Blue(_fmt("%X", value))) + fmt.Printf("%v.Get(%v) %v\n", ddb.label, + cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) }() value = ddb.db.Get(key) return @@ -42,68 +44,85 @@ func (ddb debugDB) Get(key []byte) (value []byte) { // Implements DB. func (ddb debugDB) Has(key []byte) (has bool) { defer func() { - fmt.Printf("%v.Has(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), has) + fmt.Printf("%v.Has(%v) %v\n", ddb.label, + cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has) }() return ddb.db.Has(key) } // Implements DB. 
func (ddb debugDB) Set(key []byte, value []byte) { - fmt.Printf("%v.Set(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + fmt.Printf("%v.Set(%v, %v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) ddb.db.Set(key, value) } // Implements DB. func (ddb debugDB) SetSync(key []byte, value []byte) { - fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) ddb.db.SetSync(key, value) } // Implements atomicSetDeleter. func (ddb debugDB) SetNoLock(key []byte, value []byte) { - fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) ddb.db.(atomicSetDeleter).SetNoLock(key, value) } // Implements atomicSetDeleter. func (ddb debugDB) SetNoLockSync(key []byte, value []byte) { - fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) ddb.db.(atomicSetDeleter).SetNoLockSync(key, value) } // Implements DB. func (ddb debugDB) Delete(key []byte) { - fmt.Printf("%v.Delete(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + fmt.Printf("%v.Delete(%v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) ddb.db.Delete(key) } // Implements DB. func (ddb debugDB) DeleteSync(key []byte) { - fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) ddb.db.DeleteSync(key) } // Implements atomicSetDeleter. 
func (ddb debugDB) DeleteNoLock(key []byte) { - fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) ddb.db.(atomicSetDeleter).DeleteNoLock(key) } // Implements atomicSetDeleter. func (ddb debugDB) DeleteNoLockSync(key []byte) { - fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key))) + fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, + cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) ddb.db.(atomicSetDeleter).DeleteNoLockSync(key) } // Implements DB. func (ddb debugDB) Iterator(start, end []byte) Iterator { - fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end))) + fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, + cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue), + cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue)) return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end)) } // Implements DB. func (ddb debugDB) ReverseIterator(start, end []byte) Iterator { - fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end))) + fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, + cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue), + cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue)) return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end)) } @@ -173,15 +192,17 @@ func (ditr debugIterator) Next() { // Implements Iterator. func (ditr debugIterator) Key() (key []byte) { - fmt.Printf("%v.itr.Key() %v\n", ditr.label, cmn.Cyan(_fmt("%X", key))) key = ditr.itr.Key() + fmt.Printf("%v.itr.Key() %v\n", ditr.label, + cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue)) return } // Implements Iterator. 
func (ditr debugIterator) Value() (value []byte) { - fmt.Printf("%v.itr.Value() %v\n", ditr.label, cmn.Blue(_fmt("%X", value))) value = ditr.itr.Value() + fmt.Printf("%v.itr.Value() %v\n", ditr.label, + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) return } @@ -209,13 +230,16 @@ func NewDebugBatch(label string, bch Batch) debugBatch { // Implements Batch. func (dbch debugBatch) Set(key, value []byte) { - fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value))) + fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, + cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), + cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) dbch.bch.Set(key, value) } // Implements Batch. func (dbch debugBatch) Delete(key []byte) { - fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, cmn.Red(_fmt("%X", key))) + fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, + cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) dbch.bch.Delete(key) } From fb7ec62b2925f48de159aeea73b254ae8c58a738 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 13 Jun 2018 23:44:38 -0700 Subject: [PATCH 459/515] Fix comment --- common/errors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/errors.go b/common/errors.go index 7259b29dc..5c31b8968 100644 --- a/common/errors.go +++ b/common/errors.go @@ -188,7 +188,7 @@ Theoretically it could be used to switch on the format string. var err2 error = NewError("another kind of error") ... // Error handling - switch err1.Data().(cmn.FmtError).Format { + switch err1.Data().(cmn.FmtError).Format() { case "invalid username %v": ... case "another kind of error": ... default: ... 
From 8ff95bf32c9f81f51d36cb5c78fffa70acc8b606 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Jun 2018 18:10:50 -0400 Subject: [PATCH 460/515] remove old code --- docs/_static/custom_collapsible_code.css | 17 ----------------- docs/_static/custom_collapsible_code.js | 10 ---------- 2 files changed, 27 deletions(-) delete mode 100644 docs/_static/custom_collapsible_code.css delete mode 100644 docs/_static/custom_collapsible_code.js diff --git a/docs/_static/custom_collapsible_code.css b/docs/_static/custom_collapsible_code.css deleted file mode 100644 index 695268a83..000000000 --- a/docs/_static/custom_collapsible_code.css +++ /dev/null @@ -1,17 +0,0 @@ -.toggle { - padding-bottom: 1em ; -} - -.toggle .header { - display: block; - clear: both; - cursor: pointer; -} - -.toggle .header:after { - content: " ▼"; -} - -.toggle .header.open:after { - content: " ▲"; -} \ No newline at end of file diff --git a/docs/_static/custom_collapsible_code.js b/docs/_static/custom_collapsible_code.js deleted file mode 100644 index f4ff22adc..000000000 --- a/docs/_static/custom_collapsible_code.js +++ /dev/null @@ -1,10 +0,0 @@ -let makeCodeBlocksCollapsible = function() { - $(".toggle > *").hide(); - $(".toggle .header").show(); - $(".toggle .header").click(function() { - $(this).parent().children().not(".header").toggle({"duration": 400}); - $(this).parent().children(".header").toggleClass("open"); - }); -}; -// we could use the }(); way if we would have access to jQuery in HEAD, i.e. 
we would need to force the theme -// to load jQuery before our custom scripts From c1548c78617616e2da2389b253e0d4312f2d5242 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Jun 2018 18:13:25 -0400 Subject: [PATCH 461/515] docs readme is vuepress landing page --- docs/README.md | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/README.md b/docs/README.md index 180acdcb9..9dbe91781 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,14 +1,17 @@ -Here lies our documentation. After making edits, run: +# Tendermint -``` -pip install -r requirements.txt -make html -``` +Welcome to the Tendermint Core documentation! The introduction below provides an overview to help you navigate to your area of interest. -to build the docs locally then open the file `_build/html/index.html` in your browser. +## Introduction -**WARNING:** This documentation is intended to be viewed at: +Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. In other words, a blockchain. -https://tendermint.readthedocs.io +Tendermint requires an application running over the Application Blockchain Interface (ABCI) - and comes packaged with an example application to do so. Follow the [installation instructions](./install) to get up and running quickly. For more details on [using tendermint](./using-tendermint) see that and the following sections. -and may contain broken internal links when viewed from Github. +## Networks + +Testnets can be setup manually on one or more machines, or automatically on one or more machine, using a variety of methods described in the [deploy testnets section](./deploy-testnets). For more information (and to join) about the Cosmos Network testnets, see [here](link). 
+ +## Application Development + +The first step to building application on Tendermint is to [install ABCI-CLI](./getting-started) and play with the example applications. From 003d8956a55f7090f33d627c8446637a8b2caf2c Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 22 Jun 2018 18:23:45 -0400 Subject: [PATCH 462/515] rm dead docs file --- docs/_templates/layout.html | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 docs/_templates/layout.html diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html deleted file mode 100644 index 736460bc0..000000000 --- a/docs/_templates/layout.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends "!layout.html" %} - -{% set css_files = css_files + ["_static/custom_collapsible_code.css"] %} - -# sadly, I didn't find a css style way to add custom JS to a list that is automagically added to head like CSS (above) #} -{% block extrahead %} - -{% endblock %} - -{% block footer %} - -{% endblock %} - - From 6c92a6f99a85ad9c9d4a17d25df10067671e6a33 Mon Sep 17 00:00:00 2001 From: zramsay Date: Sat, 23 Jun 2018 00:00:42 +0000 Subject: [PATCH 463/515] prepare RTD dismantling --- docs/index.rst | 66 ++---------------------------------------- docs/specification.rst | 21 -------------- 2 files changed, 2 insertions(+), 85 deletions(-) delete mode 100644 docs/specification.rst diff --git a/docs/index.rst b/docs/index.rst index e7d86bc2b..e308ae5f8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,68 +6,6 @@ Welcome to Tendermint! ====================== +This location for our documentation has been deprecated, please see: -.. image:: assets/tmint-logo-blue.png - :height: 200px - :width: 200px - :align: center - -Introduction ------------- - -.. toctree:: - :maxdepth: 1 - - introduction.md - install.md - getting-started.md - using-tendermint.md - deploy-testnets.md - ecosystem.md - -Tendermint Tools ----------------- - -.. the tools/ files are pulled in from the tools repo -.. 
see the bottom of conf.py -.. toctree:: - :maxdepth: 1 - - tools/docker.md - terraform-and-ansible.md - tools/benchmarking.md - tools/monitoring.md - -ABCI, Apps, Logging, Etc ------------------------- - -.. toctree:: - :maxdepth: 1 - - abci-cli.md - abci-spec.md - app-architecture.md - app-development.md - subscribing-to-events-via-websocket.md - indexing-transactions.md - how-to-read-logs.md - running-in-production.md - metrics.md - -Research & Specification ------------------------- - -.. toctree:: - :maxdepth: 1 - - determinism.md - transactional-semantics.md - -.. specification.md ## keep this file for legacy purpose. needs to be fixed though - -* For a deeper dive, see `this thesis `__. -* There is also the `original whitepaper `__, though it is now quite outdated. -* Readers might also be interested in the `Cosmos Whitepaper `__ which describes Tendermint, ABCI, and how to build a scalable, heterogeneous, cryptocurrency network. -* For example applications and related software built by the Tendermint team and other, see the `software ecosystem `__. - -Join the `community `__ to ask questions and discuss projects. +- https://tendermint.com/docs diff --git a/docs/specification.rst b/docs/specification.rst deleted file mode 100644 index 70ebf633f..000000000 --- a/docs/specification.rst +++ /dev/null @@ -1,21 +0,0 @@ -############# -Specification -############# - -Here you'll find details of the Tendermint specification. Tendermint's types are produced by `godoc `__. - -.. 
toctree:: - :maxdepth: 2 - - specification/block-structure.rst - specification/byzantine-consensus-algorithm.rst - specification/configuration.rst - specification/corruption.rst - specification/fast-sync.rst - specification/genesis.rst - specification/light-client-protocol.rst - specification/merkle.rst - specification/rpc.rst - specification/secure-p2p.rst - specification/validators.rst - specification/wire-protocol.rst From 835af6fcb9b65d15553416883b24db8928e547bd Mon Sep 17 00:00:00 2001 From: zramsay Date: Sat, 23 Jun 2018 00:06:04 +0000 Subject: [PATCH 464/515] link fixes --- docs/abci-cli.md | 6 +++--- docs/deploy-testnets.md | 2 +- docs/getting-started.md | 6 +++--- docs/how-to-read-logs.md | 10 +++++----- docs/using-tendermint.md | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/abci-cli.md b/docs/abci-cli.md index 14095d162..e6ae00376 100644 --- a/docs/abci-cli.md +++ b/docs/abci-cli.md @@ -125,7 +125,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in Golang](https://github.com/tendermint/abci/tree/master/server). See the -[list of other ABCI implementations](./ecosystem.html) for servers in +[list of other ABCI implementations](./ecosystem.md) for servers in other languages. The handler is specific to the application, and may be arbitrary, so @@ -324,6 +324,6 @@ connects to the app using three separate connections, each with its own pattern of messages. For more information, see the [application developers -guide](./app-development.html). For examples of running an ABCI app with -Tendermint, see the [getting started guide](./getting-started.html). +guide](./app-development.md). For examples of running an ABCI app with +Tendermint, see the [getting started guide](./getting-started.md). Next is the ABCI specification. 
diff --git a/docs/deploy-testnets.md b/docs/deploy-testnets.md index 0c74b2c5e..e5b300836 100644 --- a/docs/deploy-testnets.md +++ b/docs/deploy-testnets.md @@ -35,7 +35,7 @@ Here are the steps to setting up a testnet manually: After a few seconds, all the nodes should connect to each other and start making blocks! For more information, see the Tendermint Networks -section of [the guide to using Tendermint](using-tendermint.html). +section of [the guide to using Tendermint](./using-tendermint.md). But wait! Steps 3 and 4 are quite manual. Instead, use [this script](https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh), diff --git a/docs/getting-started.md b/docs/getting-started.md index 9767dae5a..aa42a7067 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -7,7 +7,7 @@ application you want to run. So, to run a complete blockchain that does something useful, you must start two programs: one is Tendermint Core, the other is your application, which can be written in any programming language. Recall from [the intro to -ABCI](introduction.html#ABCI-Overview) that Tendermint Core handles all +ABCI](./introduction.md#ABCI-Overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the application when they need to be validated, or when they're ready to be committed to a block. @@ -62,7 +62,7 @@ Tendermint before, use: If you have used Tendermint, you may want to reset the data for a new blockchain by running `tendermint unsafe_reset_all`. Then you can run `tendermint node` to start Tendermint, and connect to the app. For more -details, see [the guide on using Tendermint](./using-tendermint.html). +details, see [the guide on using Tendermint](./using-tendermint.md). You should see Tendermint making blocks! 
We can get the status of our Tendermint node as follows: @@ -228,7 +228,7 @@ But if we send a `1`, it works again: } For more details on the `broadcast_tx` API, see [the guide on using -Tendermint](./using-tendermint.html). +Tendermint](./using-tendermint.md). ## CounterJS - Example in Another Language diff --git a/docs/how-to-read-logs.md b/docs/how-to-read-logs.md index 92f563cff..db808aca7 100644 --- a/docs/how-to-read-logs.md +++ b/docs/how-to-read-logs.md @@ -53,8 +53,8 @@ Next follows a standard block creation cycle, where we enter a new round, propose a block, receive more than 2/3 of prevotes, then precommits and finally have a chance to commit a block. For details, please refer to [Consensus -Overview](introduction.html#consensus-overview) or [Byzantine Consensus -Algorithm](specification.html). +Overview](./introduction.md#consensus-overview) or [Byzantine Consensus +Algorithm](./spec/consensus). I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus I[10-04|13:54:30.393] enterPropose(91/0). Current: 91/0/RoundStepNewRound module=consensus @@ -100,7 +100,7 @@ Algorithm](specification.html). Here is the list of modules you may encounter in Tendermint's log and a little overview what they do. -- `abci-client` As mentioned in [Application Development Guide](app-development.md#abci-design), Tendermint acts as an ABCI +- `abci-client` As mentioned in [Application Development Guide](./app-development.md), Tendermint acts as an ABCI client with respect to the application and maintains 3 connections: mempool, consensus and query. The code used by Tendermint Core can be found [here](https://github.com/tendermint/abci/tree/master/client). @@ -115,13 +115,13 @@ little overview what they do. found [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). You can subscribe to them by calling `subscribe` RPC method. Refer - to [RPC docs](specification/rpc.html) for additional information. 
+ to [RPC docs](./specification/rpc.md) for additional information. - `mempool` Mempool module handles all incoming transactions, whenever they are coming from peers or the application. - `p2p` Provides an abstraction around peer-to-peer communication. For more details, please check out the [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). -- `rpc` [Tendermint's RPC](specification/rpc.html). +- `rpc` [Tendermint's RPC](./specification/rpc.md). - `rpc-server` RPC server. For implementation details, please read the [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md). - `state` Represents the latest state and execution submodule, which diff --git a/docs/using-tendermint.md b/docs/using-tendermint.md index e645b48bf..cd9545ef3 100644 --- a/docs/using-tendermint.md +++ b/docs/using-tendermint.md @@ -130,7 +130,7 @@ new blockchain will not make any blocks. ## Configuration Tendermint uses a `config.toml` for configuration. For details, see [the -config specification](./specification/configuration.html). +config specification](./specification/configuration.md). Notable options include the socket address of the application (`proxy_app`), the listening address of the Tendermint peer @@ -261,7 +261,7 @@ but must be positive, thus the range is: 0 through 9223372036854775807. Because of how the current proposer selection algorithm works, we do not recommend having voting powers greater than 10\^12 (ie. 1 trillion) (see [Proposals section of Byzantine Consensus -Algorithm](./specification/byzantine-consensus-algorithm.html#proposals) +Algorithm](./specification/byzantine-consensus-algorithm.md#proposals) for details). If we want to add more nodes to the network, we have two choices: we can @@ -399,7 +399,7 @@ failing, you need at least four validator nodes (e.g., 2/3). Updating validators in a live network is supported but must be explicitly programmed by the application developer. 
See the [application -developers guide](./app-development.html) for more details. +developers guide](./app-development.md) for more details. ### Local Network From cb08d28d401f04093a0ac6cda8cb0a67a7258d36 Mon Sep 17 00:00:00 2001 From: Jeremiah Andrews Date: Sun, 24 Jun 2018 19:37:32 -0700 Subject: [PATCH 465/515] Reverse iterators (#224) with passing tests --- Makefile | 2 +- db/backend_test.go | 64 ++++++++++++++++++++++++++++++++++++ db/c_level_db.go | 45 +++++++++++++++++++------ db/fsdb.go | 18 +++++++--- db/go_level_db.go | 44 +++++++++++++++++++++---- db/remotedb/remotedb_test.go | 2 +- 6 files changed, 151 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 3c79e6803..a55bc139c 100644 --- a/Makefile +++ b/Makefile @@ -79,7 +79,7 @@ clean_certs: rm -f db/remotedb/::.crt db/remotedb/::.key test: gen_certs - go test -tags gcc $(shell go list ./... | grep -v vendor) + GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor) make clean_certs test100: diff --git a/db/backend_test.go b/db/backend_test.go index c407b214f..d451b7c59 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -149,3 +149,67 @@ func TestGoLevelDBBackend(t *testing.T) { _, ok := db.(*GoLevelDB) assert.True(t, ok) } + +func TestDBIterator(t *testing.T) { + for dbType := range backends { + t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { + testDBIterator(t, dbType) + }) + } +} + +func testDBIterator(t *testing.T, backend DBBackendType) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, backend, "") + defer cleanupDBDir("", name) + + for i := 0; i < 10; i++ { + if i != 6 { // but skip 6. 
+ db.Set(int642Bytes(int64(i)), nil) + } + } + + verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") + verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") + + verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0") + verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64(nil), "reverse iterator 10") + + verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") + verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") + verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10") + verifyIterator(t, db.ReverseIterator(int642Bytes(9), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9") + verifyIterator(t, db.ReverseIterator(int642Bytes(8), nil), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8") + + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6") + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7") + verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8") + verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7") + verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8") + verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8") + + verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(4)), []int64{5}, "reverse iterator from 5 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(4)), []int64{5}, "reverse iterator from 6 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), 
int642Bytes(4)), []int64{7, 5}, "reverse iterator from 7 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(5)), []int64(nil), "reverse iterator from 6 to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(5)), []int64{7}, "reverse iterator from 7 to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(6)), []int64{7}, "reverse iterator from 7 to 6") + + verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1") + verifyIterator(t, db.ReverseIterator(int642Bytes(9), int642Bytes(8)), []int64{9}, "reverse iterator from 9 to 8") + + verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4") + verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64{4, 3}, "reverse iterator from 4 to 2") + verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64(nil), "reverse iterator from 2 to 4") + +} + +func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { + var list []int64 + for itr.Valid() { + list = append(list, bytes2Int64(itr.Key())) + itr.Next() + } + assert.Equal(t, expected, list, msg) +} diff --git a/db/c_level_db.go b/db/c_level_db.go index e3e6c1d5d..307461261 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -190,7 +190,8 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator { } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { - panic("not implemented yet") // XXX + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, true) } var _ Iterator = (*cLevelDBIterator)(nil) @@ -204,12 +205,25 @@ type cLevelDBIterator struct { func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { if isReverse { - panic("not implemented yet") // XXX - } - if 
start != nil { - source.Seek(start) + if start == nil { + source.SeekToLast() + } else { + source.Seek(start) + if source.Valid() { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } } else { - source.SeekToFirst() + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } } return &cLevelDBIterator{ source: source, @@ -243,9 +257,16 @@ func (itr cLevelDBIterator) Valid() bool { // If key is end or past it, invalid. var end = itr.end var key = itr.source.Key() - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } } // It's valid. @@ -267,7 +288,11 @@ func (itr cLevelDBIterator) Value() []byte { func (itr cLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() - itr.source.Next() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } } func (itr cLevelDBIterator) Close() { diff --git a/db/fsdb.go b/db/fsdb.go index 578c1785a..b5711ba38 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -151,21 +151,29 @@ func (db *FSDB) Mutex() *sync.Mutex { } func (db *FSDB) Iterator(start, end []byte) Iterator { + return db.MakeIterator(start, end, false) +} + +func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator { db.mtx.Lock() defer db.mtx.Unlock() // We need a copy of all of the keys. // Not the best, but probably not a bottleneck depending. 
- keys, err := list(db.dir, start, end) + keys, err := list(db.dir, start, end, isReversed) if err != nil { panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } - sort.Strings(keys) + if isReversed { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + } else { + sort.Strings(keys) + } return newMemDBIterator(db, keys, start, end) } func (db *FSDB) ReverseIterator(start, end []byte) Iterator { - panic("not implemented yet") // XXX + return db.MakeIterator(start, end, true) } func (db *FSDB) nameToPath(name []byte) string { @@ -213,7 +221,7 @@ func remove(path string) error { // List keys in a directory, stripping of escape sequences and dir portions. // CONTRACT: returns os errors directly without wrapping. -func list(dirPath string, start, end []byte) ([]string, error) { +func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) { dir, err := os.Open(dirPath) if err != nil { return nil, err @@ -231,7 +239,7 @@ func list(dirPath string, start, end []byte) ([]string, error) { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } key := unescapeKey([]byte(n)) - if IsKeyInDomain(key, start, end, false) { + if IsKeyInDomain(key, start, end, isReversed) { keys = append(keys, string(key)) } } diff --git a/db/go_level_db.go b/db/go_level_db.go index 9ff162e38..eca8a07ff 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -193,7 +193,8 @@ func (db *GoLevelDB) Iterator(start, end []byte) Iterator { // Implements DB. 
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { - panic("not implemented yet") // XXX + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, true) } type goLevelDBIterator struct { @@ -208,9 +209,26 @@ var _ Iterator = (*goLevelDBIterator)(nil) func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { if isReverse { - panic("not implemented yet") // XXX + if start == nil { + source.Last() + } else { + valid := source.Seek(start) + if valid { + soakey := source.Key() // start or after key + if bytes.Compare(start, soakey) < 0 { + source.Prev() + } + } else { + source.Last() + } + } + } else { + if start == nil { + source.First() + } else { + source.Seek(start) + } } - source.Seek(start) return &goLevelDBIterator{ source: source, start: start, @@ -245,9 +263,17 @@ func (itr *goLevelDBIterator) Valid() bool { // If key is end or past it, invalid. var end = itr.end var key = itr.source.Key() - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false + + if itr.isReverse { + if end != nil && bytes.Compare(key, end) <= 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } } // Valid @@ -276,7 +302,11 @@ func (itr *goLevelDBIterator) Value() []byte { func (itr *goLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() - itr.source.Next() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } } // Implements Iterator. 
diff --git a/db/remotedb/remotedb_test.go b/db/remotedb/remotedb_test.go index b126a9012..3cf698a65 100644 --- a/db/remotedb/remotedb_test.go +++ b/db/remotedb/remotedb_test.go @@ -38,7 +38,7 @@ func TestRemoteDB(t *testing.T) { k1 := []byte("key-1") v1 := client.Get(k1) - require.Equal(t, 0, len(v1), "expecting no key1 to have been stored") + require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1) vv1 := []byte("value-1") client.Set(k1, vv1) gv1 := client.Get(k1) From 49596e0a1f48866603813df843c9409fc19805c6 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 24 Jun 2018 19:40:24 -0700 Subject: [PATCH 466/515] Bump version to 0.9.0 --- CHANGELOG.md | 2 +- version/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e017cc561..0f900c57f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## 0.9.0 -*TBD* +*June 24th, 2018* BREAKING: - [events, pubsub] Removed - moved to github.com/tendermint/tendermint diff --git a/version/version.go b/version/version.go index cfbed74e7..6e73a937d 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.9.0-dev" +const Version = "0.9.0" From 516b3399f3f319dee9c614c0773e2b79072180e3 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Mon, 25 Jun 2018 10:14:32 -0400 Subject: [PATCH 467/515] docs: update js-abci example closes https://github.com/tendermint/js-abci/issues/21 --- docs/getting-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting-started.md b/docs/getting-started.md index aa42a7067..b2c9de21e 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -247,7 +247,7 @@ devs, we keep all our code under the `$GOPATH`, so run: Kill the previous `counter` and `tendermint` processes. 
Now run the app: - node example/app.js + node example/counter.js In another window, reset and start `tendermint`: From c6626f94decd248156cdf74fbaa81f4322db019d Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Mon, 25 Jun 2018 10:33:18 -0400 Subject: [PATCH 468/515] the last RTD version is archived --- docs/index.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index e308ae5f8..b3158c219 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -9,3 +9,7 @@ Welcome to Tendermint! This location for our documentation has been deprecated, please see: - https://tendermint.com/docs + +The last version built by Read The Docs will still be available at: + +- https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/ From 9f656e1239501724db4e7eaf80925305caf437aa Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 26 Jun 2018 11:21:25 +0400 Subject: [PATCH 469/515] add a link to full node guide Refs https://github.com/tendermint/tendermint/pull/1800/files#r197919542 --- docs/README.md | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/docs/README.md b/docs/README.md index 9dbe91781..0ed3aaec9 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,17 +1,28 @@ # Tendermint -Welcome to the Tendermint Core documentation! The introduction below provides an overview to help you navigate to your area of interest. +Welcome to the Tendermint Core documentation! The introduction below provides +an overview to help you navigate to your area of interest. ## Introduction -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. In other words, a blockchain. +Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state +transition machine - written in any programming language - and securely +replicates it on many machines. In other words, a blockchain. 
-Tendermint requires an application running over the Application Blockchain Interface (ABCI) - and comes packaged with an example application to do so. Follow the [installation instructions](./install) to get up and running quickly. For more details on [using tendermint](./using-tendermint) see that and the following sections. +Tendermint requires an application running over the Application Blockchain +Interface (ABCI) - and comes packaged with an example application to do so. +Follow the [installation instructions](./install) to get up and running +quickly. For more details on [using tendermint](./using-tendermint) see that +and the following sections. ## Networks -Testnets can be setup manually on one or more machines, or automatically on one or more machine, using a variety of methods described in the [deploy testnets section](./deploy-testnets). For more information (and to join) about the Cosmos Network testnets, see [here](link). +Testnets can be setup manually on one or more machines, or automatically on one +or more machine, using a variety of methods described in the [deploy testnets +section](./deploy-testnets). For more information (and to join) about the +Cosmos Network testnets, see [here](/getting-started/full-node.md). ## Application Development -The first step to building application on Tendermint is to [install ABCI-CLI](./getting-started) and play with the example applications. +The first step to building application on Tendermint is to [install +ABCI-CLI](./getting-started) and play with the example applications. 
From e6abdb8b9d86b53f803fa629594d84038babc028 Mon Sep 17 00:00:00 2001 From: Ricardo Domingos Date: Wed, 27 Jun 2018 06:37:30 +0200 Subject: [PATCH 470/515] p2p/trust: Fix nil pointer error on TrustMetric Copy() (#1819) --- p2p/trust/metric.go | 5 +++-- p2p/trust/metric_test.go | 8 ++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go index 5770b4208..47c0ca74d 100644 --- a/p2p/trust/metric.go +++ b/p2p/trust/metric.go @@ -256,12 +256,13 @@ func (tm *TrustMetric) SetTicker(ticker MetricTicker) { // Copy returns a new trust metric with members containing the same values func (tm *TrustMetric) Copy() *TrustMetric { - tm.mtx.Lock() - defer tm.mtx.Unlock() if tm == nil { return nil } + tm.mtx.Lock() + defer tm.mtx.Unlock() + return &TrustMetric{ proportionalWeight: tm.proportionalWeight, integralWeight: tm.integralWeight, diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go index 98ea99ab4..f690ce557 100644 --- a/p2p/trust/metric_test.go +++ b/p2p/trust/metric_test.go @@ -56,6 +56,14 @@ func TestTrustMetricConfig(t *testing.T) { tm.Wait() } +func TestTrustMetricCopyNilPointer(t *testing.T) { + var tm *TrustMetric + + ctm := tm.Copy() + + assert.Nil(t, ctm) +} + // XXX: This test fails non-deterministically func _TestTrustMetricStopPause(t *testing.T) { // The TestTicker will provide manual control over From 231812c875fabcb47bb7c09257eec7e75b7f0969 Mon Sep 17 00:00:00 2001 From: Max Levy <35595512+maxim-levy@users.noreply.github.com> Date: Wed, 27 Jun 2018 15:54:01 +0900 Subject: [PATCH 471/515] proper link to a docs module (#1822) Fixed rst -> md --- networks/remote/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/networks/remote/README.md b/networks/remote/README.md index 090f6da16..2094fcc98 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](/docs/terraform-and-ansible.rst) 
+See the [docs](/docs/terraform-and-ansible.md) From f0e5332b1f3088ec0b133bafe0dde868720f8173 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Wed, 27 Jun 2018 13:03:47 +0200 Subject: [PATCH 472/515] Feature/support https rpc client (#1816) * Implement support for https in rpc client * Update changelog * update comment * support wss protocol * fixed changelog * if -> switch * fix lint * protocol constants introduced --- CHANGELOG.md | 6 ++++++ rpc/lib/client/http_client.go | 34 +++++++++++++++++++++++++--------- rpc/lib/client/ws_client.go | 13 +++++++++++-- 3 files changed, 42 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ffc95fac..e7e595227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,12 @@ BUG FIXES: (`rpc.grpc_max_open_connections`). Check out [Running In Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) guide if you want to increase them. + +## 0.21.2 + +IMPROVEMENT + +- [rpc/client] Supports https and wss now ## 0.21.0 diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index e26d8f274..bd440289b 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -17,6 +17,14 @@ import ( types "github.com/tendermint/tendermint/rpc/lib/types" ) +const ( + protoHTTP = "http" + protoHTTPS = "https" + protoWSS = "wss" + protoWS = "ws" + protoTCP = "tcp" +) + // HTTPClient is a common interface for JSONRPCClient and URIClient. 
type HTTPClient interface { Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) @@ -25,29 +33,37 @@ type HTTPClient interface { } // TODO: Deprecate support for IP:PORT or /path/to/socket -func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, error)) { +func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (net.Conn, error)) { + // protocol to use for http operations, to support both http and https + clientProtocol := protoHTTP + parts := strings.SplitN(remoteAddr, "://", 2) var protocol, address string if len(parts) == 1 { // default to tcp if nothing specified - protocol, address = "tcp", remoteAddr + protocol, address = protoTCP, remoteAddr } else if len(parts) == 2 { protocol, address = parts[0], parts[1] } else { // return a invalid message msg := fmt.Sprintf("Invalid addr: %s", remoteAddr) - return msg, func(_ string, _ string) (net.Conn, error) { + return clientProtocol, msg, func(_ string, _ string) (net.Conn, error) { return nil, errors.New(msg) } } - // accept http as an alias for tcp - if protocol == "http" { - protocol = "tcp" + + // accept http as an alias for tcp and set the client protocol + switch protocol { + case protoHTTP, protoHTTPS: + clientProtocol = protocol + protocol = protoTCP + case protoWS, protoWSS: + clientProtocol = protocol } // replace / with . for http requests (kvstore domain) trimmedAddress := strings.Replace(address, "/", ".", -1) - return trimmedAddress, func(proto, addr string) (net.Conn, error) { + return clientProtocol, trimmedAddress, func(proto, addr string) (net.Conn, error) { return net.Dial(protocol, address) } } @@ -55,8 +71,8 @@ func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, // We overwrite the http.Client.Dial so we can do http over tcp or unix. // remoteAddr should be fully featured (eg. 
with tcp:// or unix://) func makeHTTPClient(remoteAddr string) (string, *http.Client) { - address, dialer := makeHTTPDialer(remoteAddr) - return "http://" + address, &http.Client{ + protocol, address, dialer := makeHTTPDialer(remoteAddr) + return protocol + "://" + address, &http.Client{ Transport: &http.Transport{ Dial: dialer, }, diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index a95ce17d2..6928dff36 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -70,13 +70,21 @@ type WSClient struct { // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration + + // Support both ws and wss protocols + protocol string } // NewWSClient returns a new client. See the commentary on the func(*WSClient) // functions for a detailed description of how to configure ping period and // pong wait time. The endpoint argument must begin with a `/`. func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient { - addr, dialer := makeHTTPDialer(remoteAddr) + protocol, addr, dialer := makeHTTPDialer(remoteAddr) + // default to ws protocol, unless wss is explicitly specified + if protocol != "wss" { + protocol = "ws" + } + c := &WSClient{ cdc: amino.NewCodec(), Address: addr, @@ -88,6 +96,7 @@ func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSCli readWait: defaultReadWait, writeWait: defaultWriteWait, pingPeriod: defaultPingPeriod, + protocol: protocol, } c.BaseService = *cmn.NewBaseService(nil, "WSClient", c) for _, option := range options { @@ -242,7 +251,7 @@ func (c *WSClient) dial() error { Proxy: http.ProxyFromEnvironment, } rHeader := http.Header{} - conn, _, err := dialer.Dial("ws://"+c.Address+c.Endpoint, rHeader) + conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) if err != nil { return err } From fa3bd05d44729806d3a073dce87f86f18b1d9ac6 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 
22 Jun 2018 15:06:43 -0400 Subject: [PATCH 473/515] mempool: log hashes, not whole tx --- mempool/mempool.go | 9 +++++++-- mempool/reactor.go | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index 8c9e41d59..1d12108d9 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -57,6 +57,11 @@ var ( ErrMempoolIsFull = errors.New("Mempool is full") ) +// TxID is the hex encoded hash of the bytes as a types.Tx. +func TxID(tx []byte) string { + return fmt.Sprintf("%X", types.Tx(tx).Hash()) +} + // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus // round. Transaction validity is checked using the CheckTx abci message before the transaction is // added to the pool. The Mempool uses a concurrent list structure for storing transactions that @@ -288,11 +293,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) + mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r) mem.notifyTxsAvailable() } else { // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) + mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r) // remove from cache (it might be good later) mem.cache.Remove(tx) diff --git a/mempool/reactor.go b/mempool/reactor.go index d6cebfbf3..fab9480ba 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -90,7 +90,7 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *TxMessage: err := memR.Mempool.CheckTx(msg.Tx, nil) if err != nil { - memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err) + memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err) } // broadcasting happens from go routines per peer default: From 
70d314312c8a71601c959d5671c914dd143372b8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 22 Jun 2018 15:08:39 -0400 Subject: [PATCH 474/515] consensus: fix addProposalBlockPart * When create_empty_blocks=false, we don't enterPropose until we * receive a transaction, but if we then receive a complete proposal, * we should enterPrevote. A guard in addProposalBlockPart was checking if * step==Propose before calling enterPrevote, but we need it to be step<=Propose, * since we may not have seen a tx. * This was discovered by disabling mempool broadcast, sending txs to * peers one a time, and observing their consensus logs. --- consensus/state.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 5d6842a81..386501c8d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -624,7 +624,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { err = cs.setProposal(msg.Proposal) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - _, err = cs.addProposalBlockPart(msg.Height, msg.Part) + _, err = cs.addProposalBlockPart(msg, peerID) if err != nil && msg.Round != cs.Round { cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round) err = nil @@ -1399,17 +1399,22 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { // NOTE: block is not necessarily valid. // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. 
-func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) { +func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) { + height, round, part := msg.Height, msg.Round, msg.Part + // Blocks might be reused, so round mismatch is OK if cs.Height != height { - cs.Logger.Debug("Received block part from wrong height", "height", height) + cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round) return false, nil } // We're not expecting a block part. if cs.ProposalBlockParts == nil { - cs.Logger.Info("Received a block part when we're not expecting any", "height", height) - return false, nil // TODO: bad peer? Return error? + // NOTE: this can happen when we've gone to a higher round and + // then receive parts from the previous round - not necessarily a bad peer. + cs.Logger.Info("Received a block part when we're not expecting any", + "height", height, "round", round, "index", part.Index, "peer", peerID) + return false, nil } added, err = cs.ProposalBlockParts.AddPart(part) @@ -1443,7 +1448,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) ( // procedure at this point. 
} - if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { + if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { // Move onto the next step cs.enterPrevote(height, cs.Round) } else if cs.Step == cstypes.RoundStepCommit { From aa20c45ae99bb57a8efa2be7b09898e8967d1965 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 26 Jun 2018 20:13:32 +0400 Subject: [PATCH 475/515] log total when adding new txs to the mempool --- mempool/mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index 1d12108d9..4ae179c97 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -293,7 +293,7 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r) + mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size()) mem.notifyTxsAvailable() } else { // ignore bad transaction From b1d6deaf0b1d10d72ca953ecf88318106d425249 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Thu, 28 Jun 2018 00:09:39 -0700 Subject: [PATCH 476/515] config: rename skip_upnp to upnp (#1827) * config: rename skip_upnp to upnp Change default option to enable upnp. 
Closes #1806 * doc updates - fix comment and set UPNP to false in TestP2PConfig - add UPNP to config template - update changelog --- CHANGELOG.md | 9 ++++----- cmd/tendermint/commands/run_node.go | 2 +- config/config.go | 6 +++--- config/toml.go | 3 +++ docs/specification/configuration.md | 3 +++ node/node.go | 4 ++-- p2p/listener.go | 6 +++--- p2p/listener_test.go | 2 +- p2p/pex/pex_reactor_test.go | 4 ++-- 9 files changed, 22 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7e595227..76fac1875 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,18 +3,17 @@ ## TBD BUG FIXES: - - [rpc] limited number of HTTP/WebSocket connections (`rpc.max_open_connections`) and gRPC connections (`rpc.grpc_max_open_connections`). Check out [Running In Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) guide if you want to increase them. - -## 0.21.2 -IMPROVEMENT +BREAKING CHANGES: +- [config] Rename `skip_upnp` to `upnp`, and turn it off by default. -- [rpc/client] Supports https and wss now +IMPROVEMENT +- [rpc/client] Supports https and wss now. ## 0.21.0 diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 0d50f9e4b..542e5c991 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -33,7 +33,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. 
(0.0.0.0:0 means any interface, any port)") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers") - cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration") + cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "Enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange") cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode") cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs") diff --git a/config/config.go b/config/config.go index e86b0e871..e081056fa 100644 --- a/config/config.go +++ b/config/config.go @@ -284,8 +284,8 @@ type P2PConfig struct { // Do not add private peers to this list if you don't want them advertised PersistentPeers string `mapstructure:"persistent_peers"` - // Skip UPNP port forwarding - SkipUPNP bool `mapstructure:"skip_upnp"` + // UPNP port forwarding + UPNP bool `mapstructure:"upnp"` // Path to address book AddrBook string `mapstructure:"addr_book_file"` @@ -341,6 +341,7 @@ type P2PConfig struct { func DefaultP2PConfig() *P2PConfig { return &P2PConfig{ ListenAddress: "tcp://0.0.0.0:26656", + UPNP: false, AddrBook: defaultAddrBookPath, AddrBookStrict: true, MaxNumPeers: 50, @@ -363,7 +364,6 @@ func DefaultP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://0.0.0.0:36656" - cfg.SkipUPNP = true cfg.FlushThrottleTimeout = 10 cfg.AllowDuplicateIP = true return cfg diff --git a/config/toml.go b/config/toml.go index c0840e440..4d892339a 100644 --- a/config/toml.go +++ b/config/toml.go @@ -149,6 +149,9 @@ seeds = "{{ .P2P.Seeds }}" # Do not add private peers to this list if you don't want them advertised persistent_peers = "{{ .P2P.PersistentPeers }}" +# UPNP port forwarding 
+upnp = {{ .P2P.UPNP }} + # Path to address book addr_book_file = "{{ js .P2P.AddrBook }}" diff --git a/docs/specification/configuration.md b/docs/specification/configuration.md index 59de9767b..1cf661200 100644 --- a/docs/specification/configuration.md +++ b/docs/specification/configuration.md @@ -103,6 +103,9 @@ seeds = "" # Do not add private peers to this list if you don't want them advertised persistent_peers = "" +# UPNP port forwarding +upnp = false + # Path to address book addr_book_file = "addrbook.json" diff --git a/node/node.go b/node/node.go index bd3ac4645..6e4f7df04 100644 --- a/node/node.go +++ b/node/node.go @@ -10,8 +10,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" - abci "github.com/tendermint/tendermint/abci/types" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" @@ -427,7 +427,7 @@ func (n *Node) OnStart() error { // Create & add listener protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress) - l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p")) + l := p2p.NewDefaultListener(protocol, address, n.config.P2P.UPNP, n.Logger.With("module", "p2p")) n.sw.AddListener(l) // Generate node PrivKey diff --git a/p2p/listener.go b/p2p/listener.go index e698765cd..3d2fae741 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -47,8 +47,8 @@ func splitHostPort(addr string) (host string, port int) { return host, port } -// skipUPNP: If true, does not try getUPNPExternalAddress() -func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener { +// UPNP: If false, does not try getUPNPExternalAddress() +func NewDefaultListener(protocol string, lAddr string, UPNP bool, logger log.Logger) Listener { // Local listen IP & port lAddrIP, lAddrPort := splitHostPort(lAddr) @@ -79,7 +79,7 
@@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log // Determine external address... var extAddr *NetAddress - if !skipUPNP { + if UPNP { // If the lAddrIP is INADDR_ANY, try UPnP if lAddrIP == "" || lAddrIP == "0.0.0.0" { extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) diff --git a/p2p/listener_test.go b/p2p/listener_test.go index 92018e0aa..1aa0a93a8 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -9,7 +9,7 @@ import ( func TestListener(t *testing.T) { // Create a listener - l := NewDefaultListener("tcp", ":8001", true, log.TestingLogger()) + l := NewDefaultListener("tcp", ":8001", false, log.TestingLogger()) // Dial the listener lAddr := l.ExternalAddress() diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index f4251e869..e8231c180 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -109,7 +109,7 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(2, 1) for i, sw := range switches { - sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i))) + sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, false, logger.With("pex", i))) err := sw.Start() // start switch and reactors require.Nil(t, err) @@ -232,7 +232,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { p2p.NewDefaultListener( "tcp", seed.NodeInfo().ListenAddr, - true, + false, log.TestingLogger(), ), ) From 69356a60b5cb58fb711944710ccf8863faccdcbb Mon Sep 17 00:00:00 2001 From: Max Levy <35595512+maxim-levy@users.noreply.github.com> Date: Thu, 28 Jun 2018 16:28:41 +0900 Subject: [PATCH 477/515] Broken link to docs/terraform-and-ansible corrected (#1823) Fixed rst -> md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c31c44dbe..daba4f59a 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ See the [install instructions](/docs/install.rst) - 
[Single node](/docs/using-tendermint.rst) - [Local cluster using docker-compose](/networks/local) -- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.rst) +- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md) - [Join the public testnet](https://cosmos.network/testnet) ## Resources From e556e3336e9ccae1af349001afada6bd4de1ff78 Mon Sep 17 00:00:00 2001 From: kph7 <40610313+kph7@users.noreply.github.com> Date: Thu, 28 Jun 2018 00:29:27 -0700 Subject: [PATCH 478/515] Adding IPv6 support to peering (#1818) --- node/node.go | 2 +- p2p/listener.go | 35 ++++++++++++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/node/node.go b/node/node.go index 6e4f7df04..8accc7666 100644 --- a/node/node.go +++ b/node/node.go @@ -696,7 +696,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { } p2pListener := n.sw.Listeners()[0] - p2pHost := p2pListener.ExternalAddress().IP.String() + p2pHost := p2pListener.ExternalAddressToString() p2pPort := p2pListener.ExternalAddress().Port nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) diff --git a/p2p/listener.go b/p2p/listener.go index 3d2fae741..35ef03712 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "strconv" + "strings" "time" "github.com/tendermint/tendermint/p2p/upnp" @@ -15,6 +16,7 @@ type Listener interface { Connections() <-chan net.Conn InternalAddress() *NetAddress ExternalAddress() *NetAddress + ExternalAddressToString() string String() string Stop() error } @@ -159,6 +161,15 @@ func (l *DefaultListener) ExternalAddress() *NetAddress { return l.extAddr } +func (l *DefaultListener) ExternalAddressToString() string { + ip := l.ExternalAddress().IP + if isIpv6(ip) { + // Means it's ipv6, so format it with brackets + return "[" + ip.String() + "]" + } + return ip.String() +} + // NOTE: The returned listener is already Accept()'ing. // So it's not suitable to pass into http.Serve(). 
func (l *DefaultListener) NetListener() net.Listener { @@ -201,6 +212,18 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) * return NewNetAddressIPPort(ext, uint16(externalPort)) } +func isIpv6(ip net.IP) bool { + v4 := ip.To4() + if v4 != nil { + return false + } + + ipString := ip.String() + + // Extra check just to be sure it's IPv6 + return (strings.Contains(ipString, ":") && !strings.Contains(ipString, ".")) +} + // TODO: use syscalls: see issue #712 func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress { addrs, err := net.InterfaceAddrs() @@ -213,10 +236,16 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) * if !ok { continue } - v4 := ipnet.IP.To4() - if v4 == nil || (!settleForLocal && v4[0] == 127) { + if !isIpv6(ipnet.IP) { + v4 := ipnet.IP.To4() + if v4 == nil || (!settleForLocal && v4[0] == 127) { + // loopback + continue + } + } else if !settleForLocal && ipnet.IP.IsLoopback() { + // IPv6, check for loopback continue - } // loopback + } return NewNetAddressIPPort(ipnet.IP, uint16(port)) } From e26d6ed448ba82b4340fc33fcccdda2b2d5402e0 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Fri, 29 Jun 2018 00:01:11 -0700 Subject: [PATCH 479/515] Update ISSUE_TEMPLATE to mention using pastebin (#1832) * Update ISSUE_TEMPLATE to mention using pastebin closes #1826 * Update ISSUE_TEMPLATE --- .github/ISSUE_TEMPLATE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE index e0edb29ff..ed3caac5f 100644 --- a/.github/ISSUE_TEMPLATE +++ b/.github/ISSUE_TEMPLATE @@ -33,7 +33,7 @@ in a case of bug. **How to reproduce it** (as minimally and precisely as possible): -**Logs (you can paste a part showing an error or attach the whole file)**: +**Logs (you can paste a small part showing an error or link a pastebin, gist, etc. 
containing more of the log file)**: **Config (you can paste only the changes you've made)**: From 41733b46b9d02e30489cacd87f08505d1d45d133 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Fri, 29 Jun 2018 00:08:01 -0700 Subject: [PATCH 480/515] crypto: Abstract pubkey / signature size when known to constants (#1808) * crypto: Abstract pubkey / signature size when known to constants * Created PubKeyEd25519Size as 32 * Created PubkeySecp256k1Size as 33 * Created SignatureEd25519Size as 64 * Remove extraneous message from changelog --- CHANGELOG.md | 3 +++ crypto/pub_key.go | 16 ++++++++++------ crypto/signature.go | 4 +++- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76fac1875..f250986db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## TBD +IMPROVEMENTS: + - [crypto] Make public key size into public constants + BUG FIXES: - [rpc] limited number of HTTP/WebSocket connections (`rpc.max_open_connections`) and gRPC connections diff --git a/crypto/pub_key.go b/crypto/pub_key.go index 51ef6c54e..b7f954307 100644 --- a/crypto/pub_key.go +++ b/crypto/pub_key.go @@ -40,8 +40,10 @@ type PubKey interface { var _ PubKey = PubKeyEd25519{} +const PubKeyEd25519Size = 32 + // Implements PubKeyInner -type PubKeyEd25519 [32]byte +type PubKeyEd25519 [PubKeyEd25519Size]byte // Address is the SHA256-20 of the raw pubkey bytes. func (pubKey PubKeyEd25519) Address() Address { @@ -62,15 +64,15 @@ func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool { if !ok { return false } - pubKeyBytes := [32]byte(pubKey) - sigBytes := [64]byte(sig) + pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) + sigBytes := [SignatureEd25519Size]byte(sig) return ed25519.Verify(&pubKeyBytes, msg, &sigBytes) } // For use with golang/crypto/nacl/box // If error, returns nil. 
-func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte { - keyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey) +func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { + keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey) ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) if !ok { return nil @@ -94,10 +96,12 @@ func (pubKey PubKeyEd25519) Equals(other PubKey) bool { var _ PubKey = PubKeySecp256k1{} +const PubKeySecp256k1Size = 33 + // Implements PubKey. // Compressed pubkey (just the x-cord), // prefixed with 0x02 or 0x03, depending on the y-cord. -type PubKeySecp256k1 [33]byte +type PubKeySecp256k1 [PubKeySecp256k1Size]byte // Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) func (pubKey PubKeySecp256k1) Address() Address { diff --git a/crypto/signature.go b/crypto/signature.go index 1ffb45ea3..728a2a04d 100644 --- a/crypto/signature.go +++ b/crypto/signature.go @@ -25,8 +25,10 @@ type Signature interface { var _ Signature = SignatureEd25519{} +const SignatureEd25519Size = 64 + // Implements Signature -type SignatureEd25519 [64]byte +type SignatureEd25519 [SignatureEd25519Size]byte func (sig SignatureEd25519) Bytes() []byte { bz, err := cdc.MarshalBinaryBare(sig) From 58acbf5ee32ebb471b5cecb0292e0d4d97bd1fa2 Mon Sep 17 00:00:00 2001 From: Rigel Date: Fri, 29 Jun 2018 03:09:16 -0400 Subject: [PATCH 481/515] contributing guide typos (#1831) * contributing guide typos * fix git remote cmd --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5fd2d982f..3500732f5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ Instead, we use `git remote` to add the fork as a new remote for the original re For instance, to create a fork and work on a branch of it, I would: * Create the fork on github, using the fork button. - * Go to the original repo checked out locally (ie. 
`$GOPATH/src/github.com/tendermint/tendermint`) + * Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) * `git remote rename origin upstream` * `git remote add origin git@github.com:ebuchman/basecoin.git` @@ -47,7 +47,7 @@ get_vendor_deps`). Even for dependencies under our control, dep helps us to keep multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use dep. -Run `dep status` to get a list of vendored dependencies that may not be +Run `dep status` to get a list of vendor dependencies that may not be up-to-date. ## Vagrant @@ -85,7 +85,7 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint - the latest state of development is on `develop` - `develop` must never fail `make test` - no --force onto `develop` (except when reverting a broken commit, which should seldom happen) -- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git add origin`) +- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`) - before submitting a pull request, begin `git rebase` on top of `develop` ### Pull Merge Procedure: @@ -110,7 +110,7 @@ especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint - make the required changes - these changes should be small and an absolute necessity - add a note to CHANGELOG.md -- bumb versions +- bump versions - push to hotfix-vX.X.X to run the extended integration tests on the CI - merge hotfix-vX.X.X to master - merge hotfix-vX.X.X to develop From ada5ef0669b691dd15f44c0695df008912d3a462 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 29 Jun 2018 11:48:52 +0400 Subject: [PATCH 482/515] [p2p] add godoc comments to Listener (#1828) * update changelog * document p2p/listener * do not expose underlying net.Listener * add a TODO * replace todo with a comment --- CHANGELOG.md | 1 + 
node/node.go | 2 +- p2p/listener.go | 34 +++++++++++++++++++++++----------- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f250986db..24ec19d79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ BREAKING CHANGES: IMPROVEMENT - [rpc/client] Supports https and wss now. +- [p2p] Add IPv6 support to peering. ## 0.21.0 diff --git a/node/node.go b/node/node.go index 8accc7666..7e4a986f7 100644 --- a/node/node.go +++ b/node/node.go @@ -696,7 +696,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { } p2pListener := n.sw.Listeners()[0] - p2pHost := p2pListener.ExternalAddressToString() + p2pHost := p2pListener.ExternalAddressHost() p2pPort := p2pListener.ExternalAddress().Port nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) diff --git a/p2p/listener.go b/p2p/listener.go index 35ef03712..bf50d2a35 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -12,16 +12,22 @@ import ( "github.com/tendermint/tmlibs/log" ) +// Listener is a network listener for stream-oriented protocols, providing +// convenient methods to get listener's internal and external addresses. +// Clients are supposed to read incoming connections from a channel, returned +// by Connections() method. type Listener interface { Connections() <-chan net.Conn InternalAddress() *NetAddress ExternalAddress() *NetAddress - ExternalAddressToString() string + ExternalAddressHost() string String() string Stop() error } -// Implements Listener +// DefaultListener is a cmn.Service, running net.Listener underneath. +// Optionally, UPnP is used upon calling NewDefaultListener to resolve external +// address. 
type DefaultListener struct { cmn.BaseService @@ -31,6 +37,8 @@ type DefaultListener struct { connections chan net.Conn } +var _ Listener = (*DefaultListener)(nil) + const ( numBufferedConnections = 10 defaultExternalPort = 8770 @@ -49,7 +57,8 @@ func splitHostPort(addr string) (host string, port int) { return host, port } -// UPNP: If false, does not try getUPNPExternalAddress() +// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying +// to determine external address using UPnP. func NewDefaultListener(protocol string, lAddr string, UPNP bool, logger log.Logger) Listener { // Local listen IP & port lAddrIP, lAddrPort := splitHostPort(lAddr) @@ -109,6 +118,8 @@ func NewDefaultListener(protocol string, lAddr string, UPNP bool, logger log.Log return dl } +// OnStart implements cmn.Service by spinning a goroutine, listening for new +// connections. func (l *DefaultListener) OnStart() error { if err := l.BaseService.OnStart(); err != nil { return err @@ -117,6 +128,7 @@ func (l *DefaultListener) OnStart() error { return nil } +// OnStop implements cmn.Service by closing the listener. func (l *DefaultListener) OnStop() { l.BaseService.OnStop() l.listener.Close() // nolint: errcheck @@ -147,21 +159,27 @@ func (l *DefaultListener) listenRoutine() { } } -// A channel of inbound connections. +// Connections returns a channel of inbound connections. // It gets closed when the listener closes. func (l *DefaultListener) Connections() <-chan net.Conn { return l.connections } +// InternalAddress returns the internal NetAddress (address used for +// listening). func (l *DefaultListener) InternalAddress() *NetAddress { return l.intAddr } +// ExternalAddress returns the external NetAddress (publicly available, +// determined using either UPnP or local resolver). 
func (l *DefaultListener) ExternalAddress() *NetAddress { return l.extAddr } -func (l *DefaultListener) ExternalAddressToString() string { +// ExternalAddressHost returns the external NetAddress IP string. If an IP is +// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]"). +func (l *DefaultListener) ExternalAddressHost() string { ip := l.ExternalAddress().IP if isIpv6(ip) { // Means it's ipv6, so format it with brackets @@ -170,12 +188,6 @@ func (l *DefaultListener) ExternalAddressToString() string { return ip.String() } -// NOTE: The returned listener is already Accept()'ing. -// So it's not suitable to pass into http.Serve(). -func (l *DefaultListener) NetListener() net.Listener { - return l.listener -} - func (l *DefaultListener) String() string { return fmt.Sprintf("Listener(@%v)", l.extAddr) } From ac1243260391b0db7f4acc100253a32735132079 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 11:52:21 +0400 Subject: [PATCH 483/515] [rpc/client/http] set codec on rpc client --- rpc/client/httpclient.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 1414edce3..79967bd0c 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -29,12 +29,13 @@ type HTTP struct { *WSEvents } -// New takes a remote endpoint in the form tcp://: +// NewHTTP takes a remote endpoint in the form tcp://: // and the websocket path (which always seems to be "/websocket") func NewHTTP(remote, wsEndpoint string) *HTTP { rc := rpcclient.NewJSONRPCClient(remote) cdc := rc.Codec() ctypes.RegisterAmino(cdc) + rc.SetCodec(cdc) return &HTTP{ rpc: rc, From 9563927bbd75b38421fd64ec1fbf216c7c74645c Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 11:52:47 +0400 Subject: [PATCH 484/515] print only tx's hash and size when logging blocks Closes #1799 --- types/block.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/types/block.go 
b/types/block.go index 6adc0c4c4..43856f1f4 100644 --- a/types/block.go +++ b/types/block.go @@ -464,7 +464,7 @@ func (data *Data) StringIndented(indent string) string { txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs)) break } - txStrings[i] = fmt.Sprintf("Tx:%v", tx) + txStrings[i] = fmt.Sprintf("%X (%d bytes)", tx.Hash(), len(tx)) } return fmt.Sprintf(`Data{ %s %v @@ -504,7 +504,7 @@ func (data *EvidenceData) StringIndented(indent string) string { } evStrings[i] = fmt.Sprintf("Evidence:%v", ev) } - return fmt.Sprintf(`Data{ + return fmt.Sprintf(`EvidenceData{ %s %v %s}#%v`, indent, strings.Join(evStrings, "\n"+indent+" "), From 2a7602c4ed4eccb42cc7e87928df785828d1fad9 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 14:12:52 +0400 Subject: [PATCH 485/515] [tendermint init] add default consensus params to genesis --- cmd/tendermint/commands/init.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index 45812b9ed..ad39cd20b 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -52,8 +52,9 @@ func initFilesWithConfig(config *cfg.Config) error { logger.Info("Found genesis file", "path", genFile) } else { genDoc := types.GenesisDoc{ - ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), - GenesisTime: time.Now(), + ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), + GenesisTime: time.Now(), + ConsensusParams: types.DefaultConsensusParams(), } genDoc.Validators = []types.GenesisValidator{{ PubKey: pv.GetPubKey(), From f760c24ff0df7cd150ca1e10a16cdc70110fac79 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 14:14:48 +0400 Subject: [PATCH 486/515] [config] remove MaxBlockSizeTxs and MaxBlockSizeBytes in favor of consensus params --- config/config.go | 6 ------ config/toml.go | 4 ---- consensus/state.go | 2 +- docs/specification/configuration.md | 4 ---- 4 files changed, 1 insertion(+), 15 deletions(-) diff 
--git a/config/config.go b/config/config.go index e081056fa..f5361c22b 100644 --- a/config/config.go +++ b/config/config.go @@ -454,10 +454,6 @@ type ConsensusConfig struct { // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - // BlockSize - MaxBlockSizeTxs int `mapstructure:"max_block_size_txs"` - MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"` - // EmptyBlocks mode and possible interval between empty blocks in seconds CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` @@ -479,8 +475,6 @@ func DefaultConsensusConfig() *ConsensusConfig { TimeoutPrecommitDelta: 500, TimeoutCommit: 1000, SkipTimeoutCommit: false, - MaxBlockSizeTxs: 10000, - MaxBlockSizeBytes: 1, // TODO CreateEmptyBlocks: true, CreateEmptyBlocksInterval: 0, PeerGossipSleepDuration: 100, diff --git a/config/toml.go b/config/toml.go index 4d892339a..b3745d3c8 100644 --- a/config/toml.go +++ b/config/toml.go @@ -217,10 +217,6 @@ timeout_commit = {{ .Consensus.TimeoutCommit }} # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} -# BlockSize -max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }} -max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }} - # EmptyBlocks mode and possible interval between empty blocks in seconds create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }} diff --git a/consensus/state.go b/consensus/state.go index 386501c8d..78d528d09 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -932,7 +932,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts } // Mempool validated transactions - txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs) + txs := 
cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs) block, parts := cs.state.MakeBlock(cs.Height, txs, commit) evidence := cs.evpool.PendingEvidence() block.AddEvidence(evidence) diff --git a/docs/specification/configuration.md b/docs/specification/configuration.md index 1cf661200..9ceb8be72 100644 --- a/docs/specification/configuration.md +++ b/docs/specification/configuration.md @@ -170,10 +170,6 @@ timeout_commit = 1000 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = false -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - # EmptyBlocks mode and possible interval between empty blocks in seconds create_empty_blocks = true create_empty_blocks_interval = 0 From 297cd4cfe80b86ba7870d7d5ee64624ebd551ecb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 14:15:37 +0400 Subject: [PATCH 487/515] limit HTTP request body and WS read msg size to 1MB --- rpc/lib/server/handlers.go | 8 +++++++- rpc/lib/server/http_server.go | 20 ++++++++++++++++++-- types/params.go | 1 + 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 6cc03012a..dcacfb663 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -406,7 +406,13 @@ type wsConnection struct { // description of how to configure ping period and pong wait time. NOTE: if the // write buffer is full, pongs may be dropped, which may cause clients to // disconnect. 
see https://github.com/gorilla/websocket/issues/97 -func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection { +func NewWSConnection( + baseConn *websocket.Conn, + funcMap map[string]*RPCFunc, + cdc *amino.Codec, + options ...func(*wsConnection), +) *wsConnection { + baseConn.SetReadLimit(maxBodyBytes) wsc := &wsConnection{ remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 9bdb4dffa..6223d205c 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -23,6 +23,12 @@ type Config struct { MaxOpenConnections int } +const ( + // maxBodyBytes controls the maximum number of bytes the + // server will read parsing the request body. + maxBodyBytes = int64(1000000) // 1MB +) + // StartHTTPServer starts an HTTP server on listenAddr with the given handler. // It wraps handler with RecoverAndLogHandler. 
func StartHTTPServer( @@ -53,7 +59,7 @@ func StartHTTPServer( go func() { err := http.Serve( listener, - RecoverAndLogHandler(handler, logger), + RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), ) logger.Error("RPC HTTP server stopped", "err", err) }() @@ -99,7 +105,7 @@ func StartHTTPAndTLSServer( go func() { err := http.ServeTLS( listener, - RecoverAndLogHandler(handler, logger), + RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), certFile, keyFile, ) @@ -202,3 +208,13 @@ func (w *ResponseWriterWrapper) WriteHeader(status int) { func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { return w.ResponseWriter.(http.Hijacker).Hijack() } + +type maxBytesHandler struct { + h http.Handler + n int64 +} + +func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, h.n) + h.h.ServeHTTP(w, r) +} diff --git a/types/params.go b/types/params.go index d068342c6..6cbac47a6 100644 --- a/types/params.go +++ b/types/params.go @@ -7,6 +7,7 @@ import ( ) const ( + // MaxBlockSizeBytes is the maximum permitted size of the blocks. 
MaxBlockSizeBytes = 104857600 // 100MB ) From ab04201c3d658d4eb0689b2c6d48d95b672560a3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 27 Jun 2018 18:07:26 +0400 Subject: [PATCH 488/515] fix empty packet size calculation Fixes #1762#issuecomment-398940107 --- mempool/reactor.go | 2 +- p2p/conn/connection.go | 15 +++++++-------- p2p/conn/connection_test.go | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/mempool/reactor.go b/mempool/reactor.go index fab9480ba..066118036 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -5,8 +5,8 @@ import ( "reflect" "time" - abci "github.com/tendermint/tendermint/abci/types" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tmlibs/clist" "github.com/tendermint/tmlibs/log" diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index c56507296..1392c8051 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -156,7 +156,7 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec onReceive: onReceive, onError: onError, config: config, - emptyPacketMsgSize: emptyPacketMsgSize(), + emptyPacketMsgSize: emptyPacketMsgSize(config.MaxPacketMsgSize), } // Create channels @@ -631,7 +631,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), - maxPacketMsgPayloadSize: conn.config.MaxPacketMsgSize, + maxPacketMsgPayloadSize: conn.config.MaxPacketMsgSize - conn.emptyPacketMsgSize, } } @@ -694,7 +694,7 @@ func (ch *Channel) isSendPending() bool { func (ch *Channel) nextPacketMsg() PacketMsg { packet := PacketMsg{} packet.ChannelID = byte(ch.desc.ID) - maxSize := ch.maxPacketMsgPayloadSize - ch.conn.emptyPacketMsgSize + maxSize := ch.maxPacketMsgPayloadSize packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { packet.EOF = 
byte(0x01) @@ -788,17 +788,16 @@ func (mp PacketMsg) String() string { // - EOF field key + byte = 2 bytes // - Bytes field key = 1 bytes // - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes -// - Struct terminator = 1 byte -// = up to 14 bytes overhead for the packet. +// = up to 13 bytes overhead for the packet. -func emptyPacketMsgSize() int { +func emptyPacketMsgSize(maxPayloadSize int) int { emptyPacketMsgSize := len(cdc.MustMarshalBinary(PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, 1), + Bytes: make([]byte, maxPayloadSize), })) // -1 byte of data // +1 byte because uvarint length of MustMarshalBinary(bytes) will be 2 bytes for big packets // +1 byte because uvarint length of MustMarshalBinary(packet) will be 2 bytes for big packets - return emptyPacketMsgSize - 1 + 1 + 1 + return emptyPacketMsgSize - maxPayloadSize + 1 + 1 } diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 34b37ab8a..6e4c1a090 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -426,7 +426,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { var packet = PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize-emptyPacketMsgSize()), + Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize-emptyPacketMsgSize(mconnClient.config.MaxPacketMsgSize)), } _, err = cdc.MarshalBinaryWriter(buf, packet) assert.Nil(t, err) From 61c5791fa3aa0d16451285f705f48526d26ce7be Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 29 Jun 2018 12:17:26 +0400 Subject: [PATCH 489/515] revert back to Jae's original payload size limit except now we calculate the max size using the maxPacketMsgSize() function, which frees developers from having to know amino encoding details. 
plus, 10 additional bytes are added to leave the room for amino upgrades (both making it more efficient / less efficient) --- benchmarks/codec_test.go | 2 +- config/config.go | 39 +++++++------- config/toml.go | 5 +- crypto/merkle/doc.go | 2 +- crypto/merkle/simple_tree.go | 8 +-- crypto/merkle/simple_tree_test.go | 2 +- docs/running-in-production.md | 2 +- docs/specification/configuration.md | 2 +- evidence/wire.go | 2 +- p2p/conn/connection.go | 81 +++++++++++++---------------- p2p/conn/connection_test.go | 4 +- p2p/switch.go | 2 +- privval/priv_validator.go | 2 +- privval/priv_validator_test.go | 2 +- rpc/core/pipe.go | 2 +- 15 files changed, 73 insertions(+), 84 deletions(-) diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index ee61cc9a4..53cbf632c 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -6,8 +6,8 @@ import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" proto "github.com/tendermint/tendermint/benchmarks/proto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) diff --git a/config/config.go b/config/config.go index f5361c22b..e01819305 100644 --- a/config/config.go +++ b/config/config.go @@ -299,9 +299,8 @@ type P2PConfig struct { // Time to wait before flushing messages out on the connection, in ms FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` - // Maximum size of a message packet, in bytes - // Includes a header, which is ~13 bytes - MaxPacketMsgSize int `mapstructure:"max_packet_msg_size"` + // Maximum size of a message packet payload, in bytes + MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` // Rate at which packets can be sent, in bytes/second SendRate int64 `mapstructure:"send_rate"` @@ -340,23 +339,23 @@ type P2PConfig struct { // DefaultP2PConfig returns a default configuration for the peer-to-peer layer func DefaultP2PConfig() *P2PConfig { 
return &P2PConfig{ - ListenAddress: "tcp://0.0.0.0:26656", - UPNP: false, - AddrBook: defaultAddrBookPath, - AddrBookStrict: true, - MaxNumPeers: 50, - FlushThrottleTimeout: 100, - MaxPacketMsgSize: 1024, // 1 kB - SendRate: 512000, // 500 kB/s - RecvRate: 512000, // 500 kB/s - PexReactor: true, - SeedMode: false, - AllowDuplicateIP: true, // so non-breaking yet - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - TestFuzz: false, - TestFuzzConfig: DefaultFuzzConnConfig(), + ListenAddress: "tcp://0.0.0.0:26656", + UPNP: false, + AddrBook: defaultAddrBookPath, + AddrBookStrict: true, + MaxNumPeers: 50, + FlushThrottleTimeout: 100, + MaxPacketMsgPayloadSize: 1024, // 1 kB + SendRate: 512000, // 500 kB/s + RecvRate: 512000, // 500 kB/s + PexReactor: true, + SeedMode: false, + AllowDuplicateIP: true, // so non-breaking yet + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + TestFuzz: false, + TestFuzzConfig: DefaultFuzzConnConfig(), } } diff --git a/config/toml.go b/config/toml.go index b3745d3c8..4569291d4 100644 --- a/config/toml.go +++ b/config/toml.go @@ -164,9 +164,8 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }} # Maximum number of peers to connect to max_num_peers = {{ .P2P.MaxNumPeers }} -# Maximum size of a message packet, in bytes -# Includes a header, which is ~13 bytes -max_packet_msg_size = {{ .P2P.MaxPacketMsgSize }} +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} # Rate at which packets can be sent, in bytes/second send_rate = {{ .P2P.SendRate }} diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go index da65dd858..865c30217 100644 --- a/crypto/merkle/doc.go +++ b/crypto/merkle/doc.go @@ -28,4 +28,4 @@ https://bitcointalk.org/?topic=102395 TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure. 
*/ -package merkle \ No newline at end of file +package merkle diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 35a6eaa7c..46a075909 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -9,12 +9,12 @@ func SimpleHashFromTwoHashes(left, right []byte) []byte { var hasher = tmhash.New() err := encodeByteSlice(hasher, left) if err != nil { - panic(err) - } + panic(err) + } err = encodeByteSlice(hasher, right) if err != nil { - panic(err) - } + panic(err) + } return hasher.Sum(nil) } diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index a721bccea..6eef93623 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -6,8 +6,8 @@ import ( cmn "github.com/tendermint/tmlibs/common" . "github.com/tendermint/tmlibs/test" - "testing" "github.com/tendermint/tendermint/crypto/tmhash" + "testing" ) type testItem []byte diff --git a/docs/running-in-production.md b/docs/running-in-production.md index 3ceded499..225a97853 100644 --- a/docs/running-in-production.md +++ b/docs/running-in-production.md @@ -21,7 +21,7 @@ to prevent Denial-of-service attacks. You can read more about it ### P2P The core of the Tendermint peer-to-peer system is `MConnection`. Each -connection has `MaxPacketMsgSize`, which is the maximum packet +connection has `MaxPacketMsgPayloadSize`, which is the maximum packet size and bounded send & receive queues. One can impose restrictions on send & receive rate per connection (`SendRate`, `RecvRate`). 
diff --git a/docs/specification/configuration.md b/docs/specification/configuration.md index 9ceb8be72..214757b9f 100644 --- a/docs/specification/configuration.md +++ b/docs/specification/configuration.md @@ -119,7 +119,7 @@ flush_throttle_timeout = 100 max_num_peers = 50 # Maximum size of a message packet payload, in bytes -max_msg_packet_payload_size = 1024 +max_packet_msg_payload_size = 1024 # Rate at which packets can be sent, in bytes/second send_rate = 512000 diff --git a/evidence/wire.go b/evidence/wire.go index d4db37c54..fb3a177cc 100644 --- a/evidence/wire.go +++ b/evidence/wire.go @@ -2,8 +2,8 @@ package evidence import ( "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" ) var cdc = amino.NewCodec() diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 1392c8051..b19a1ca19 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -18,7 +18,7 @@ import ( ) const ( - defaultMaxPacketMsgSize = 1024 + defaultMaxPacketMsgPayloadSize = 1024 numBatchPacketMsgs = 10 minReadBufferSize = 1024 @@ -96,7 +96,7 @@ type MConnection struct { created time.Time // time of creation - emptyPacketMsgSize int + _maxPacketMsgSize int } // MConnConfig is a MConnection configuration. @@ -105,7 +105,7 @@ type MConnConfig struct { RecvRate int64 `mapstructure:"recv_rate"` // Maximum payload size - MaxPacketMsgSize int `mapstructure:"max_packet_msg_size"` + MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` // Interval to flush writes (throttled) FlushThrottle time.Duration `mapstructure:"flush_throttle"` @@ -120,12 +120,12 @@ type MConnConfig struct { // DefaultMConnConfig returns the default config. 
func DefaultMConnConfig() MConnConfig { return MConnConfig{ - SendRate: defaultSendRate, - RecvRate: defaultRecvRate, - MaxPacketMsgSize: defaultMaxPacketMsgSize, - FlushThrottle: defaultFlushThrottle, - PingInterval: defaultPingInterval, - PongTimeout: defaultPongTimeout, + SendRate: defaultSendRate, + RecvRate: defaultRecvRate, + MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize, + FlushThrottle: defaultFlushThrottle, + PingInterval: defaultPingInterval, + PongTimeout: defaultPongTimeout, } } @@ -146,17 +146,16 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec } mconn := &MConnection{ - conn: conn, - bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), - bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), - send: make(chan struct{}, 1), - pong: make(chan struct{}, 1), - onReceive: onReceive, - onError: onError, - config: config, - emptyPacketMsgSize: emptyPacketMsgSize(config.MaxPacketMsgSize), + conn: conn, + bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), + bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), + sendMonitor: flow.New(0, 0), + recvMonitor: flow.New(0, 0), + send: make(chan struct{}, 1), + pong: make(chan struct{}, 1), + onReceive: onReceive, + onError: onError, + config: config, } // Create channels @@ -173,6 +172,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn) + // maxPacketMsgSize() is a bit heavy, so call just once + mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() + return mconn } @@ -397,7 +399,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. 
- c.sendMonitor.Limit(c.config.MaxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { @@ -455,7 +457,7 @@ func (c *MConnection) recvRoutine() { FOR_LOOP: for { // Block until .recvMonitor says we can read. - c.recvMonitor.Limit(c.config.MaxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) + c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) // Peek into bufConnReader for debugging /* @@ -475,7 +477,7 @@ FOR_LOOP: var packet Packet var _n int64 var err error - _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.MaxPacketMsgSize)) + _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) c.recvMonitor.Update(int(_n)) if err != nil { if c.IsRunning() { @@ -548,6 +550,16 @@ func (c *MConnection) stopPongTimer() { } } +// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead +// of amino encoding. 
+func (c *MConnection) maxPacketMsgSize() int { + return len(cdc.MustMarshalBinary(PacketMsg{ + ChannelID: 0x01, + EOF: 1, + Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), + })) + 10 // leave room for changes in amino +} + type ConnectionStatus struct { Duration time.Duration SendMonitor flow.Status @@ -631,7 +643,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), - maxPacketMsgPayloadSize: conn.config.MaxPacketMsgSize - conn.emptyPacketMsgSize, + maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, } } @@ -780,24 +792,3 @@ type PacketMsg struct { func (mp PacketMsg) String() string { return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF) } - -// - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes -// (as long as it's less than 16,384 bytes) -// - Prefix bytes = 4 bytes -// - ChannelID field key + byte = 2 bytes -// - EOF field key + byte = 2 bytes -// - Bytes field key = 1 bytes -// - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes -// = up to 13 bytes overhead for the packet. 
- -func emptyPacketMsgSize(maxPayloadSize int) int { - emptyPacketMsgSize := len(cdc.MustMarshalBinary(PacketMsg{ - ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, maxPayloadSize), - })) - // -1 byte of data - // +1 byte because uvarint length of MustMarshalBinary(bytes) will be 2 bytes for big packets - // +1 byte because uvarint length of MustMarshalBinary(packet) will be 2 bytes for big packets - return emptyPacketMsgSize - maxPayloadSize + 1 + 1 -} diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 6e4c1a090..8006b37a8 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -426,7 +426,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { var packet = PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize-emptyPacketMsgSize(mconnClient.config.MaxPacketMsgSize)), + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), } _, err = cdc.MarshalBinaryWriter(buf, packet) assert.Nil(t, err) @@ -440,7 +440,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { packet = PacketMsg{ ChannelID: 0x01, EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgSize+1), + Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), } _, err = cdc.MarshalBinaryWriter(buf, packet) assert.Nil(t, err) diff --git a/p2p/switch.go b/p2p/switch.go index ae322b54f..bf5f9747f 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -100,7 +100,7 @@ func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch { mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond mConfig.SendRate = cfg.SendRate mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgSize = cfg.MaxPacketMsgSize + mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize sw.mConfig = mConfig diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 9f02482ab..8a54b5ccf 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -287,7 
+287,7 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8, func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error { pv.mtx.Lock() defer pv.mtx.Unlock() - sig, err:= pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) + sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) if err != nil { return err } diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 4fc8f97fc..345b51438 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" ) diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 9fcb75e19..bf32c9c66 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -3,8 +3,8 @@ package core import ( "time" - crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/consensus" + crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" From 399e2fbdac5bfda42b0f34305046ca81410a6dbf Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 29 Jun 2018 12:37:06 +0400 Subject: [PATCH 490/515] update changelog --- CHANGELOG.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24ec19d79..00276a0bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,22 +2,22 @@ ## TBD -IMPROVEMENTS: - - [crypto] Make public key size into public constants - BUG FIXES: -- [rpc] limited number of HTTP/WebSocket connections +- [rpc] Limit maximum number of HTTP/WebSocket connections (`rpc.max_open_connections`) and gRPC connections - (`rpc.grpc_max_open_connections`). 
Check out [Running In - Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) - guide if you want to increase them. + (`rpc.grpc_max_open_connections`). Check out "Running In Production" guide if + you want to increase them. +- [rpc] Limit maximum request body size to 1MB (header is limited to 1MB). BREAKING CHANGES: - [config] Rename `skip_upnp` to `upnp`, and turn it off by default. +- [config] Rename `max_packet_msg_size` to `max_packet_msg_payload_size`. IMPROVEMENT -- [rpc/client] Supports https and wss now. +- [crypto] Make public key size into public constants - [p2p] Add IPv6 support to peering. +- [rpc/client] Supports https and wss now. +- [stdout] Txs inside blocks are now logged as hashes (plus size in bytes). ## 0.21.0 From 9752e059e1ca10e0a7a2e130fd9ce4b97d1a6e1a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 29 Jun 2018 16:03:31 +0400 Subject: [PATCH 491/515] fix nil pointer panic by checking if peer is nil Fixes #1830 remember that PeerSet#Get can return nil --- p2p/peer_set.go | 11 ++++++----- p2p/pex/pex_reactor.go | 8 +++++--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/p2p/peer_set.go b/p2p/peer_set.go index e048cf4e3..257856156 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -55,8 +55,8 @@ func (ps *PeerSet) Add(peer Peer) error { return nil } -// Has returns true iff the PeerSet contains -// the peer referred to by this peerKey. +// Has returns true if the set contains the peer referred to by this +// peerKey, otherwise false. func (ps *PeerSet) Has(peerKey ID) bool { ps.mtx.Lock() _, ok := ps.lookup[peerKey] @@ -64,8 +64,8 @@ func (ps *PeerSet) Has(peerKey ID) bool { return ok } -// HasIP returns true if the PeerSet contains the peer referred to by this IP -// address. +// HasIP returns true if the set contains the peer referred to by this IP +// address, otherwise false. 
func (ps *PeerSet) HasIP(peerIP net.IP) bool { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -85,7 +85,8 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool { return false } -// Get looks up a peer by the provided peerKey. +// Get looks up a peer by the provided peerKey. Returns nil if peer is not +// found. func (ps *PeerSet) Get(peerKey ID) Peer { ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 27ed422c5..48b6d43e7 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -77,10 +77,10 @@ type PEXReactor struct { attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} } -func (pexR *PEXReactor) minReceiveRequestInterval() time.Duration { +func (r *PEXReactor) minReceiveRequestInterval() time.Duration { // NOTE: must be less than ensurePeersPeriod, otherwise we'll request // peers too quickly from others and they'll think we're bad! - return pexR.ensurePeersPeriod / 3 + return r.ensurePeersPeriod / 3 } // PEXReactorConfig holds reactor specific configuration data. @@ -628,7 +628,9 @@ func (r *PEXReactor) crawlPeers() { } // Ask for more addresses peer := r.Switch.Peers().Get(pi.Addr.ID) - r.RequestAddrs(peer) + if peer != nil { + r.RequestAddrs(peer) + } } } From bb0313d0602c15a18aa392b741847317cdfc34d5 Mon Sep 17 00:00:00 2001 From: Lawrence Tran Date: Fri, 29 Jun 2018 13:09:50 -0500 Subject: [PATCH 492/515] Fix typo (#1837) The base64 encoding for 'abcd' is incorrect for the python decoding examples. --- docs/getting-started.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/getting-started.md b/docs/getting-started.md index 2d2c77b16..1fa1405f2 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -125,8 +125,8 @@ The result should look like: Note the `value` in the result (`YWJjZA==`); this is the base64-encoding of the ASCII of `abcd`. 
You can verify this in a python 2 shell by -running `"61626364".decode('base64')` or in python 3 shell by running -`import codecs; codecs.decode("61626364", 'base64').decode('ascii')`. +running `"YWJjZA==".decode('base64')` or in python 3 shell by running +`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`. Stay tuned for a future release that [makes this output more human-readable](https://github.com/tendermint/tendermint/issues/1794). From 6e5a01ccec260cf3f0573255cb067ec9dfd46736 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 00:50:49 -0400 Subject: [PATCH 493/515] changelog and version --- CHANGELOG.md | 49 ++++++++++++++++++++++++++++++---------------- version/version.go | 4 ++-- 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24ec19d79..154c51c83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,23 +1,45 @@ # Changelog -## TBD +## 0.22.0 -IMPROVEMENTS: - - [crypto] Make public key size into public constants - -BUG FIXES: -- [rpc] limited number of HTTP/WebSocket connections - (`rpc.max_open_connections`) and gRPC connections - (`rpc.grpc_max_open_connections`). Check out [Running In - Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) - guide if you want to increase them. +*July 1st, 2018* BREAKING CHANGES: - [config] Rename `skip_upnp` to `upnp`, and turn it off by default. +- [config] `MaxPacketMsgPayloadSize` -> `MaxPacketMsgSize` +- [types] Update Amino to v0.10.1 + * Amino is now fully proto3 compatible for the basic types + * JSON-encoded types now use the type name instead of the prefix bytes + * Integers are encoded as strings +- [crypto] Update go-crypto to v0.10.0 and merge into `crypto` + * privKey.Sign returns error. + * ed25519 address is the first 20-bytes of the SHA256 of the pubkey + * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 +- [rpc] `syncing` is now called `catching_up`. 
+ +FEATURES +- [cmd] Added metrics (served under `/metrics` using a Prometheus client; + disabled by default). See the new `instrumentation` section in the config and + [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) + guide. +- [p2p] Rudimentary IPv6 support IMPROVEMENT - [rpc/client] Supports https and wss now. - [p2p] Add IPv6 support to peering. +- [crypto] Make public key size into public constants +- [mempool] Log tx hash, not entire tx +- [abci] Merged in github.com/tendermint/abci +- [docs] Move from .rst to .md + +BUG FIXES: +- [rpc] Limited number of HTTP/WebSocket connections + (`rpc.max_open_connections`) and gRPC connections + (`rpc.grpc_max_open_connections`). Check out [Running In + Production](https://tendermint.readthedocs.io/en/master/running-in-production.html) + guide if you want to increase them. +- [consensus] Fix a halting bug where `create_empty_blocks=false` +- [p2p] Fix panic in seed mode ## 0.21.0 @@ -34,13 +56,6 @@ IMPROVEMENT - [pubsub] Set default capacity to 0 - [docs] Various improvements -FEATURES - -- [main] added metrics (served under `/metrics` using a Prometheus client; - disabled by default). See the new `instrumentation` section in the config and - [metrics](https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/metrics.html) - guide. - BUG FIXES - [consensus] Fix an issue where we don't make blocks after `fast_sync` when `create_empty_blocks=false` diff --git a/version/version.go b/version/version.go index df553115a..9be4c9d82 100644 --- a/version/version.go +++ b/version/version.go @@ -3,14 +3,14 @@ package version // Version components const ( Maj = "0" - Min = "21" + Min = "22" Fix = "0" ) var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.21.0" + Version = "0.22.0" // GitCommit is the current HEAD set using ldflags. 
GitCommit string From f35ebd5cf71d1362f410ab6f016e4302e3f40f06 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 01:23:38 -0400 Subject: [PATCH 494/515] docs: minor fix for abci query peer filter --- docs/app-development.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/app-development.md b/docs/app-development.md index 32abe2151..f8c70f21b 100644 --- a/docs/app-development.md +++ b/docs/app-development.md @@ -394,13 +394,13 @@ serialize each query as a single byte array. Additionally, certain instance about which peers to connect to. Tendermint Core currently uses the Query connection to filter peers upon -connecting, according to IP address or public key. For instance, +connecting, according to IP address or node ID. For instance, returning non-OK ABCI response to either of the following queries will cause Tendermint to not connect to the corresponding peer: -- `p2p/filter/addr/`, where `` is an IP address. -- `p2p/filter/pubkey/`, where `` is the hex-encoded - ED25519 key of the node (not it's validator key) +- `p2p/filter/addr/`, where `` is an IP address. +- `p2p/filter/id/`, where `` is the hex-encoded node ID (the hash of + the node's p2p pubkey). Note: these query formats are subject to change! 
From da4632c651df0ff912dea3c8c328785486087fad Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 01:29:03 -0400 Subject: [PATCH 495/515] docs/spec: update address spec to sha2 for ed25519 --- docs/spec/blockchain/encoding.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 1c33aa1fa..fd8e64a42 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -84,14 +84,13 @@ Addresses for each public key types are computed as follows: #### Ed25519 -RIPEMD160 hash of the Amino encoded public key: +First 20-bytes of the SHA256 hash of the raw 32-byte public key: ``` -address = RIPEMD160(AMINO(pubkey)) +address = SHA256(pubkey)[:20] ``` -NOTE: this will soon change to the truncated 20-bytes of the SHA256 of the raw -public key +NOTE: before v0.22.0, this was the RIPEMD160 of the Amino encoded public key. #### Secp256k1 From 2d98899b9b57817b892ff1c9f2643e0b2850cadc Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sun, 1 Jul 2018 09:44:12 +0400 Subject: [PATCH 496/515] set MaxTxs to 10000 (the same that was used in the config before) --- types/params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/params.go b/types/params.go index 6cbac47a6..0654d07b9 100644 --- a/types/params.go +++ b/types/params.go @@ -57,7 +57,7 @@ func DefaultConsensusParams() *ConsensusParams { func DefaultBlockSize() BlockSize { return BlockSize{ MaxBytes: 22020096, // 21MB - MaxTxs: 100000, + MaxTxs: 10000, MaxGas: -1, } } From 3a0dff7db2628c20cb0d77db3a01fea7981e5aec Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 11:51:06 -0400 Subject: [PATCH 497/515] fix changelog --- CHANGELOG.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f6cb6195..d73c949a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,15 +21,14 @@ FEATURES disabled by default). 
See the new `instrumentation` section in the config and [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) guide. -- [p2p] Rudimentary IPv6 support +- [p2p] Add IPv6 support to peering. IMPROVEMENT -- [crypto] Make public key size into public constants -- [p2p] Add IPv6 support to peering. - [rpc/client] Supports https and wss now. - [crypto] Make public key size into public constants - [mempool] Log tx hash, not entire tx -- [abci] Merged in github.com/tendermint/abci +- [abci] Merged in github.com/tendermint/abci and + github.com/tendermint/go-crypto - [docs] Move from .rst to .md BUG FIXES: @@ -41,7 +40,6 @@ BUG FIXES: - [consensus] Fix a halting bug where `create_empty_blocks=false` - [p2p] Fix panic in seed mode - ## 0.21.0 *June 21th, 2018* From af703620d4423478968ab602716a2a51cbb02ac6 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 12:53:51 -0400 Subject: [PATCH 498/515] consensus: stop wal --- consensus/state.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 78d528d09..a3196a2f4 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -320,10 +320,7 @@ func (cs *ConsensusState) OnStop() { cs.timeoutTicker.Stop() - // Make BaseService.Wait() wait until cs.wal.Wait() - if cs.IsRunning() { - cs.wal.Wait() - } + cs.wal.Stop() } // Wait waits for the the main routine to return. 
From 9ff99114b1f55b43f6e628f17da1e8d1f78659e2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:12:58 -0400 Subject: [PATCH 499/515] update for tmlibs v0.9.0 --- Gopkg.lock | 31 ++++++++++--------------------- Gopkg.toml | 2 +- blockchain/store_test.go | 10 +++++----- privval/socket_test.go | 8 ++++---- 4 files changed, 20 insertions(+), 31 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index f9729ffab..e2fadfadc 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -211,7 +211,7 @@ "nfs", "xfs" ] - revision = "94663424ae5ae9856b40a9f170762b4197024661" + revision = "40f013a808ec4fa79def444a1a56de4d1727efcb" [[projects]] branch = "master" @@ -284,20 +284,7 @@ "leveldb/table", "leveldb/util" ] - revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" - -[[projects]] - name = "github.com/tendermint/abci" - packages = [ - "client", - "example/code", - "example/counter", - "example/kvstore", - "server", - "types" - ] - revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540" - version = "v0.12.0" + revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697" [[projects]] branch = "master" @@ -327,10 +314,11 @@ "flowrate", "log", "merkle", + "merkle/tmhash", "test" ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4" + revision = "49596e0a1f48866603813df843c9409fc19805c6" + version = "v0.9.0" [[projects]] branch = "master" @@ -342,6 +330,7 @@ "curve25519", "hkdf", "internal/chacha20", + "internal/subtle", "nacl/box", "nacl/secretbox", "openpgp/armor", @@ -350,7 +339,7 @@ "ripemd160", "salsa20/salsa" ] - revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602" [[projects]] branch = "master" @@ -365,7 +354,7 @@ "netutil", "trace" ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9" [[projects]] branch = "master" @@ -374,7 +363,7 @@ "cpu", "unix" ] - revision = "a9e25c09b96b8870693763211309e213c6ef299d" + revision = 
"7138fd3d9dc8335c567ca206f4333fb75eb05d56" [[projects]] name = "golang.org/x/text" @@ -435,6 +424,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "d17038089dd6383ff5028229d4026bb92f5c7adc7e9c1cd52584237e2e5fd431" + inputs-digest = "c25289282b94abc7f0c390e592e5e1636b7f26cb4773863ac39cde7fdc7b5bdf" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 4c32f3d80..18e2767a9 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -75,7 +75,7 @@ [[override]] name = "github.com/tendermint/tmlibs" - version = "~0.8.4" + version = "~0.9.0" [[constraint]] name = "google.golang.org/grpc" diff --git a/blockchain/store_test.go b/blockchain/store_test.go index 1e0c223ad..b74c2b35f 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -153,14 +153,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { parts: validPartSet, seenCommit: seenCommit1, corruptCommitInDB: true, // Corrupt the DB's commit entry - wantPanic: "Error reading block commit", + wantPanic: "unmarshal to types.Commit failed", }, { block: newBlock(&header1, commitAtH10), parts: validPartSet, seenCommit: seenCommit1, - wantPanic: "Error reading block", + wantPanic: "unmarshal to types.BlockMeta failed", corruptBlockInDB: true, // Corrupt the DB's block entry }, @@ -179,7 +179,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { seenCommit: seenCommit1, corruptSeenCommitInDB: true, - wantPanic: "Error reading block seen commit", + wantPanic: "unmarshal to types.Commit failed", }, { @@ -287,7 +287,7 @@ func TestLoadBlockPart(t *testing.T) { db.Set(calcBlockPartKey(height, index), []byte("Tendermint")) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block part") + require.Contains(t, panicErr.Error(), "unmarshal to types.Part failed") // 3. 
A good block serialized and saved to the DB should be retrievable db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1)) @@ -316,7 +316,7 @@ func TestLoadBlockMeta(t *testing.T) { db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta")) res, _, panicErr = doFn(loadMeta) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block meta") + require.Contains(t, panicErr.Error(), "unmarshal to types.BlockMeta") // 3. A good blockMeta serialized and saved to the DB should be retrievable meta := &types.BlockMeta{} diff --git a/privval/socket_test.go b/privval/socket_test.go index fcf21e0c6..1813893af 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -119,7 +119,7 @@ func TestSocketPVAcceptDeadline(t *testing.T) { SocketPVAcceptDeadline(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestSocketPVDeadline(t *testing.T) { @@ -165,7 +165,7 @@ func TestSocketPVDeadline(t *testing.T) { time.Sleep(20 * time.Microsecond) _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout) + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) } func TestSocketPVWait(t *testing.T) { @@ -178,7 +178,7 @@ func TestSocketPVWait(t *testing.T) { SocketPVConnWait(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestRemoteSignerRetry(t *testing.T) { @@ -221,7 +221,7 @@ func TestRemoteSignerRetry(t *testing.T) { RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(retries)(rs) - assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax) + assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) select { case attempts := <-attemptc: From 2902ab1a144b6c0bef9c8cdd605ef77a8e70a7cc Mon Sep 17 00:00:00 2001 From: 
Ethan Buchman Date: Sun, 1 Jul 2018 22:32:06 -0400 Subject: [PATCH 500/515] delete some things for the merge --- .editorconfig | 16 - .gitignore | 29 -- CHANGELOG.md | 919 --------------------------------------------- Gopkg.lock | 429 --------------------- Gopkg.toml | 99 ----- LICENSE | 204 ---------- Makefile | 236 ------------ README.md | 138 ------- version/version.go | 23 -- 9 files changed, 2093 deletions(-) delete mode 100644 .editorconfig delete mode 100644 .gitignore delete mode 100644 CHANGELOG.md delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml delete mode 100644 LICENSE delete mode 100644 Makefile delete mode 100644 README.md delete mode 100644 version/version.go diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index 481621f76..000000000 --- a/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*.{sh,Makefile}] -indent_style = tab - -[*.proto] -indent_style = space -indent_size = 2 diff --git a/.gitignore b/.gitignore deleted file mode 100644 index bcfd36db1..000000000 --- a/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -*.swp -*.swo -.bak -*.bak -.DS_Store -build/* -rpc/test/.tendermint -.tendermint -remote_dump -.revision -vendor -.vagrant -test/p2p/data/ -test/logs -coverage.txt -docs/_build -docs/tools -*.log -abci-cli -abci/types/types.pb.go - -scripts/wal2json/wal2json -scripts/cutWALUntil/cutWALUntil - -.idea/ -*.iml - -libs/pubsub/query/fuzz_test/output -shunit2 diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index d73c949a2..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,919 +0,0 @@ -# Changelog - -## 0.22.0 - -*July 1st, 2018* - -BREAKING CHANGES: -- [config] Rename `skip_upnp` to `upnp`, and turn it off by default. 
-- [types] Update Amino to v0.10.1 - * Amino is now fully proto3 compatible for the basic types - * JSON-encoded types now use the type name instead of the prefix bytes - * Integers are encoded as strings -- [crypto] Update go-crypto to v0.10.0 and merge into `crypto` - * privKey.Sign returns error. - * ed25519 address is the first 20-bytes of the SHA256 of the pubkey - * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 -- [rpc] `syncing` is now called `catching_up`. - -FEATURES -- [cmd] Added metrics (served under `/metrics` using a Prometheus client; - disabled by default). See the new `instrumentation` section in the config and - [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) - guide. -- [p2p] Add IPv6 support to peering. - -IMPROVEMENT -- [rpc/client] Supports https and wss now. -- [crypto] Make public key size into public constants -- [mempool] Log tx hash, not entire tx -- [abci] Merged in github.com/tendermint/abci and - github.com/tendermint/go-crypto -- [docs] Move from .rst to .md - -BUG FIXES: -- [rpc] Limit maximum number of HTTP/WebSocket connections - (`rpc.max_open_connections`) and gRPC connections - (`rpc.grpc_max_open_connections`). Check out "Running In Production" guide if - you want to increase them. -- [rpc] Limit maximum request body size to 1MB (header is limited to 1MB). -- [consensus] Fix a halting bug where `create_empty_blocks=false` -- [p2p] Fix panic in seed mode - -## 0.21.0 - -*June 21th, 2018* - -BREAKING CHANGES - -- [config] Change default ports from 4665X to 2665X. Ports over 32768 are - ephemeral and reserved for use by the kernel. 
-- [cmd] `unsafe_reset_all` removes the addrbook.json - -IMPROVEMENT - -- [pubsub] Set default capacity to 0 -- [docs] Various improvements - -BUG FIXES - -- [consensus] Fix an issue where we don't make blocks after `fast_sync` when `create_empty_blocks=false` -- [mempool] Fix #1761 where we don't process txs if `cache_size=0` -- [rpc] Fix memory leak in Websocket (when using `/subscribe` method) -- [config] Escape paths in config - fixes config paths on Windows - -## 0.20.0 - -*June 6th, 2018* - -This is the first in a series of breaking releases coming to Tendermint after -soliciting developer feedback and conducting security audits. - -This release does not break any blockchain data structures or -protocols other than the ABCI messages between Tendermint and the application. - -Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint -v0.20.0 on blockchains created with v0.19.X - -BREAKING CHANGES - -- [abci] Upgrade to - [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110) -- [abci] Change Query path for filtering peers by node ID from - `p2p/filter/pubkey/` to `p2p/filter/id/` - -## 0.19.9 - -*June 5th, 2018* - -BREAKING CHANGES - -- [types/priv_validator] Moved to top level `privval` package - -FEATURES - -- [config] Collapse PeerConfig into P2PConfig -- [docs] Add quick-install script -- [docs/spec] Add table of Amino prefixes - -BUG FIXES - -- [rpc] Return 404 for unknown endpoints -- [consensus] Flush WAL on stop -- [evidence] Don't send evidence to peers that are behind -- [p2p] Fix memory leak on peer disconnects -- [rpc] Fix panic when `per_page=0` - -## 0.19.8 - -*June 4th, 2018* - -BREAKING: - -- [p2p] Remove `auth_enc` config option, peer connections are always auth - encrypted. 
Technically a breaking change but seems no one was using it and - arguably a bug fix :) - -BUG FIXES - -- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and - `create_empty_blocks=false` - -## 0.19.7 - -*May 31st, 2018* - -BREAKING: - -- [libs/pubsub] TagMap#Get returns a string value -- [libs/pubsub] NewTagMap accepts a map of strings - -FEATURES - -- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate -- [p2p] AllowDuplicateIP config option to refuse connections from same IP. - - true by default for now, false by default in next breaking release -- [docs] Add docs for query, tx indexing, events, pubsub -- [docs] Add some notes about running Tendermint in production - -IMPROVEMENTS: - -- [consensus] Consensus reactor now receives events from a separate synchronous event bus, - which is not dependant on external RPC load -- [consensus/wal] do not look for height in older files if we've seen height - 1 -- [docs] Various cleanup and link fixes - -## 0.19.6 - -*May 29th, 2018* - -BUG FIXES - -- [blockchain] Fix fast-sync deadlock during high peer turnover - -BUG FIX: - -- [evidence] Dont send peers evidence from heights they haven't synced to yet -- [p2p] Refuse connections to more than one peer with the same IP -- [docs] Various fixes - -## 0.19.5 - -*May 20th, 2018* - -BREAKING CHANGES - -- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below) -- [rpc/client] TxSearch returns ResultTxSearch -- [version] Breaking changes to Go APIs will not be reflected in breaking - version change, but will be included in changelog. 
- -FEATURES - -- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results -- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output -- [config] `mempool.size` and `mempool.cache_size` options - -IMPROVEMENTS - -- [docs] Lots of updates -- [consensus] Only Fsync() the WAL before executing msgs from ourselves - -BUG FIXES - -- [mempool] Enforce upper bound on number of transactions - -## 0.19.4 (May 17th, 2018) - -IMPROVEMENTS - -- [state] Improve tx indexing by using batches -- [consensus, state] Improve logging (more consensus logs, fewer tx logs) -- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...) - -BUG FIXES - -- [consensus] Fix issue #1575 where a late proposer can get stuck - -## 0.19.3 (May 14th, 2018) - -FEATURES - -- [rpc] New `/consensus_state` returns just the votes seen at the current height - -IMPROVEMENTS - -- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state` -- [rpc] Add PeerStateStats to `/dump_consensus_state` - -BUG FIXES - -- [cmd] Set GenesisTime during `tendermint init` -- [consensus] fix ValidBlock rules - -## 0.19.2 (April 30th, 2018) - -FEATURES: - -- [p2p] Allow peers with different Minor versions to connect -- [rpc] `/net_info` includes `n_peers` - -IMPROVEMENTS: - -- [p2p] Various code comments, cleanup, error types -- [p2p] Change some Error logs to Debug - -BUG FIXES: - -- [p2p] Fix reconnect to persistent peer when first dial fails -- [p2p] Validate NodeInfo.ListenAddr -- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers -- [p2p/pex] Limit max msg size to 64kB -- [p2p] Fix panic when pex=false -- [p2p] Allow multiple IPs per ID in AddrBook -- [p2p] Fix before/after bugs in addrbook isBad() - -## 0.19.1 (April 27th, 2018) - -Note this release includes some small breaking changes in the RPC and one in the -config that are really bug fixes. 
v0.19.1 will work with existing chains, and make Tendermint -easier to use and debug. With <3 - -BREAKING (MINOR) - -- [config] Removed `wal_light` setting. If you really needed this, let us know - -FEATURES: - -- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets ! -- [cmd] Added `gen_node_key` command - -BUG FIXES - -Some of these are breaking in the RPC response, but they're really bugs! - -- [spec] Document address format and pubkey encoding pre and post Amino -- [rpc] Lower case JSON field names -- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state` -- [rpc] Fix NodeInfo.Channels format to hex -- [rpc] Add Validator address to `/status` -- [rpc] Fix `prove` in ABCIQuery -- [cmd] MarshalJSONIndent on init - -## 0.19.0 (April 13th, 2018) - -BREAKING: -- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details) -- [cmd] `show_node_id` now returns an error if there is no node key -- [rpc]: changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status) - -Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is -serialized to disk or over the network. - -See github.com/tendermint/go-amino for details on the new format. - -See `scripts/wire2amino.go` for a tool to upgrade -genesis/priv_validator/node_key JSON files. - -FEATURES - -- [test] docker-compose for local testnet setup (thanks Greg!) 
- -## 0.18.0 (April 6th, 2018) - -BREAKING: - -- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0) -- [types] ValidtorSet.GetByAddress returns -1 if no validator found -- [p2p] require all addresses come with an ID no matter what -- [rpc] Listening address must contain tcp:// or unix:// prefix - -FEATURES: - -- [rpc] StartHTTPAndTLSServer (not used yet) -- [rpc] Include validator's voting power in `/status` -- [rpc] `/tx` and `/tx_search` responses now include the transaction hash -- [rpc] Include peer NodeIDs in `/net_info` - -IMPROVEMENTS: -- [config] trim whitespace from elements of lists (like `persistent_peers`) -- [rpc] `/tx_search` results are sorted by height -- [p2p] do not try to connect to ourselves (ok, maybe only once) -- [p2p] seeds respond with a bias towards good peers - -BUG FIXES: -- [rpc] fix subscribing using an abci.ResponseDeliverTx tag -- [rpc] fix tx_indexers matchRange -- [rpc] fix unsubscribing (see tmlibs v0.8.0) - -## 0.17.1 (March 27th, 2018) - -BUG FIXES: -- [types] Actually support `app_state` in genesis as `AppStateJSON` - -## 0.17.0 (March 27th, 2018) - -BREAKING: -- [types] WriteSignBytes -> SignBytes - -IMPROVEMENTS: -- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release) -- [docs] note on determinism (docs/determinism.rst) -- [genesis] `app_options` field is deprecated. please rename it to `app_state` in your genesis file(s). 
`app_options` will not work in the next breaking release -- [p2p] dial seeds directly without potential peers -- [p2p] exponential backoff for addrs in the address book -- [p2p] mark peer as good if it contributed enough votes or block parts -- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect -- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address -- [spec] various improvements -- switched from glide to dep internally for package management -- [wire] prep work for upgrading to new go-wire (which is now called go-amino) - -FEATURES: -- [config] exposed `auth_enc` flag to enable/disable encryption -- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description) -- [rpc] added `/health` endpoint, which returns empty result for now -- [types/priv_validator] new format and socket client, allowing for remote signing - -BUG FIXES: -- [consensus] fix liveness bug by introducing ValidBlock mechanism - -## 0.16.0 (February 20th, 2018) - -BREAKING CHANGES: -- [config] use $TMHOME/config for all config and json files -- [p2p] old `--p2p.seeds` is now `--p2p.persistent_peers` (persistent peers to which TM will always connect to) -- [p2p] now `--p2p.seeds` only used for getting addresses (if addrbook is empty; not persistent) -- [p2p] NodeInfo: remove RemoteAddr and add Channels - - we must have at least one overlapping channel with peer - - we only send msgs for channels the peer advertised -- [p2p/conn] pong timeout -- [lite] comment out IAVL related code - -FEATURES: -- [p2p] added new `/dial_peers&persistent=_` **unsafe** endpoint -- [p2p] persistent node key in `$THMHOME/config/node_key.json` -- [p2p] introduce peer ID and authenticate peers by ID using addresses like `ID@IP:PORT` -- [p2p/pex] new seed mode crawls the network and serves as a seed. 
-- [config] MempoolConfig.CacheSize -- [config] P2P.SeedMode (`--p2p.seed_mode`) - -IMPROVEMENT: -- [p2p/pex] stricter rules in the PEX reactor for better handling of abuse -- [p2p] various improvements to code structure including subpackages for `pex` and `conn` -- [docs] new spec! -- [all] speed up the tests! - -BUG FIX: -- [blockchain] StopPeerForError on timeout -- [consensus] StopPeerForError on a bad Maj23 message -- [state] flush mempool conn before calling commit -- [types] fix priv val signing things that only differ by timestamp -- [mempool] fix memory leak causing zombie peers -- [p2p/conn] fix potential deadlock - -## 0.15.0 (December 29, 2017) - -BREAKING CHANGES: -- [p2p] enable the Peer Exchange reactor by default -- [types] add Timestamp field to Proposal/Vote -- [types] add new fields to Header: TotalTxs, ConsensusParamsHash, LastResultsHash, EvidenceHash -- [types] add Evidence to Block -- [types] simplify ValidateBasic -- [state] updates to support changes to the header -- [state] Enforce <1/3 of validator set can change at a time - -FEATURES: -- [state] Send indices of absent validators and addresses of byzantine validators in BeginBlock -- [state] Historical ConsensusParams and ABCIResponses -- [docs] Specification for the base Tendermint data structures. -- [evidence] New evidence reactor for gossiping and managing evidence -- [rpc] `/block_results?height=X` returns the DeliverTx results for a given height. - -IMPROVEMENTS: -- [consensus] Better handling of corrupt WAL file - -BUG FIXES: -- [lite] fix race -- [state] validate block.Header.ValidatorsHash -- [p2p] allow seed addresses to be prefixed with eg. `tcp://` -- [p2p] use consistent key to refer to peers so we dont try to connect to existing peers -- [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. 
- -## 0.14.0 (December 11, 2017) - -BREAKING CHANGES: -- consensus/wal: removed separator -- rpc/client: changed Subscribe/Unsubscribe/UnsubscribeAll funcs signatures to be identical to event bus. - -FEATURES: -- new `tendermint lite` command (and `lite/proxy` pkg) for running a light-client RPC proxy. - NOTE it is currently insecure and its APIs are not yet covered by semver - -IMPROVEMENTS: -- rpc/client: can act as event bus subscriber (See https://github.com/tendermint/tendermint/issues/945). -- p2p: use exponential backoff from seconds to hours when attempting to reconnect to persistent peer -- config: moniker defaults to the machine's hostname instead of "anonymous" - -BUG FIXES: -- p2p: no longer exit if one of the seed addresses is incorrect - -## 0.13.0 (December 6, 2017) - -BREAKING CHANGES: -- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc. -- types: block heights are now `int64` everywhere -- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled -- node: EventSwitch methods now refer to EventBus -- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified -- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch -- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe -- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery -- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event` -- rpc: `/status` returns the LatestBlockTime in human readable form instead of in nanoseconds -- mempool: cached transactions return an error instead of an ABCI response with BadNonce - -FEATURES: -- rpc: new `/unsubscribe_all` WebSocket RPC endpoint -- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries -- p2p/trust: new trust metric for tracking peers. 
See ADR-006 -- config: TxIndexConfig allows to set what DeliverTx tags to index - -IMPROVEMENTS: -- New asynchronous events system using `tmlibs/pubsub` -- logging: Various small improvements -- consensus: Graceful shutdown when app crashes -- tests: Fix various non-deterministic errors -- p2p: more defensive programming - -BUG FIXES: -- consensus: fix panic where prs.ProposalBlockParts is not initialized -- p2p: fix panic on bad channel - -## 0.12.1 (November 27, 2017) - -BUG FIXES: -- upgrade tmlibs dependency to enable Windows builds for Tendermint - -## 0.12.0 (October 27, 2017) - -BREAKING CHANGES: - - rpc/client: websocket ResultsCh and ErrorsCh unified in ResponsesCh. - - rpc/client: ABCIQuery no longer takes `prove` - - state: remove GenesisDoc from state. - - consensus: new binary WAL format provides efficiency and uses checksums to detect corruption - - use scripts/wal2json to convert to json for debugging - -FEATURES: - - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! - - rpc: `/genesis` includes the `app_options` . - - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. - - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. - -IMPROVEMENTS: - - rpc: `/genesis` result includes `app_options` - - rpc/lib/client: add jitter to reconnects. - - rpc/lib/types: `RPCError` satisfies the `error` interface. 
- -BUG FIXES: - - rpc/client: fix ws deadlock after stopping - - blockchain: fix panic on AddBlock when peer is nil - - mempool: fix sending on TxsAvailable when a tx has been invalidated - - consensus: dont run WAL catchup if we fast synced - -## 0.11.1 (October 10, 2017) - -IMPROVEMENTS: - - blockchain/reactor: respondWithNoResponseMessage for missing height - -BUG FIXES: - - rpc: fixed client WebSocket timeout - - rpc: client now resubscribes on reconnection - - rpc: fix panics on missing params - - rpc: fix `/dump_consensus_state` to have normal json output (NOTE: technically breaking, but worth a bug fix label) - - types: fixed out of range error in VoteSet.addVote - - consensus: fix wal autofile via https://github.com/tendermint/tmlibs/blob/master/CHANGELOG.md#032-october-2-2017 - -## 0.11.0 (September 22, 2017) - -BREAKING: - - genesis file: validator `amount` is now `power` - - abci: Info, BeginBlock, InitChain all take structs - - rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones: - - requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC. - - `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`. - - - cmd: if there is no genesis, exit immediately instead of waiting around for one to show. - - types: `Signer.Sign` returns an error. - - state: every validator set change is persisted to disk, which required some changes to the `State` structure. - - p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct). - -FEATURES: - - rpc: `/validators?height=X` allows querying of validators at previous heights. - - rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height. 
- -IMPROVEMENTS: - - docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io` - -BUG FIXES: - - fix WAL opening issue on Windows - -## 0.10.4 (September 5, 2017) - -IMPROVEMENTS: -- docs: Added Slate docs to each rpc function (see rpc/core) -- docs: Ported all website docs to Read The Docs -- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize -- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong - -BUG FIXES: -- consensus: fix panic on getVoteBitArray -- consensus: hang instead of panicking on byzantine consensus failures -- cmd: dont load config for version command - -## 0.10.3 (August 10, 2017) - -FEATURES: -- control over empty block production: - - new flag, `--consensus.create_empty_blocks`; when set to false, blocks are only created when there are txs or when the AppHash changes. - - new config option, `consensus.create_empty_blocks_interval`; an empty block is created after this many seconds. - - in normal operation, `create_empty_blocks = true` and `create_empty_blocks_interval = 0`, so blocks are being created all the time (as in all previous versions of tendermint). The number of empty blocks can be reduced by increasing `create_empty_blocks_interval` or by setting `create_empty_blocks = false`. - - new `TxsAvailable()` method added to Mempool that returns a channel which fires when txs are available. - - new heartbeat message added to consensus reactor to notify peers that a node is waiting for txs before entering propose step. -- rpc: Add `syncing` field to response returned by `/status`. Is `true` while in fast-sync mode. 
- -IMPROVEMENTS: -- various improvements to documentation and code comments - -BUG FIXES: -- mempool: pass height into constructor so it doesn't always start at 0 - -## 0.10.2 (July 10, 2017) - -FEATURES: -- Enable lower latency block commits by adding consensus reactor sleep durations and p2p flush throttle timeout to the config - -IMPROVEMENTS: -- More detailed logging in the consensus reactor and state machine -- More in-code documentation for many exposed functions, especially in consensus/reactor.go and p2p/switch.go -- Improved readability for some function definitions and code blocks with long lines - -## 0.10.1 (June 28, 2017) - -FEATURES: -- Use `--trace` to get stack traces for logged errors -- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set -- types: GenesisDocFromFile parses a GenesisDoc from a JSON file - -IMPROVEMENTS: -- Add a Code of Conduct -- Variety of improvements as suggested by `megacheck` tool -- rpc: deduplicate tests between rpc/client and rpc/tests -- rpc: addresses without a protocol prefix default to `tcp://`. `http://` is also accepted as an alias for `tcp://` -- cmd: commands are more easily reusable from other tools -- DOCKER: automate build/push - -BUG FIXES: -- Fix log statements using keys with spaces (logger does not currently support spaces) -- rpc: set logger on websocket connection -- rpc: fix ws connection stability by setting write deadline on pings - -## 0.10.0 (June 2, 2017) - -Includes major updates to configuration, logging, and json serialization. -Also includes the Grand Repo-Merge of 2017. 
- -BREAKING CHANGES: - -- Config and Flags: - - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11), -containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig` - - This affects the following flags: - - `--seeds` is now `--p2p.seeds` - - `--node_laddr` is now `--p2p.laddr` - - `--pex` is now `--p2p.pex` - - `--skip_upnp` is now `--p2p.skip_upnp` - - `--rpc_laddr` is now `--rpc.laddr` - - `--grpc_laddr` is now `--rpc.grpc_laddr` - - Any configuration option now within a substruct must come under that heading in the `config.toml`, for instance: - ``` - [p2p] - laddr="tcp://1.2.3.4:46656" - - [consensus] - timeout_propose=1000 - ``` - - Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test` - - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config - -- Logger - - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. -See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details - - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!) - - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger - -- JSON serialization: - - Replace `[TypeByte, Xxx]` with `{"type": "some-type", "data": Xxx}` in RPC and all `.json` files by using `go-wire/data`. 
For instance, a public key is now: - ``` - "pub_key": { - "type": "ed25519", - "data": "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0" - } - ``` - - Remove type information about RPC responses, so `[TypeByte, {"jsonrpc": "2.0", ... }]` is now just `{"jsonrpc": "2.0", ... }` - - Change `[]byte` to `data.Bytes` in all serialized types (for hex encoding) - - Lowercase the JSON tags in `ValidatorSet` fields - - Introduce `EventDataInner` for serializing events - -- Other: - - Send InitChain message in handshake if `appBlockHeight == 0` - - Do not include the `Accum` field when computing the validator hash. This makes the ValidatorSetHash unique for a given validator set, rather than changing with every block (as the Accum changes) - - Unsafe RPC calls are not enabled by default. This includes `/dial_seeds`, and all calls prefixed with `unsafe`. Use the `--rpc.unsafe` flag to enable. - - -FEATURES: - -- Per-module log levels. For instance, the new default is `state:info,*:error`, which means the `state` package logs at `info` level, and everything else logs at `error` level -- Log if a node is validator or not in every consensus round -- Use ldflags to set git hash as part of the version -- Ignore `address` and `pub_key` fields in `priv_validator.json` and overwrite them with the values derived from the `priv_key` - -IMPROVEMENTS: - -- Merge `tendermint/go-p2p -> tendermint/tendermint/p2p` and `tendermint/go-rpc -> tendermint/tendermint/rpc/lib` -- Update paths for grand repo merge: - - `go-common -> tmlibs/common` - - `go-data -> go-wire/data` - - All other `go-` libs, except `go-crypto` and `go-wire`, are merged under `tmlibs` -- No global loggers (loggers are passed into constructors, or preferably set with a SetLogger method) -- Return HTTP status codes with errors for RPC responses -- Limit `/blockchain_info` call to return a maximum of 20 blocks -- Use `.Wrap()` and `.Unwrap()` instead of eg. 
`PubKeyS` for `go-crypto` types -- RPC JSON responses use pretty printing (via `json.MarshalIndent`) -- Color code different instances of the consensus for tests -- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests - - -## 0.9.2 (April 26, 2017) - -BUG FIXES: - -- Fix bug in `ResetPrivValidator` where we were using the global config and log (causing external consumers, eg. basecoin, to fail). - -## 0.9.1 (April 21, 2017) - -FEATURES: - -- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers -- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash. Optionally returns a proof of the tx's inclusion in the block -- `tendermint testnet` command initializes files for a testnet - -IMPROVEMENTS: - -- CLI now uses Cobra framework -- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0) -- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx) -- `/broadcast_tx_commit` also returns the height the block was committed in -- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner -- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0) -- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks - -BUG FIXES: - -- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later -- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save() - -## 0.9.0 (March 6, 2017) - -BREAKING CHANGES: - -- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights: - -``` -message RequestQuery{ - bytes data = 1; - string path = 2; - uint64 height = 3; - bool prove = 4; -} - -message ResponseQuery{ - 
CodeType code = 1; - int64 index = 2; - bytes key = 3; - bytes value = 4; - bytes proof = 5; - uint64 height = 6; - string log = 7; -} -``` - - -- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`: - -``` -type BlockMeta struct { - BlockID BlockID `json:"block_id"` // the block hash and partsethash - Header *Header `json:"header"` // The block's Header -} -``` - -- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes. - -- `tendermint gen_validator` command output is now pure JSON - -FEATURES: - -- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X` -- Client API for each endpoint, including mocks for testing - -IMPROVEMENTS: - -- `Node` is now a `BaseService` -- Simplified starting Tendermint in-process from another application -- Better organized Makefile -- Scripts for auto-building binaries across platforms -- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint -- New repo files: `CONTRIBUTING.md`, Github `ISSUE_TEMPLATE`, `CHANGELOG.md` -- Improvements on CircleCI for managing build/test artifacts -- Handshake replay is done through the consensus package, possibly using a mockApp -- Graceful shutdown of RPC listeners -- Tests for the PEX reactor and DialSeeds - -BUG FIXES: - -- Check peer.Send for failure before updating PeerState in consensus -- Fix panic in `/dial_seeds` with invalid addresses -- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable` -- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State` - - -## 0.8.0 (January 13, 2017) - -BREAKING CHANGES: - -- New data type `BlockID` to represent blocks: - -``` -type BlockID struct { - Hash []byte `json:"hash"` - PartsHeader PartSetHeader `json:"parts"` -} -``` - -- `Vote` data type now includes 
validator address and index: - -``` -type Vote struct { - ValidatorAddress []byte `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int `json:"height"` - Round int `json:"round"` - Type byte `json:"type"` - BlockID BlockID `json:"block_id"` // zero if vote is nil. - Signature crypto.Signature `json:"signature"` -} -``` - -- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx -- Hex strings in the RPC are now "0x" prefixed - - -FEATURES: - -- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23, -in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts: - -``` -type VoteSetMaj23Message struct { - Height int - Round int - Type byte - BlockID types.BlockID -} -``` - -- Configurable block part set size -- Validator set changes -- Optionally skip TimeoutCommit if we have all the votes -- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes -- GRPC server for BroadcastTx endpoint - -IMPROVEMENTS: - -- Less verbose logging -- Better test coverage (37% -> 49%) -- Canonical SignBytes for signable types -- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile -- Better in-process testing for the consensus reactor and byzantine faults -- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points -- Better abstraction over timeout mechanics - -BUG FIXES: - -- Fix memory leak in mempool peer -- Fix panic on POLRound=-1 -- Actually set the CommitTime -- Actually send BeginBlock message -- Fix a liveness issue caused by Byzantine proposals/votes. Uses the new `Maj23Msg`. 
- - -## 0.7.4 (December 14, 2016) - -FEATURES: - -- Enable the Peer Exchange reactor with the `--pex` flag for more resilient gossip network (feature still in development, beware dragons) - -IMPROVEMENTS: - -- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration - -## 0.7.3 (October 20, 2016) - -IMPROVEMENTS: - -- Type safe FireEvent -- More WAL/replay tests -- Cleanup some docs - -BUG FIXES: - -- Fix deadlock in mempool for synchronous apps -- Replay handles non-empty blocks -- Fix race condition in HeightVoteSet - -## 0.7.2 (September 11, 2016) - -BUG FIXES: - -- Set mustConnect=false so tendermint will retry connecting to the app - -## 0.7.1 (September 10, 2016) - -FEATURES: - -- New TMSP connection for Query/Info -- New RPC endpoints: - - `tmsp_query` - - `tmsp_info` -- Allow application to filter peers through Query (off by default) - -IMPROVEMENTS: - -- TMSP connection type enforced at compile time -- All listen/client urls use a "tcp://" or "unix://" prefix - -BUG FIXES: - -- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery -- Fix event unsubscribe -- Fix fastsync/blockchain reactor - -## 0.7.0 (August 7, 2016) - -BREAKING CHANGES: - -- Strict SemVer starting now! 
-- Update to ABCI v0.2.0 -- Validation types now called Commit -- NewBlock event only returns the block header - - -FEATURES: - -- TMSP and RPC support TCP and UNIX sockets -- Additional config options including block size and consensus parameters -- New WAL mode `cswal_light`; logs only the validator's own votes -- New RPC endpoints: - - for starting/stopping profilers, and for updating config - - `/broadcast_tx_commit`, returns when tx is included in a block, else an error - - `/unsafe_flush_mempool`, empties the mempool - - -IMPROVEMENTS: - -- Various optimizations -- Remove bad or invalidated transactions from the mempool cache (allows later duplicates) -- More elaborate testing using CircleCI including benchmarking throughput on 4 digitalocean droplets - -BUG FIXES: - -- Various fixes to WAL and replay logic -- Various race conditions - -## PreHistory - -Strict versioning only began with the release of v0.7.0, in late summer 2016. -The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year. -Through the course of 2015, in collaboration with Eris Industries (now Monax Industries), -many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine. -That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow). -In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation. - -By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the -invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP). -The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine -driving an application running in another process. 
-The ABCI interface and implementation were iterated on and improved over the course of 2016, -until versioned history kicked in with v0.7.0. diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index e2fadfadc..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,429 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/beorn7/perks" - packages = ["quantile"] - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - name = "github.com/btcsuite/btcd" - packages = ["btcec"] - revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" - -[[projects]] - branch = "master" - name = "github.com/btcsuite/btcutil" - packages = ["base58"] - revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/ebuchman/fail-test" - packages = ["."] - revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" - -[[projects]] - branch = "master" - name = "github.com/fortytw2/leaktest" - packages = ["."] - revision = "b008db64ef8daabb22ff6daa557f33b41d8f6ccd" - -[[projects]] - name = "github.com/fsnotify/fsnotify" - packages = ["."] - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - -[[projects]] - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "log/term", - "metrics", - "metrics/discard", - "metrics/internal/lv", - "metrics/prometheus" - ] - revision = "4dc7be5d2d12881735283bcab7352178e190fc71" - version = "v0.6.0" - -[[projects]] - name = "github.com/go-logfmt/logfmt" - packages = ["."] - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - name = "github.com/go-stack/stack" - packages = ["."] - revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" - version = 
"v1.7.0" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "jsonpb", - "proto", - "protoc-gen-gogo/descriptor", - "sortkeys", - "types" - ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - name = "github.com/gorilla/websocket" - packages = ["."] - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/printer", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token" - ] - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" - -[[projects]] - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - branch = "master" - name = "github.com/jmhodges/levigo" - packages = ["."] - revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" - -[[projects]] - branch = "master" - name = "github.com/kr/logfmt" - packages = ["."] - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - name = "github.com/magiconair/properties" - packages = ["."] - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - -[[projects]] - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = 
"bb74f1db0675b241733089d5a1faa5dd8b0ef57b" - -[[projects]] - name = "github.com/pelletier/go-toml" - packages = ["."] - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp" - ] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "github.com/prometheus/client_model" - packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" - -[[projects]] - branch = "master" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model" - ] - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" - -[[projects]] - branch = "master" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs" - ] - revision = "40f013a808ec4fa79def444a1a56de4d1727efcb" - -[[projects]] - branch = "master" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - name = "github.com/spf13/afero" - packages = [ - ".", - "mem" - ] - revision = "787d034dfe70e44075ccc060d346146ef53270ad" - version = "v1.1.1" - -[[projects]] - name = "github.com/spf13/cast" - packages = ["."] - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" - -[[projects]] - name = "github.com/spf13/cobra" - packages = ["."] - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" - -[[projects]] - branch = "master" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - revision = 
"7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - name = "github.com/spf13/viper" - packages = ["."] - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - branch = "master" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util" - ] - revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697" - -[[projects]] - branch = "master" - name = "github.com/tendermint/ed25519" - packages = [ - ".", - "edwards25519", - "extra25519" - ] - revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" - -[[projects]] - name = "github.com/tendermint/go-amino" - packages = ["."] - revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" - version = "0.10.1" - -[[projects]] - name = "github.com/tendermint/tmlibs" - packages = [ - "autofile", - "cli", - "cli/flags", - "clist", - "common", - "db", - "flowrate", - "log", - "merkle", - "merkle/tmhash", - "test" - ] - revision = "49596e0a1f48866603813df843c9409fc19805c6" - version = "v0.9.0" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "chacha20poly1305", - "curve25519", - "hkdf", - "internal/chacha20", - "internal/subtle", - "nacl/box", - "nacl/secretbox", - "openpgp/armor", - "openpgp/errors", - "poly1305", - "ripemd160", - "salsa20/salsa" - ] - revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - 
"http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "netutil", - "trace" - ] - revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix" - ] - revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" - -[[projects]] - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "codes", - "connectivity", - "credentials", - "grpclb/grpc_lb_v1/messages", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "stats", - "status", - "tap", - "transport" - ] - revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - version = "v1.7.5" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "c25289282b94abc7f0c390e592e5e1636b7f26cb4773863ac39cde7fdc7b5bdf" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 18e2767a9..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,99 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/ebuchman/fail-test" - branch = "master" - -[[constraint]] - name = "github.com/fortytw2/leaktest" - branch = "master" - -[[constraint]] - name = "github.com/go-kit/kit" - version = "~0.6.0" - -[[constraint]] - name = "github.com/gogo/protobuf" - version = "~1.0.0" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "~1.0.0" - -[[constraint]] - name = "github.com/gorilla/websocket" - version = "~1.2.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "~0.8.0" - -[[constraint]] - name = "github.com/rcrowley/go-metrics" - branch = "master" - -[[constraint]] - name = "github.com/spf13/cobra" - version = "~0.0.1" - -[[constraint]] - name = "github.com/spf13/viper" - version = "~1.0.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.1" - -[[constraint]] - name = "github.com/tendermint/go-amino" - version = "~0.10.1" - -[[override]] - name = "github.com/tendermint/tmlibs" - version = "~0.9.0" - -[[constraint]] - name = "google.golang.org/grpc" - version = "~1.7.3" - -# this got updated and broke, so locked to an old working commit ... 
-[[override]] - name = "google.golang.org/genproto" - revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" - -[prune] - go-tests = true - unused-packages = true - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" diff --git a/LICENSE b/LICENSE deleted file mode 100644 index bb66bb350..000000000 --- a/LICENSE +++ /dev/null @@ -1,204 +0,0 @@ -Tendermint Core -License: Apache2.0 - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2016 All in Bits, Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Makefile b/Makefile deleted file mode 100644 index 079c58f90..000000000 --- a/Makefile +++ /dev/null @@ -1,236 +0,0 @@ -GOTOOLS = \ - github.com/golang/dep/cmd/dep \ - gopkg.in/alecthomas/gometalinter.v2 -PACKAGES=$(shell go list ./... | grep -v '/vendor/') -BUILD_TAGS?=tendermint -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" - -all: check build test install - -check: check_tools ensure_deps - - -######################################## -### Build - -build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ - -build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint - -install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint - -######################################## -### Distribution - -# dist builds binaries for all platforms and packages them for distribution -dist: - @BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" - -######################################## -### Tools & dependencies - -check_tools: - @# https://stackoverflow.com/a/25668869 - @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" - -get_tools: - @echo "--> Installing tools" - go get -u -v $(GOTOOLS) - @gometalinter.v2 
--install - -update_tools: - @echo "--> Updating tools" - @go get -u $(GOTOOLS) - -#Run this from CI -get_vendor_deps: - @rm -rf vendor/ - @echo "--> Running dep" - @dep ensure -vendor-only - - -#Run this locally. -ensure_deps: - @rm -rf vendor/ - @echo "--> Running dep" - @dep ensure - -draw_deps: - @# requires brew install graphviz or apt-get install graphviz - go get github.com/RobotsAndPencils/goviz - @goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png - -get_deps_bin_size: - @# Copy of build recipe with additional flags to perform binary size analysis - $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1)) - @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log - @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" - -######################################## -### Testing - -## required to be run first by most tests -build_docker_test_image: - docker build -t tester -f ./test/docker/Dockerfile . - -### coverage, app, persistence, and libs tests -test_cover: - # run the go unit tests with coverage - bash test/test_cover.sh - -test_apps: - # run the app tests using bash - # requires `abci-cli` and `tendermint` binaries installed - bash test/app/test.sh - -test_persistence: - # run the persistence tests using bash - # requires `abci-cli` installed - docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh - - # TODO undockerize - # bash test/persist/test_failure_indices.sh - -test_p2p: - docker rm -f rsyslog || true - rm -rf test/logs || true - mkdir test/logs - cd test/ - docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - cd .. 
- # requires 'tester' the image from above - bash test/p2p/test.sh tester - -need_abci: - bash scripts/install_abci_apps.sh - -test_integrations: - make build_docker_test_image - make get_tools - make get_vendor_deps - make install - make need_abci - make test_cover - make test_apps - make test_persistence - make test_p2p - -test_release: - @go test -tags release $(PACKAGES) - -test100: - @for i in {1..100}; do make test; done - -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' - -### go tests -test: - @echo "--> Running go test" - @go test $(PACKAGES) - -test_race: - @echo "--> Running go test --race" - @go test -v -race $(PACKAGES) - - -######################################## -### Formatting, linting, and vetting - -fmt: - @go fmt ./... - -metalinter: - @echo "--> Running linter" - @gometalinter.v2 --vendor --deadline=600s --disable-all \ - --enable=deadcode \ - --enable=gosimple \ - --enable=misspell \ - --enable=safesql \ - ./... - #--enable=gas \ - #--enable=maligned \ - #--enable=dupl \ - #--enable=errcheck \ - #--enable=goconst \ - #--enable=gocyclo \ - #--enable=goimports \ - #--enable=golint \ <== comments on anything exported - #--enable=gotype \ - #--enable=ineffassign \ - #--enable=interfacer \ - #--enable=megacheck \ - #--enable=staticcheck \ - #--enable=structcheck \ - #--enable=unconvert \ - #--enable=unparam \ - #--enable=unused \ - #--enable=varcheck \ - #--enable=vet \ - #--enable=vetshadow \ - -metalinter_all: - @echo "--> Running linter (all)" - gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... 
- -########################################################### -### Docker image - -build-docker: - cp build/tendermint DOCKER/tendermint - docker build --label=tendermint --tag="tendermint/tendermint" DOCKER - rm -rf DOCKER/tendermint - -########################################################### -### Local testnet using docker - -# Build linux binary on other platforms -build-linux: - GOOS=linux GOARCH=amd64 $(MAKE) build - -build-docker-localnode: - cd networks/local - make - -# Run a 4-node testnet locally -localnet-start: localnet-stop - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi - docker-compose up - -# Stop testnet -localnet-stop: - docker-compose down - -########################################################### -### Remote full-nodes (sentry) using terraform and ansible - -# Server management -sentry-start: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" - @if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi - cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml - @echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". 
(Public key of validator, chain ID, peer IP and node ID.)" - -# Configuration management -sentry-config: - cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build - -sentry-stop: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" - -# meant for the CI, inspect script & adapt accordingly -build-slate: - bash scripts/slate.sh - -# To avoid unintended conflicts with file names, always add to .PHONY -# unless there is a reason not to. -# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate diff --git a/README.md b/README.md deleted file mode 100644 index daba4f59a..000000000 --- a/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Tendermint - -[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) -[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication). -Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short. 
- -[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) -[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/tendermint/tendermint) -[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm) -[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) -[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) -[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) - - -Branch | Tests | Coverage -----------|-------|---------- -master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) -develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - -and securely replicates it on many machines. - -For protocol details, see [the specification](/docs/spec). 
- -## A Note on Production Readiness - -While Tendermint is being used in production in private, permissioned -environments, we are still working actively to harden and audit it in preparation -for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/). -We are also still making breaking changes to the protocol and the APIs. -Thus we tag the releases as *alpha software*. - -In any case, if you intend to run Tendermint in production, -please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :) - -## Security - -To report a security vulnerability, see our [bug bounty -program](https://tendermint.com/security). - -For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md) - -## Minimum requirements - -Requirement|Notes ----|--- -Go version | Go1.9 or higher - -## Install - -See the [install instructions](/docs/install.rst) - -## Quick Start - -- [Single node](/docs/using-tendermint.rst) -- [Local cluster using docker-compose](/networks/local) -- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md) -- [Join the public testnet](https://cosmos.network/testnet) - -## Resources - -### Tendermint Core - -For details about the blockchain data structures and the p2p protocols, see the -the [Tendermint specification](/docs/spec). - -For details on using the software, [Read The Docs](https://tendermint.readthedocs.io/en/master/). -Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs. 
- - -### Sub-projects - -* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface -* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library -* [Go-Crypto](http://github.com/tendermint/tendermint/crypto), an elliptic curve cryptography library -* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally -* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation - -### Tools -* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools) - -### Applications - -* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework -* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint -* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications) - -### More - -* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) -* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf) -* [Tendermint Blog](https://blog.cosmos.network/tendermint/home) -* [Cosmos Blog](https://blog.cosmos.network) - -## Contributing - -Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md). - -## Versioning - -### SemVer - -Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes. -According to SemVer, anything in the public API can change at any time before version 1.0.0 - -To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used -to signal breaking changes across a subset of the total public API. This subset includes all -interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not -include the in-process Go APIs. 
- -That said, breaking changes in the following packages will be documented in the -CHANGELOG even if they don't lead to MINOR version bumps: - -- types -- rpc/client -- config -- node - -Exported objects in these packages that are not covered by the versioning scheme -are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time. -Functions, types, and values in any other package may also change at any time. - -### Upgrades - -In an effort to avoid accumulating technical debt prior to 1.0.0, -we do not guarantee that breaking changes (ie. bumps in the MINOR version) -will work with existing tendermint blockchains. In these cases you will -have to start a new blockchain, or write something custom to get the old -data into the new chain. - -However, any bump in the PATCH version should be compatible with existing histories -(if not please open an [issue](https://github.com/tendermint/tendermint/issues)). - -## Code of Conduct - -Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md). diff --git a/version/version.go b/version/version.go deleted file mode 100644 index 9be4c9d82..000000000 --- a/version/version.go +++ /dev/null @@ -1,23 +0,0 @@ -package version - -// Version components -const ( - Maj = "0" - Min = "22" - Fix = "0" -) - -var ( - // Version is the current version of Tendermint - // Must be a string because scripts like dist.sh read this file. - Version = "0.22.0" - - // GitCommit is the current HEAD set using ldflags. 
- GitCommit string -) - -func init() { - if GitCommit != "" { - Version += "-" + GitCommit - } -} From ae3bf8183389476f737f6650285b1bcaedb92851 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:36:03 -0400 Subject: [PATCH 501/515] mv tmlibs files to libs dir --- .editorconfig => libs/.editorconfig | 0 .gitignore => libs/.gitignore | 0 CHANGELOG.md => libs/CHANGELOG.md | 0 CODEOWNERS => libs/CODEOWNERS | 0 Gopkg.lock => libs/Gopkg.lock | 0 Gopkg.toml => libs/Gopkg.toml | 0 LICENSE => libs/LICENSE | 0 Makefile => libs/Makefile | 0 README.md => libs/README.md | 0 {autofile => libs/autofile}/README.md | 0 {autofile => libs/autofile}/autofile.go | 0 {autofile => libs/autofile}/autofile_test.go | 0 {autofile => libs/autofile}/cmd/logjack.go | 0 {autofile => libs/autofile}/group.go | 0 {autofile => libs/autofile}/group_test.go | 0 {autofile => libs/autofile}/sighup_watcher.go | 0 {bech32 => libs/bech32}/bech32.go | 0 {bech32 => libs/bech32}/bech32_test.go | 0 circle.yml => libs/circle.yml | 0 {cli => libs/cli}/flags/log_level.go | 0 {cli => libs/cli}/flags/log_level_test.go | 0 {cli => libs/cli}/helper.go | 0 {cli => libs/cli}/setup.go | 0 {cli => libs/cli}/setup_test.go | 0 {clist => libs/clist}/clist.go | 0 {clist => libs/clist}/clist_test.go | 0 {common => libs/common}/LICENSE | 0 {common => libs/common}/array.go | 0 {common => libs/common}/async.go | 0 {common => libs/common}/async_test.go | 0 {common => libs/common}/bit_array.go | 0 {common => libs/common}/bit_array_test.go | 0 {common => libs/common}/bytes.go | 0 {common => libs/common}/bytes_test.go | 0 {common => libs/common}/byteslice.go | 0 {common => libs/common}/byteslice_test.go | 0 {common => libs/common}/cmap.go | 0 {common => libs/common}/cmap_test.go | 0 {common => libs/common}/colors.go | 0 {common => libs/common}/date.go | 0 {common => libs/common}/date_test.go | 0 {common => libs/common}/errors.go | 0 {common => libs/common}/errors_test.go | 0 {common => libs/common}/heap.go | 0 
{common => libs/common}/int.go | 0 {common => libs/common}/int_test.go | 0 {common => libs/common}/io.go | 0 {common => libs/common}/kvpair.go | 0 {common => libs/common}/math.go | 0 {common => libs/common}/net.go | 0 {common => libs/common}/net_test.go | 0 {common => libs/common}/nil.go | 0 {common => libs/common}/os.go | 0 {common => libs/common}/os_test.go | 0 {common => libs/common}/random.go | 0 {common => libs/common}/random_test.go | 0 {common => libs/common}/repeat_timer.go | 0 {common => libs/common}/repeat_timer_test.go | 0 {common => libs/common}/service.go | 0 {common => libs/common}/service_test.go | 0 {common => libs/common}/string.go | 0 {common => libs/common}/string_test.go | 0 {common => libs/common}/throttle_timer.go | 0 {common => libs/common}/throttle_timer_test.go | 0 {common => libs/common}/types.pb.go | 0 {common => libs/common}/types.proto | 0 {common => libs/common}/word.go | 0 {db => libs/db}/LICENSE.md | 0 {db => libs/db}/README.md | 0 {db => libs/db}/backend_test.go | 0 {db => libs/db}/c_level_db.go | 0 {db => libs/db}/c_level_db_test.go | 0 {db => libs/db}/common_test.go | 0 {db => libs/db}/db.go | 0 {db => libs/db}/db_test.go | 0 {db => libs/db}/debug_db.go | 0 {db => libs/db}/fsdb.go | 0 {db => libs/db}/go_level_db.go | 0 {db => libs/db}/go_level_db_test.go | 0 {db => libs/db}/mem_batch.go | 0 {db => libs/db}/mem_db.go | 0 {db => libs/db}/prefix_db.go | 0 {db => libs/db}/prefix_db_test.go | 0 {db => libs/db}/remotedb/doc.go | 0 {db => libs/db}/remotedb/grpcdb/client.go | 0 {db => libs/db}/remotedb/grpcdb/doc.go | 0 {db => libs/db}/remotedb/grpcdb/example_test.go | 0 {db => libs/db}/remotedb/grpcdb/server.go | 0 {db => libs/db}/remotedb/proto/defs.pb.go | 0 {db => libs/db}/remotedb/proto/defs.proto | 0 {db => libs/db}/remotedb/remotedb.go | 0 {db => libs/db}/remotedb/remotedb_test.go | 0 {db => libs/db}/types.go | 0 {db => libs/db}/util.go | 0 {db => libs/db}/util_test.go | 0 {flowrate => libs/flowrate}/README.md | 0 {flowrate => 
libs/flowrate}/flowrate.go | 0 {flowrate => libs/flowrate}/io.go | 0 {flowrate => libs/flowrate}/io_test.go | 0 {flowrate => libs/flowrate}/util.go | 0 glide.lock => libs/glide.lock | 0 glide.yaml => libs/glide.yaml | 0 {log => libs/log}/filter.go | 0 {log => libs/log}/filter_test.go | 0 {log => libs/log}/logger.go | 0 {log => libs/log}/nop_logger.go | 0 {log => libs/log}/testing_logger.go | 0 {log => libs/log}/tm_json_logger.go | 0 {log => libs/log}/tm_logger.go | 0 {log => libs/log}/tm_logger_test.go | 0 {log => libs/log}/tmfmt_logger.go | 0 {log => libs/log}/tmfmt_logger_test.go | 0 {log => libs/log}/tracing_logger.go | 0 {log => libs/log}/tracing_logger_test.go | 0 merge.sh => libs/merge.sh | 0 {merkle => libs/merkle}/README.md | 0 {merkle => libs/merkle}/simple_map.go | 0 {merkle => libs/merkle}/simple_map_test.go | 0 {merkle => libs/merkle}/simple_proof.go | 0 {merkle => libs/merkle}/simple_tree.go | 0 {merkle => libs/merkle}/simple_tree_test.go | 0 {merkle => libs/merkle}/tmhash/hash.go | 0 {merkle => libs/merkle}/tmhash/hash_test.go | 0 {merkle => libs/merkle}/types.go | 0 test.sh => libs/test.sh | 0 {test => libs/test}/README.md | 0 {test => libs/test}/app/clean.sh | 0 {test => libs/test}/app/counter_test.sh | 0 {test => libs/test}/app/grpc_client.go | 0 {test => libs/test}/app/kvstore_test.sh | 0 {test => libs/test}/app/test.sh | 0 {test => libs/test}/assert.go | 0 {test => libs/test}/docker/Dockerfile | 0 {test => libs/test}/docker/build.sh | 0 {test => libs/test}/mutate.go | 0 {test => libs/test}/p2p/README.md | 0 {test => libs/test}/p2p/atomic_broadcast/test.sh | 0 {test => libs/test}/p2p/basic/test.sh | 0 {test => libs/test}/p2p/circleci.sh | 0 {test => libs/test}/p2p/client.sh | 0 {test => libs/test}/p2p/data/mach1/core/config/genesis.json | 0 {test => libs/test}/p2p/data/mach1/core/config/node_key.json | 0 .../test}/p2p/data/mach1/core/config/priv_validator.json | 0 {test => libs/test}/p2p/data/mach2/core/config/genesis.json | 0 {test => 
libs/test}/p2p/data/mach2/core/config/node_key.json | 0 .../test}/p2p/data/mach2/core/config/priv_validator.json | 0 {test => libs/test}/p2p/data/mach3/core/config/genesis.json | 0 {test => libs/test}/p2p/data/mach3/core/config/node_key.json | 0 .../test}/p2p/data/mach3/core/config/priv_validator.json | 0 {test => libs/test}/p2p/data/mach4/core/config/genesis.json | 0 {test => libs/test}/p2p/data/mach4/core/config/node_key.json | 0 .../test}/p2p/data/mach4/core/config/priv_validator.json | 0 {test => libs/test}/p2p/fast_sync/check_peer.sh | 0 {test => libs/test}/p2p/fast_sync/test.sh | 0 {test => libs/test}/p2p/fast_sync/test_peer.sh | 0 {test => libs/test}/p2p/ip.sh | 0 {test => libs/test}/p2p/ip_plus_id.sh | 0 {test => libs/test}/p2p/kill_all/check_peers.sh | 0 {test => libs/test}/p2p/kill_all/test.sh | 0 {test => libs/test}/p2p/local_testnet_start.sh | 0 {test => libs/test}/p2p/local_testnet_stop.sh | 0 {test => libs/test}/p2p/peer.sh | 0 {test => libs/test}/p2p/persistent_peers.sh | 0 {test => libs/test}/p2p/pex/check_peer.sh | 0 {test => libs/test}/p2p/pex/dial_peers.sh | 0 {test => libs/test}/p2p/pex/test.sh | 0 {test => libs/test}/p2p/pex/test_addrbook.sh | 0 {test => libs/test}/p2p/pex/test_dial_peers.sh | 0 {test => libs/test}/p2p/test.sh | 0 {test => libs/test}/persist/test_failure_indices.sh | 0 {test => libs/test}/persist/test_simple.sh | 0 {test => libs/test}/persist/txs.sh | 0 {test => libs/test}/test_cover.sh | 0 {version => libs/version}/version.go | 0 174 files changed, 0 insertions(+), 0 deletions(-) rename .editorconfig => libs/.editorconfig (100%) rename .gitignore => libs/.gitignore (100%) rename CHANGELOG.md => libs/CHANGELOG.md (100%) rename CODEOWNERS => libs/CODEOWNERS (100%) rename Gopkg.lock => libs/Gopkg.lock (100%) rename Gopkg.toml => libs/Gopkg.toml (100%) rename LICENSE => libs/LICENSE (100%) rename Makefile => libs/Makefile (100%) rename README.md => libs/README.md (100%) rename {autofile => libs/autofile}/README.md (100%) rename 
{autofile => libs/autofile}/autofile.go (100%) rename {autofile => libs/autofile}/autofile_test.go (100%) rename {autofile => libs/autofile}/cmd/logjack.go (100%) rename {autofile => libs/autofile}/group.go (100%) rename {autofile => libs/autofile}/group_test.go (100%) rename {autofile => libs/autofile}/sighup_watcher.go (100%) rename {bech32 => libs/bech32}/bech32.go (100%) rename {bech32 => libs/bech32}/bech32_test.go (100%) rename circle.yml => libs/circle.yml (100%) rename {cli => libs/cli}/flags/log_level.go (100%) rename {cli => libs/cli}/flags/log_level_test.go (100%) rename {cli => libs/cli}/helper.go (100%) rename {cli => libs/cli}/setup.go (100%) rename {cli => libs/cli}/setup_test.go (100%) rename {clist => libs/clist}/clist.go (100%) rename {clist => libs/clist}/clist_test.go (100%) rename {common => libs/common}/LICENSE (100%) rename {common => libs/common}/array.go (100%) rename {common => libs/common}/async.go (100%) rename {common => libs/common}/async_test.go (100%) rename {common => libs/common}/bit_array.go (100%) rename {common => libs/common}/bit_array_test.go (100%) rename {common => libs/common}/bytes.go (100%) rename {common => libs/common}/bytes_test.go (100%) rename {common => libs/common}/byteslice.go (100%) rename {common => libs/common}/byteslice_test.go (100%) rename {common => libs/common}/cmap.go (100%) rename {common => libs/common}/cmap_test.go (100%) rename {common => libs/common}/colors.go (100%) rename {common => libs/common}/date.go (100%) rename {common => libs/common}/date_test.go (100%) rename {common => libs/common}/errors.go (100%) rename {common => libs/common}/errors_test.go (100%) rename {common => libs/common}/heap.go (100%) rename {common => libs/common}/int.go (100%) rename {common => libs/common}/int_test.go (100%) rename {common => libs/common}/io.go (100%) rename {common => libs/common}/kvpair.go (100%) rename {common => libs/common}/math.go (100%) rename {common => libs/common}/net.go (100%) rename {common => 
libs/common}/net_test.go (100%) rename {common => libs/common}/nil.go (100%) rename {common => libs/common}/os.go (100%) rename {common => libs/common}/os_test.go (100%) rename {common => libs/common}/random.go (100%) rename {common => libs/common}/random_test.go (100%) rename {common => libs/common}/repeat_timer.go (100%) rename {common => libs/common}/repeat_timer_test.go (100%) rename {common => libs/common}/service.go (100%) rename {common => libs/common}/service_test.go (100%) rename {common => libs/common}/string.go (100%) rename {common => libs/common}/string_test.go (100%) rename {common => libs/common}/throttle_timer.go (100%) rename {common => libs/common}/throttle_timer_test.go (100%) rename {common => libs/common}/types.pb.go (100%) rename {common => libs/common}/types.proto (100%) rename {common => libs/common}/word.go (100%) rename {db => libs/db}/LICENSE.md (100%) rename {db => libs/db}/README.md (100%) rename {db => libs/db}/backend_test.go (100%) rename {db => libs/db}/c_level_db.go (100%) rename {db => libs/db}/c_level_db_test.go (100%) rename {db => libs/db}/common_test.go (100%) rename {db => libs/db}/db.go (100%) rename {db => libs/db}/db_test.go (100%) rename {db => libs/db}/debug_db.go (100%) rename {db => libs/db}/fsdb.go (100%) rename {db => libs/db}/go_level_db.go (100%) rename {db => libs/db}/go_level_db_test.go (100%) rename {db => libs/db}/mem_batch.go (100%) rename {db => libs/db}/mem_db.go (100%) rename {db => libs/db}/prefix_db.go (100%) rename {db => libs/db}/prefix_db_test.go (100%) rename {db => libs/db}/remotedb/doc.go (100%) rename {db => libs/db}/remotedb/grpcdb/client.go (100%) rename {db => libs/db}/remotedb/grpcdb/doc.go (100%) rename {db => libs/db}/remotedb/grpcdb/example_test.go (100%) rename {db => libs/db}/remotedb/grpcdb/server.go (100%) rename {db => libs/db}/remotedb/proto/defs.pb.go (100%) rename {db => libs/db}/remotedb/proto/defs.proto (100%) rename {db => libs/db}/remotedb/remotedb.go (100%) rename {db => 
libs/db}/remotedb/remotedb_test.go (100%) rename {db => libs/db}/types.go (100%) rename {db => libs/db}/util.go (100%) rename {db => libs/db}/util_test.go (100%) rename {flowrate => libs/flowrate}/README.md (100%) rename {flowrate => libs/flowrate}/flowrate.go (100%) rename {flowrate => libs/flowrate}/io.go (100%) rename {flowrate => libs/flowrate}/io_test.go (100%) rename {flowrate => libs/flowrate}/util.go (100%) rename glide.lock => libs/glide.lock (100%) rename glide.yaml => libs/glide.yaml (100%) rename {log => libs/log}/filter.go (100%) rename {log => libs/log}/filter_test.go (100%) rename {log => libs/log}/logger.go (100%) rename {log => libs/log}/nop_logger.go (100%) rename {log => libs/log}/testing_logger.go (100%) rename {log => libs/log}/tm_json_logger.go (100%) rename {log => libs/log}/tm_logger.go (100%) rename {log => libs/log}/tm_logger_test.go (100%) rename {log => libs/log}/tmfmt_logger.go (100%) rename {log => libs/log}/tmfmt_logger_test.go (100%) rename {log => libs/log}/tracing_logger.go (100%) rename {log => libs/log}/tracing_logger_test.go (100%) rename merge.sh => libs/merge.sh (100%) rename {merkle => libs/merkle}/README.md (100%) rename {merkle => libs/merkle}/simple_map.go (100%) rename {merkle => libs/merkle}/simple_map_test.go (100%) rename {merkle => libs/merkle}/simple_proof.go (100%) rename {merkle => libs/merkle}/simple_tree.go (100%) rename {merkle => libs/merkle}/simple_tree_test.go (100%) rename {merkle => libs/merkle}/tmhash/hash.go (100%) rename {merkle => libs/merkle}/tmhash/hash_test.go (100%) rename {merkle => libs/merkle}/types.go (100%) rename test.sh => libs/test.sh (100%) rename {test => libs/test}/README.md (100%) rename {test => libs/test}/app/clean.sh (100%) rename {test => libs/test}/app/counter_test.sh (100%) rename {test => libs/test}/app/grpc_client.go (100%) rename {test => libs/test}/app/kvstore_test.sh (100%) rename {test => libs/test}/app/test.sh (100%) rename {test => libs/test}/assert.go (100%) rename {test 
=> libs/test}/docker/Dockerfile (100%) rename {test => libs/test}/docker/build.sh (100%) rename {test => libs/test}/mutate.go (100%) rename {test => libs/test}/p2p/README.md (100%) rename {test => libs/test}/p2p/atomic_broadcast/test.sh (100%) rename {test => libs/test}/p2p/basic/test.sh (100%) rename {test => libs/test}/p2p/circleci.sh (100%) rename {test => libs/test}/p2p/client.sh (100%) rename {test => libs/test}/p2p/data/mach1/core/config/genesis.json (100%) rename {test => libs/test}/p2p/data/mach1/core/config/node_key.json (100%) rename {test => libs/test}/p2p/data/mach1/core/config/priv_validator.json (100%) rename {test => libs/test}/p2p/data/mach2/core/config/genesis.json (100%) rename {test => libs/test}/p2p/data/mach2/core/config/node_key.json (100%) rename {test => libs/test}/p2p/data/mach2/core/config/priv_validator.json (100%) rename {test => libs/test}/p2p/data/mach3/core/config/genesis.json (100%) rename {test => libs/test}/p2p/data/mach3/core/config/node_key.json (100%) rename {test => libs/test}/p2p/data/mach3/core/config/priv_validator.json (100%) rename {test => libs/test}/p2p/data/mach4/core/config/genesis.json (100%) rename {test => libs/test}/p2p/data/mach4/core/config/node_key.json (100%) rename {test => libs/test}/p2p/data/mach4/core/config/priv_validator.json (100%) rename {test => libs/test}/p2p/fast_sync/check_peer.sh (100%) rename {test => libs/test}/p2p/fast_sync/test.sh (100%) rename {test => libs/test}/p2p/fast_sync/test_peer.sh (100%) rename {test => libs/test}/p2p/ip.sh (100%) rename {test => libs/test}/p2p/ip_plus_id.sh (100%) rename {test => libs/test}/p2p/kill_all/check_peers.sh (100%) rename {test => libs/test}/p2p/kill_all/test.sh (100%) rename {test => libs/test}/p2p/local_testnet_start.sh (100%) rename {test => libs/test}/p2p/local_testnet_stop.sh (100%) rename {test => libs/test}/p2p/peer.sh (100%) rename {test => libs/test}/p2p/persistent_peers.sh (100%) rename {test => libs/test}/p2p/pex/check_peer.sh (100%) rename {test 
=> libs/test}/p2p/pex/dial_peers.sh (100%) rename {test => libs/test}/p2p/pex/test.sh (100%) rename {test => libs/test}/p2p/pex/test_addrbook.sh (100%) rename {test => libs/test}/p2p/pex/test_dial_peers.sh (100%) rename {test => libs/test}/p2p/test.sh (100%) rename {test => libs/test}/persist/test_failure_indices.sh (100%) rename {test => libs/test}/persist/test_simple.sh (100%) rename {test => libs/test}/persist/txs.sh (100%) rename {test => libs/test}/test_cover.sh (100%) rename {version => libs/version}/version.go (100%) diff --git a/.editorconfig b/libs/.editorconfig similarity index 100% rename from .editorconfig rename to libs/.editorconfig diff --git a/.gitignore b/libs/.gitignore similarity index 100% rename from .gitignore rename to libs/.gitignore diff --git a/CHANGELOG.md b/libs/CHANGELOG.md similarity index 100% rename from CHANGELOG.md rename to libs/CHANGELOG.md diff --git a/CODEOWNERS b/libs/CODEOWNERS similarity index 100% rename from CODEOWNERS rename to libs/CODEOWNERS diff --git a/Gopkg.lock b/libs/Gopkg.lock similarity index 100% rename from Gopkg.lock rename to libs/Gopkg.lock diff --git a/Gopkg.toml b/libs/Gopkg.toml similarity index 100% rename from Gopkg.toml rename to libs/Gopkg.toml diff --git a/LICENSE b/libs/LICENSE similarity index 100% rename from LICENSE rename to libs/LICENSE diff --git a/Makefile b/libs/Makefile similarity index 100% rename from Makefile rename to libs/Makefile diff --git a/README.md b/libs/README.md similarity index 100% rename from README.md rename to libs/README.md diff --git a/autofile/README.md b/libs/autofile/README.md similarity index 100% rename from autofile/README.md rename to libs/autofile/README.md diff --git a/autofile/autofile.go b/libs/autofile/autofile.go similarity index 100% rename from autofile/autofile.go rename to libs/autofile/autofile.go diff --git a/autofile/autofile_test.go b/libs/autofile/autofile_test.go similarity index 100% rename from autofile/autofile_test.go rename to 
libs/autofile/autofile_test.go diff --git a/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go similarity index 100% rename from autofile/cmd/logjack.go rename to libs/autofile/cmd/logjack.go diff --git a/autofile/group.go b/libs/autofile/group.go similarity index 100% rename from autofile/group.go rename to libs/autofile/group.go diff --git a/autofile/group_test.go b/libs/autofile/group_test.go similarity index 100% rename from autofile/group_test.go rename to libs/autofile/group_test.go diff --git a/autofile/sighup_watcher.go b/libs/autofile/sighup_watcher.go similarity index 100% rename from autofile/sighup_watcher.go rename to libs/autofile/sighup_watcher.go diff --git a/bech32/bech32.go b/libs/bech32/bech32.go similarity index 100% rename from bech32/bech32.go rename to libs/bech32/bech32.go diff --git a/bech32/bech32_test.go b/libs/bech32/bech32_test.go similarity index 100% rename from bech32/bech32_test.go rename to libs/bech32/bech32_test.go diff --git a/circle.yml b/libs/circle.yml similarity index 100% rename from circle.yml rename to libs/circle.yml diff --git a/cli/flags/log_level.go b/libs/cli/flags/log_level.go similarity index 100% rename from cli/flags/log_level.go rename to libs/cli/flags/log_level.go diff --git a/cli/flags/log_level_test.go b/libs/cli/flags/log_level_test.go similarity index 100% rename from cli/flags/log_level_test.go rename to libs/cli/flags/log_level_test.go diff --git a/cli/helper.go b/libs/cli/helper.go similarity index 100% rename from cli/helper.go rename to libs/cli/helper.go diff --git a/cli/setup.go b/libs/cli/setup.go similarity index 100% rename from cli/setup.go rename to libs/cli/setup.go diff --git a/cli/setup_test.go b/libs/cli/setup_test.go similarity index 100% rename from cli/setup_test.go rename to libs/cli/setup_test.go diff --git a/clist/clist.go b/libs/clist/clist.go similarity index 100% rename from clist/clist.go rename to libs/clist/clist.go diff --git a/clist/clist_test.go b/libs/clist/clist_test.go 
similarity index 100% rename from clist/clist_test.go rename to libs/clist/clist_test.go diff --git a/common/LICENSE b/libs/common/LICENSE similarity index 100% rename from common/LICENSE rename to libs/common/LICENSE diff --git a/common/array.go b/libs/common/array.go similarity index 100% rename from common/array.go rename to libs/common/array.go diff --git a/common/async.go b/libs/common/async.go similarity index 100% rename from common/async.go rename to libs/common/async.go diff --git a/common/async_test.go b/libs/common/async_test.go similarity index 100% rename from common/async_test.go rename to libs/common/async_test.go diff --git a/common/bit_array.go b/libs/common/bit_array.go similarity index 100% rename from common/bit_array.go rename to libs/common/bit_array.go diff --git a/common/bit_array_test.go b/libs/common/bit_array_test.go similarity index 100% rename from common/bit_array_test.go rename to libs/common/bit_array_test.go diff --git a/common/bytes.go b/libs/common/bytes.go similarity index 100% rename from common/bytes.go rename to libs/common/bytes.go diff --git a/common/bytes_test.go b/libs/common/bytes_test.go similarity index 100% rename from common/bytes_test.go rename to libs/common/bytes_test.go diff --git a/common/byteslice.go b/libs/common/byteslice.go similarity index 100% rename from common/byteslice.go rename to libs/common/byteslice.go diff --git a/common/byteslice_test.go b/libs/common/byteslice_test.go similarity index 100% rename from common/byteslice_test.go rename to libs/common/byteslice_test.go diff --git a/common/cmap.go b/libs/common/cmap.go similarity index 100% rename from common/cmap.go rename to libs/common/cmap.go diff --git a/common/cmap_test.go b/libs/common/cmap_test.go similarity index 100% rename from common/cmap_test.go rename to libs/common/cmap_test.go diff --git a/common/colors.go b/libs/common/colors.go similarity index 100% rename from common/colors.go rename to libs/common/colors.go diff --git 
a/common/date.go b/libs/common/date.go similarity index 100% rename from common/date.go rename to libs/common/date.go diff --git a/common/date_test.go b/libs/common/date_test.go similarity index 100% rename from common/date_test.go rename to libs/common/date_test.go diff --git a/common/errors.go b/libs/common/errors.go similarity index 100% rename from common/errors.go rename to libs/common/errors.go diff --git a/common/errors_test.go b/libs/common/errors_test.go similarity index 100% rename from common/errors_test.go rename to libs/common/errors_test.go diff --git a/common/heap.go b/libs/common/heap.go similarity index 100% rename from common/heap.go rename to libs/common/heap.go diff --git a/common/int.go b/libs/common/int.go similarity index 100% rename from common/int.go rename to libs/common/int.go diff --git a/common/int_test.go b/libs/common/int_test.go similarity index 100% rename from common/int_test.go rename to libs/common/int_test.go diff --git a/common/io.go b/libs/common/io.go similarity index 100% rename from common/io.go rename to libs/common/io.go diff --git a/common/kvpair.go b/libs/common/kvpair.go similarity index 100% rename from common/kvpair.go rename to libs/common/kvpair.go diff --git a/common/math.go b/libs/common/math.go similarity index 100% rename from common/math.go rename to libs/common/math.go diff --git a/common/net.go b/libs/common/net.go similarity index 100% rename from common/net.go rename to libs/common/net.go diff --git a/common/net_test.go b/libs/common/net_test.go similarity index 100% rename from common/net_test.go rename to libs/common/net_test.go diff --git a/common/nil.go b/libs/common/nil.go similarity index 100% rename from common/nil.go rename to libs/common/nil.go diff --git a/common/os.go b/libs/common/os.go similarity index 100% rename from common/os.go rename to libs/common/os.go diff --git a/common/os_test.go b/libs/common/os_test.go similarity index 100% rename from common/os_test.go rename to 
libs/common/os_test.go diff --git a/common/random.go b/libs/common/random.go similarity index 100% rename from common/random.go rename to libs/common/random.go diff --git a/common/random_test.go b/libs/common/random_test.go similarity index 100% rename from common/random_test.go rename to libs/common/random_test.go diff --git a/common/repeat_timer.go b/libs/common/repeat_timer.go similarity index 100% rename from common/repeat_timer.go rename to libs/common/repeat_timer.go diff --git a/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go similarity index 100% rename from common/repeat_timer_test.go rename to libs/common/repeat_timer_test.go diff --git a/common/service.go b/libs/common/service.go similarity index 100% rename from common/service.go rename to libs/common/service.go diff --git a/common/service_test.go b/libs/common/service_test.go similarity index 100% rename from common/service_test.go rename to libs/common/service_test.go diff --git a/common/string.go b/libs/common/string.go similarity index 100% rename from common/string.go rename to libs/common/string.go diff --git a/common/string_test.go b/libs/common/string_test.go similarity index 100% rename from common/string_test.go rename to libs/common/string_test.go diff --git a/common/throttle_timer.go b/libs/common/throttle_timer.go similarity index 100% rename from common/throttle_timer.go rename to libs/common/throttle_timer.go diff --git a/common/throttle_timer_test.go b/libs/common/throttle_timer_test.go similarity index 100% rename from common/throttle_timer_test.go rename to libs/common/throttle_timer_test.go diff --git a/common/types.pb.go b/libs/common/types.pb.go similarity index 100% rename from common/types.pb.go rename to libs/common/types.pb.go diff --git a/common/types.proto b/libs/common/types.proto similarity index 100% rename from common/types.proto rename to libs/common/types.proto diff --git a/common/word.go b/libs/common/word.go similarity index 100% rename from 
common/word.go rename to libs/common/word.go diff --git a/db/LICENSE.md b/libs/db/LICENSE.md similarity index 100% rename from db/LICENSE.md rename to libs/db/LICENSE.md diff --git a/db/README.md b/libs/db/README.md similarity index 100% rename from db/README.md rename to libs/db/README.md diff --git a/db/backend_test.go b/libs/db/backend_test.go similarity index 100% rename from db/backend_test.go rename to libs/db/backend_test.go diff --git a/db/c_level_db.go b/libs/db/c_level_db.go similarity index 100% rename from db/c_level_db.go rename to libs/db/c_level_db.go diff --git a/db/c_level_db_test.go b/libs/db/c_level_db_test.go similarity index 100% rename from db/c_level_db_test.go rename to libs/db/c_level_db_test.go diff --git a/db/common_test.go b/libs/db/common_test.go similarity index 100% rename from db/common_test.go rename to libs/db/common_test.go diff --git a/db/db.go b/libs/db/db.go similarity index 100% rename from db/db.go rename to libs/db/db.go diff --git a/db/db_test.go b/libs/db/db_test.go similarity index 100% rename from db/db_test.go rename to libs/db/db_test.go diff --git a/db/debug_db.go b/libs/db/debug_db.go similarity index 100% rename from db/debug_db.go rename to libs/db/debug_db.go diff --git a/db/fsdb.go b/libs/db/fsdb.go similarity index 100% rename from db/fsdb.go rename to libs/db/fsdb.go diff --git a/db/go_level_db.go b/libs/db/go_level_db.go similarity index 100% rename from db/go_level_db.go rename to libs/db/go_level_db.go diff --git a/db/go_level_db_test.go b/libs/db/go_level_db_test.go similarity index 100% rename from db/go_level_db_test.go rename to libs/db/go_level_db_test.go diff --git a/db/mem_batch.go b/libs/db/mem_batch.go similarity index 100% rename from db/mem_batch.go rename to libs/db/mem_batch.go diff --git a/db/mem_db.go b/libs/db/mem_db.go similarity index 100% rename from db/mem_db.go rename to libs/db/mem_db.go diff --git a/db/prefix_db.go b/libs/db/prefix_db.go similarity index 100% rename from 
db/prefix_db.go rename to libs/db/prefix_db.go diff --git a/db/prefix_db_test.go b/libs/db/prefix_db_test.go similarity index 100% rename from db/prefix_db_test.go rename to libs/db/prefix_db_test.go diff --git a/db/remotedb/doc.go b/libs/db/remotedb/doc.go similarity index 100% rename from db/remotedb/doc.go rename to libs/db/remotedb/doc.go diff --git a/db/remotedb/grpcdb/client.go b/libs/db/remotedb/grpcdb/client.go similarity index 100% rename from db/remotedb/grpcdb/client.go rename to libs/db/remotedb/grpcdb/client.go diff --git a/db/remotedb/grpcdb/doc.go b/libs/db/remotedb/grpcdb/doc.go similarity index 100% rename from db/remotedb/grpcdb/doc.go rename to libs/db/remotedb/grpcdb/doc.go diff --git a/db/remotedb/grpcdb/example_test.go b/libs/db/remotedb/grpcdb/example_test.go similarity index 100% rename from db/remotedb/grpcdb/example_test.go rename to libs/db/remotedb/grpcdb/example_test.go diff --git a/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go similarity index 100% rename from db/remotedb/grpcdb/server.go rename to libs/db/remotedb/grpcdb/server.go diff --git a/db/remotedb/proto/defs.pb.go b/libs/db/remotedb/proto/defs.pb.go similarity index 100% rename from db/remotedb/proto/defs.pb.go rename to libs/db/remotedb/proto/defs.pb.go diff --git a/db/remotedb/proto/defs.proto b/libs/db/remotedb/proto/defs.proto similarity index 100% rename from db/remotedb/proto/defs.proto rename to libs/db/remotedb/proto/defs.proto diff --git a/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go similarity index 100% rename from db/remotedb/remotedb.go rename to libs/db/remotedb/remotedb.go diff --git a/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go similarity index 100% rename from db/remotedb/remotedb_test.go rename to libs/db/remotedb/remotedb_test.go diff --git a/db/types.go b/libs/db/types.go similarity index 100% rename from db/types.go rename to libs/db/types.go diff --git a/db/util.go b/libs/db/util.go similarity index 100% 
rename from db/util.go rename to libs/db/util.go diff --git a/db/util_test.go b/libs/db/util_test.go similarity index 100% rename from db/util_test.go rename to libs/db/util_test.go diff --git a/flowrate/README.md b/libs/flowrate/README.md similarity index 100% rename from flowrate/README.md rename to libs/flowrate/README.md diff --git a/flowrate/flowrate.go b/libs/flowrate/flowrate.go similarity index 100% rename from flowrate/flowrate.go rename to libs/flowrate/flowrate.go diff --git a/flowrate/io.go b/libs/flowrate/io.go similarity index 100% rename from flowrate/io.go rename to libs/flowrate/io.go diff --git a/flowrate/io_test.go b/libs/flowrate/io_test.go similarity index 100% rename from flowrate/io_test.go rename to libs/flowrate/io_test.go diff --git a/flowrate/util.go b/libs/flowrate/util.go similarity index 100% rename from flowrate/util.go rename to libs/flowrate/util.go diff --git a/glide.lock b/libs/glide.lock similarity index 100% rename from glide.lock rename to libs/glide.lock diff --git a/glide.yaml b/libs/glide.yaml similarity index 100% rename from glide.yaml rename to libs/glide.yaml diff --git a/log/filter.go b/libs/log/filter.go similarity index 100% rename from log/filter.go rename to libs/log/filter.go diff --git a/log/filter_test.go b/libs/log/filter_test.go similarity index 100% rename from log/filter_test.go rename to libs/log/filter_test.go diff --git a/log/logger.go b/libs/log/logger.go similarity index 100% rename from log/logger.go rename to libs/log/logger.go diff --git a/log/nop_logger.go b/libs/log/nop_logger.go similarity index 100% rename from log/nop_logger.go rename to libs/log/nop_logger.go diff --git a/log/testing_logger.go b/libs/log/testing_logger.go similarity index 100% rename from log/testing_logger.go rename to libs/log/testing_logger.go diff --git a/log/tm_json_logger.go b/libs/log/tm_json_logger.go similarity index 100% rename from log/tm_json_logger.go rename to libs/log/tm_json_logger.go diff --git 
a/log/tm_logger.go b/libs/log/tm_logger.go similarity index 100% rename from log/tm_logger.go rename to libs/log/tm_logger.go diff --git a/log/tm_logger_test.go b/libs/log/tm_logger_test.go similarity index 100% rename from log/tm_logger_test.go rename to libs/log/tm_logger_test.go diff --git a/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go similarity index 100% rename from log/tmfmt_logger.go rename to libs/log/tmfmt_logger.go diff --git a/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go similarity index 100% rename from log/tmfmt_logger_test.go rename to libs/log/tmfmt_logger_test.go diff --git a/log/tracing_logger.go b/libs/log/tracing_logger.go similarity index 100% rename from log/tracing_logger.go rename to libs/log/tracing_logger.go diff --git a/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go similarity index 100% rename from log/tracing_logger_test.go rename to libs/log/tracing_logger_test.go diff --git a/merge.sh b/libs/merge.sh similarity index 100% rename from merge.sh rename to libs/merge.sh diff --git a/merkle/README.md b/libs/merkle/README.md similarity index 100% rename from merkle/README.md rename to libs/merkle/README.md diff --git a/merkle/simple_map.go b/libs/merkle/simple_map.go similarity index 100% rename from merkle/simple_map.go rename to libs/merkle/simple_map.go diff --git a/merkle/simple_map_test.go b/libs/merkle/simple_map_test.go similarity index 100% rename from merkle/simple_map_test.go rename to libs/merkle/simple_map_test.go diff --git a/merkle/simple_proof.go b/libs/merkle/simple_proof.go similarity index 100% rename from merkle/simple_proof.go rename to libs/merkle/simple_proof.go diff --git a/merkle/simple_tree.go b/libs/merkle/simple_tree.go similarity index 100% rename from merkle/simple_tree.go rename to libs/merkle/simple_tree.go diff --git a/merkle/simple_tree_test.go b/libs/merkle/simple_tree_test.go similarity index 100% rename from merkle/simple_tree_test.go rename to libs/merkle/simple_tree_test.go 
diff --git a/merkle/tmhash/hash.go b/libs/merkle/tmhash/hash.go similarity index 100% rename from merkle/tmhash/hash.go rename to libs/merkle/tmhash/hash.go diff --git a/merkle/tmhash/hash_test.go b/libs/merkle/tmhash/hash_test.go similarity index 100% rename from merkle/tmhash/hash_test.go rename to libs/merkle/tmhash/hash_test.go diff --git a/merkle/types.go b/libs/merkle/types.go similarity index 100% rename from merkle/types.go rename to libs/merkle/types.go diff --git a/test.sh b/libs/test.sh similarity index 100% rename from test.sh rename to libs/test.sh diff --git a/test/README.md b/libs/test/README.md similarity index 100% rename from test/README.md rename to libs/test/README.md diff --git a/test/app/clean.sh b/libs/test/app/clean.sh similarity index 100% rename from test/app/clean.sh rename to libs/test/app/clean.sh diff --git a/test/app/counter_test.sh b/libs/test/app/counter_test.sh similarity index 100% rename from test/app/counter_test.sh rename to libs/test/app/counter_test.sh diff --git a/test/app/grpc_client.go b/libs/test/app/grpc_client.go similarity index 100% rename from test/app/grpc_client.go rename to libs/test/app/grpc_client.go diff --git a/test/app/kvstore_test.sh b/libs/test/app/kvstore_test.sh similarity index 100% rename from test/app/kvstore_test.sh rename to libs/test/app/kvstore_test.sh diff --git a/test/app/test.sh b/libs/test/app/test.sh similarity index 100% rename from test/app/test.sh rename to libs/test/app/test.sh diff --git a/test/assert.go b/libs/test/assert.go similarity index 100% rename from test/assert.go rename to libs/test/assert.go diff --git a/test/docker/Dockerfile b/libs/test/docker/Dockerfile similarity index 100% rename from test/docker/Dockerfile rename to libs/test/docker/Dockerfile diff --git a/test/docker/build.sh b/libs/test/docker/build.sh similarity index 100% rename from test/docker/build.sh rename to libs/test/docker/build.sh diff --git a/test/mutate.go b/libs/test/mutate.go similarity index 100% rename 
from test/mutate.go rename to libs/test/mutate.go diff --git a/test/p2p/README.md b/libs/test/p2p/README.md similarity index 100% rename from test/p2p/README.md rename to libs/test/p2p/README.md diff --git a/test/p2p/atomic_broadcast/test.sh b/libs/test/p2p/atomic_broadcast/test.sh similarity index 100% rename from test/p2p/atomic_broadcast/test.sh rename to libs/test/p2p/atomic_broadcast/test.sh diff --git a/test/p2p/basic/test.sh b/libs/test/p2p/basic/test.sh similarity index 100% rename from test/p2p/basic/test.sh rename to libs/test/p2p/basic/test.sh diff --git a/test/p2p/circleci.sh b/libs/test/p2p/circleci.sh similarity index 100% rename from test/p2p/circleci.sh rename to libs/test/p2p/circleci.sh diff --git a/test/p2p/client.sh b/libs/test/p2p/client.sh similarity index 100% rename from test/p2p/client.sh rename to libs/test/p2p/client.sh diff --git a/test/p2p/data/mach1/core/config/genesis.json b/libs/test/p2p/data/mach1/core/config/genesis.json similarity index 100% rename from test/p2p/data/mach1/core/config/genesis.json rename to libs/test/p2p/data/mach1/core/config/genesis.json diff --git a/test/p2p/data/mach1/core/config/node_key.json b/libs/test/p2p/data/mach1/core/config/node_key.json similarity index 100% rename from test/p2p/data/mach1/core/config/node_key.json rename to libs/test/p2p/data/mach1/core/config/node_key.json diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/libs/test/p2p/data/mach1/core/config/priv_validator.json similarity index 100% rename from test/p2p/data/mach1/core/config/priv_validator.json rename to libs/test/p2p/data/mach1/core/config/priv_validator.json diff --git a/test/p2p/data/mach2/core/config/genesis.json b/libs/test/p2p/data/mach2/core/config/genesis.json similarity index 100% rename from test/p2p/data/mach2/core/config/genesis.json rename to libs/test/p2p/data/mach2/core/config/genesis.json diff --git a/test/p2p/data/mach2/core/config/node_key.json b/libs/test/p2p/data/mach2/core/config/node_key.json 
similarity index 100% rename from test/p2p/data/mach2/core/config/node_key.json rename to libs/test/p2p/data/mach2/core/config/node_key.json diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/libs/test/p2p/data/mach2/core/config/priv_validator.json similarity index 100% rename from test/p2p/data/mach2/core/config/priv_validator.json rename to libs/test/p2p/data/mach2/core/config/priv_validator.json diff --git a/test/p2p/data/mach3/core/config/genesis.json b/libs/test/p2p/data/mach3/core/config/genesis.json similarity index 100% rename from test/p2p/data/mach3/core/config/genesis.json rename to libs/test/p2p/data/mach3/core/config/genesis.json diff --git a/test/p2p/data/mach3/core/config/node_key.json b/libs/test/p2p/data/mach3/core/config/node_key.json similarity index 100% rename from test/p2p/data/mach3/core/config/node_key.json rename to libs/test/p2p/data/mach3/core/config/node_key.json diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/libs/test/p2p/data/mach3/core/config/priv_validator.json similarity index 100% rename from test/p2p/data/mach3/core/config/priv_validator.json rename to libs/test/p2p/data/mach3/core/config/priv_validator.json diff --git a/test/p2p/data/mach4/core/config/genesis.json b/libs/test/p2p/data/mach4/core/config/genesis.json similarity index 100% rename from test/p2p/data/mach4/core/config/genesis.json rename to libs/test/p2p/data/mach4/core/config/genesis.json diff --git a/test/p2p/data/mach4/core/config/node_key.json b/libs/test/p2p/data/mach4/core/config/node_key.json similarity index 100% rename from test/p2p/data/mach4/core/config/node_key.json rename to libs/test/p2p/data/mach4/core/config/node_key.json diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/libs/test/p2p/data/mach4/core/config/priv_validator.json similarity index 100% rename from test/p2p/data/mach4/core/config/priv_validator.json rename to libs/test/p2p/data/mach4/core/config/priv_validator.json diff --git 
a/test/p2p/fast_sync/check_peer.sh b/libs/test/p2p/fast_sync/check_peer.sh similarity index 100% rename from test/p2p/fast_sync/check_peer.sh rename to libs/test/p2p/fast_sync/check_peer.sh diff --git a/test/p2p/fast_sync/test.sh b/libs/test/p2p/fast_sync/test.sh similarity index 100% rename from test/p2p/fast_sync/test.sh rename to libs/test/p2p/fast_sync/test.sh diff --git a/test/p2p/fast_sync/test_peer.sh b/libs/test/p2p/fast_sync/test_peer.sh similarity index 100% rename from test/p2p/fast_sync/test_peer.sh rename to libs/test/p2p/fast_sync/test_peer.sh diff --git a/test/p2p/ip.sh b/libs/test/p2p/ip.sh similarity index 100% rename from test/p2p/ip.sh rename to libs/test/p2p/ip.sh diff --git a/test/p2p/ip_plus_id.sh b/libs/test/p2p/ip_plus_id.sh similarity index 100% rename from test/p2p/ip_plus_id.sh rename to libs/test/p2p/ip_plus_id.sh diff --git a/test/p2p/kill_all/check_peers.sh b/libs/test/p2p/kill_all/check_peers.sh similarity index 100% rename from test/p2p/kill_all/check_peers.sh rename to libs/test/p2p/kill_all/check_peers.sh diff --git a/test/p2p/kill_all/test.sh b/libs/test/p2p/kill_all/test.sh similarity index 100% rename from test/p2p/kill_all/test.sh rename to libs/test/p2p/kill_all/test.sh diff --git a/test/p2p/local_testnet_start.sh b/libs/test/p2p/local_testnet_start.sh similarity index 100% rename from test/p2p/local_testnet_start.sh rename to libs/test/p2p/local_testnet_start.sh diff --git a/test/p2p/local_testnet_stop.sh b/libs/test/p2p/local_testnet_stop.sh similarity index 100% rename from test/p2p/local_testnet_stop.sh rename to libs/test/p2p/local_testnet_stop.sh diff --git a/test/p2p/peer.sh b/libs/test/p2p/peer.sh similarity index 100% rename from test/p2p/peer.sh rename to libs/test/p2p/peer.sh diff --git a/test/p2p/persistent_peers.sh b/libs/test/p2p/persistent_peers.sh similarity index 100% rename from test/p2p/persistent_peers.sh rename to libs/test/p2p/persistent_peers.sh diff --git a/test/p2p/pex/check_peer.sh 
b/libs/test/p2p/pex/check_peer.sh similarity index 100% rename from test/p2p/pex/check_peer.sh rename to libs/test/p2p/pex/check_peer.sh diff --git a/test/p2p/pex/dial_peers.sh b/libs/test/p2p/pex/dial_peers.sh similarity index 100% rename from test/p2p/pex/dial_peers.sh rename to libs/test/p2p/pex/dial_peers.sh diff --git a/test/p2p/pex/test.sh b/libs/test/p2p/pex/test.sh similarity index 100% rename from test/p2p/pex/test.sh rename to libs/test/p2p/pex/test.sh diff --git a/test/p2p/pex/test_addrbook.sh b/libs/test/p2p/pex/test_addrbook.sh similarity index 100% rename from test/p2p/pex/test_addrbook.sh rename to libs/test/p2p/pex/test_addrbook.sh diff --git a/test/p2p/pex/test_dial_peers.sh b/libs/test/p2p/pex/test_dial_peers.sh similarity index 100% rename from test/p2p/pex/test_dial_peers.sh rename to libs/test/p2p/pex/test_dial_peers.sh diff --git a/test/p2p/test.sh b/libs/test/p2p/test.sh similarity index 100% rename from test/p2p/test.sh rename to libs/test/p2p/test.sh diff --git a/test/persist/test_failure_indices.sh b/libs/test/persist/test_failure_indices.sh similarity index 100% rename from test/persist/test_failure_indices.sh rename to libs/test/persist/test_failure_indices.sh diff --git a/test/persist/test_simple.sh b/libs/test/persist/test_simple.sh similarity index 100% rename from test/persist/test_simple.sh rename to libs/test/persist/test_simple.sh diff --git a/test/persist/txs.sh b/libs/test/persist/txs.sh similarity index 100% rename from test/persist/txs.sh rename to libs/test/persist/txs.sh diff --git a/test/test_cover.sh b/libs/test/test_cover.sh similarity index 100% rename from test/test_cover.sh rename to libs/test/test_cover.sh diff --git a/version/version.go b/libs/version/version.go similarity index 100% rename from version/version.go rename to libs/version/version.go From 233b35a2a914f6b1d724c9ebb87cfc7607655c93 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:36:13 -0400 Subject: [PATCH 502/515] Revert "delete some 
things for the merge" This reverts commit 2902ab1a144b6c0bef9c8cdd605ef77a8e70a7cc. --- .editorconfig | 16 + .gitignore | 29 ++ CHANGELOG.md | 919 +++++++++++++++++++++++++++++++++++++++++++++ Gopkg.lock | 429 +++++++++++++++++++++ Gopkg.toml | 99 +++++ LICENSE | 204 ++++++++++ Makefile | 236 ++++++++++++ README.md | 138 +++++++ version/version.go | 23 ++ 9 files changed, 2093 insertions(+) create mode 100644 .editorconfig create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 version/version.go diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..481621f76 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*.{sh,Makefile}] +indent_style = tab + +[*.proto] +indent_style = space +indent_size = 2 diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..bcfd36db1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,29 @@ +*.swp +*.swo +.bak +*.bak +.DS_Store +build/* +rpc/test/.tendermint +.tendermint +remote_dump +.revision +vendor +.vagrant +test/p2p/data/ +test/logs +coverage.txt +docs/_build +docs/tools +*.log +abci-cli +abci/types/types.pb.go + +scripts/wal2json/wal2json +scripts/cutWALUntil/cutWALUntil + +.idea/ +*.iml + +libs/pubsub/query/fuzz_test/output +shunit2 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..d73c949a2 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,919 @@ +# Changelog + +## 0.22.0 + +*July 1st, 2018* + +BREAKING CHANGES: +- [config] Rename `skip_upnp` to `upnp`, and turn it off by default. 
+- [types] Update Amino to v0.10.1 + * Amino is now fully proto3 compatible for the basic types + * JSON-encoded types now use the type name instead of the prefix bytes + * Integers are encoded as strings +- [crypto] Update go-crypto to v0.10.0 and merge into `crypto` + * privKey.Sign returns error. + * ed25519 address is the first 20-bytes of the SHA256 of the pubkey + * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 +- [rpc] `syncing` is now called `catching_up`. + +FEATURES +- [cmd] Added metrics (served under `/metrics` using a Prometheus client; + disabled by default). See the new `instrumentation` section in the config and + [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) + guide. +- [p2p] Add IPv6 support to peering. + +IMPROVEMENT +- [rpc/client] Supports https and wss now. +- [crypto] Make public key size into public constants +- [mempool] Log tx hash, not entire tx +- [abci] Merged in github.com/tendermint/abci and + github.com/tendermint/go-crypto +- [docs] Move from .rst to .md + +BUG FIXES: +- [rpc] Limit maximum number of HTTP/WebSocket connections + (`rpc.max_open_connections`) and gRPC connections + (`rpc.grpc_max_open_connections`). Check out "Running In Production" guide if + you want to increase them. +- [rpc] Limit maximum request body size to 1MB (header is limited to 1MB). +- [consensus] Fix a halting bug where `create_empty_blocks=false` +- [p2p] Fix panic in seed mode + +## 0.21.0 + +*June 21th, 2018* + +BREAKING CHANGES + +- [config] Change default ports from 4665X to 2665X. Ports over 32768 are + ephemeral and reserved for use by the kernel. 
+- [cmd] `unsafe_reset_all` removes the addrbook.json + +IMPROVEMENT + +- [pubsub] Set default capacity to 0 +- [docs] Various improvements + +BUG FIXES + +- [consensus] Fix an issue where we don't make blocks after `fast_sync` when `create_empty_blocks=false` +- [mempool] Fix #1761 where we don't process txs if `cache_size=0` +- [rpc] Fix memory leak in Websocket (when using `/subscribe` method) +- [config] Escape paths in config - fixes config paths on Windows + +## 0.20.0 + +*June 6th, 2018* + +This is the first in a series of breaking releases coming to Tendermint after +soliciting developer feedback and conducting security audits. + +This release does not break any blockchain data structures or +protocols other than the ABCI messages between Tendermint and the application. + +Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint +v0.20.0 on blockchains created with v0.19.X + +BREAKING CHANGES + +- [abci] Upgrade to + [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110) +- [abci] Change Query path for filtering peers by node ID from + `p2p/filter/pubkey/` to `p2p/filter/id/` + +## 0.19.9 + +*June 5th, 2018* + +BREAKING CHANGES + +- [types/priv_validator] Moved to top level `privval` package + +FEATURES + +- [config] Collapse PeerConfig into P2PConfig +- [docs] Add quick-install script +- [docs/spec] Add table of Amino prefixes + +BUG FIXES + +- [rpc] Return 404 for unknown endpoints +- [consensus] Flush WAL on stop +- [evidence] Don't send evidence to peers that are behind +- [p2p] Fix memory leak on peer disconnects +- [rpc] Fix panic when `per_page=0` + +## 0.19.8 + +*June 4th, 2018* + +BREAKING: + +- [p2p] Remove `auth_enc` config option, peer connections are always auth + encrypted. 
Technically a breaking change but seems no one was using it and + arguably a bug fix :) + +BUG FIXES + +- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and + `create_empty_blocks=false` + +## 0.19.7 + +*May 31st, 2018* + +BREAKING: + +- [libs/pubsub] TagMap#Get returns a string value +- [libs/pubsub] NewTagMap accepts a map of strings + +FEATURES + +- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate +- [p2p] AllowDuplicateIP config option to refuse connections from same IP. + - true by default for now, false by default in next breaking release +- [docs] Add docs for query, tx indexing, events, pubsub +- [docs] Add some notes about running Tendermint in production + +IMPROVEMENTS: + +- [consensus] Consensus reactor now receives events from a separate synchronous event bus, + which is not dependant on external RPC load +- [consensus/wal] do not look for height in older files if we've seen height - 1 +- [docs] Various cleanup and link fixes + +## 0.19.6 + +*May 29th, 2018* + +BUG FIXES + +- [blockchain] Fix fast-sync deadlock during high peer turnover + +BUG FIX: + +- [evidence] Dont send peers evidence from heights they haven't synced to yet +- [p2p] Refuse connections to more than one peer with the same IP +- [docs] Various fixes + +## 0.19.5 + +*May 20th, 2018* + +BREAKING CHANGES + +- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below) +- [rpc/client] TxSearch returns ResultTxSearch +- [version] Breaking changes to Go APIs will not be reflected in breaking + version change, but will be included in changelog. 
+ +FEATURES + +- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results +- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output +- [config] `mempool.size` and `mempool.cache_size` options + +IMPROVEMENTS + +- [docs] Lots of updates +- [consensus] Only Fsync() the WAL before executing msgs from ourselves + +BUG FIXES + +- [mempool] Enforce upper bound on number of transactions + +## 0.19.4 (May 17th, 2018) + +IMPROVEMENTS + +- [state] Improve tx indexing by using batches +- [consensus, state] Improve logging (more consensus logs, fewer tx logs) +- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...) + +BUG FIXES + +- [consensus] Fix issue #1575 where a late proposer can get stuck + +## 0.19.3 (May 14th, 2018) + +FEATURES + +- [rpc] New `/consensus_state` returns just the votes seen at the current height + +IMPROVEMENTS + +- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state` +- [rpc] Add PeerStateStats to `/dump_consensus_state` + +BUG FIXES + +- [cmd] Set GenesisTime during `tendermint init` +- [consensus] fix ValidBlock rules + +## 0.19.2 (April 30th, 2018) + +FEATURES: + +- [p2p] Allow peers with different Minor versions to connect +- [rpc] `/net_info` includes `n_peers` + +IMPROVEMENTS: + +- [p2p] Various code comments, cleanup, error types +- [p2p] Change some Error logs to Debug + +BUG FIXES: + +- [p2p] Fix reconnect to persistent peer when first dial fails +- [p2p] Validate NodeInfo.ListenAddr +- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers +- [p2p/pex] Limit max msg size to 64kB +- [p2p] Fix panic when pex=false +- [p2p] Allow multiple IPs per ID in AddrBook +- [p2p] Fix before/after bugs in addrbook isBad() + +## 0.19.1 (April 27th, 2018) + +Note this release includes some small breaking changes in the RPC and one in the +config that are really bug fixes. 
v0.19.1 will work with existing chains, and make Tendermint +easier to use and debug. With <3 + +BREAKING (MINOR) + +- [config] Removed `wal_light` setting. If you really needed this, let us know + +FEATURES: + +- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets ! +- [cmd] Added `gen_node_key` command + +BUG FIXES + +Some of these are breaking in the RPC response, but they're really bugs! + +- [spec] Document address format and pubkey encoding pre and post Amino +- [rpc] Lower case JSON field names +- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state` +- [rpc] Fix NodeInfo.Channels format to hex +- [rpc] Add Validator address to `/status` +- [rpc] Fix `prove` in ABCIQuery +- [cmd] MarshalJSONIndent on init + +## 0.19.0 (April 13th, 2018) + +BREAKING: +- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details) +- [cmd] `show_node_id` now returns an error if there is no node key +- [rpc]: changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status) + +Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is +serialized to disk or over the network. + +See github.com/tendermint/go-amino for details on the new format. + +See `scripts/wire2amino.go` for a tool to upgrade +genesis/priv_validator/node_key JSON files. + +FEATURES + +- [test] docker-compose for local testnet setup (thanks Greg!) 
+ +## 0.18.0 (April 6th, 2018) + +BREAKING: + +- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0) +- [types] ValidtorSet.GetByAddress returns -1 if no validator found +- [p2p] require all addresses come with an ID no matter what +- [rpc] Listening address must contain tcp:// or unix:// prefix + +FEATURES: + +- [rpc] StartHTTPAndTLSServer (not used yet) +- [rpc] Include validator's voting power in `/status` +- [rpc] `/tx` and `/tx_search` responses now include the transaction hash +- [rpc] Include peer NodeIDs in `/net_info` + +IMPROVEMENTS: +- [config] trim whitespace from elements of lists (like `persistent_peers`) +- [rpc] `/tx_search` results are sorted by height +- [p2p] do not try to connect to ourselves (ok, maybe only once) +- [p2p] seeds respond with a bias towards good peers + +BUG FIXES: +- [rpc] fix subscribing using an abci.ResponseDeliverTx tag +- [rpc] fix tx_indexers matchRange +- [rpc] fix unsubscribing (see tmlibs v0.8.0) + +## 0.17.1 (March 27th, 2018) + +BUG FIXES: +- [types] Actually support `app_state` in genesis as `AppStateJSON` + +## 0.17.0 (March 27th, 2018) + +BREAKING: +- [types] WriteSignBytes -> SignBytes + +IMPROVEMENTS: +- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release) +- [docs] note on determinism (docs/determinism.rst) +- [genesis] `app_options` field is deprecated. please rename it to `app_state` in your genesis file(s). 
`app_options` will not work in the next breaking release +- [p2p] dial seeds directly without potential peers +- [p2p] exponential backoff for addrs in the address book +- [p2p] mark peer as good if it contributed enough votes or block parts +- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect +- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address +- [spec] various improvements +- switched from glide to dep internally for package management +- [wire] prep work for upgrading to new go-wire (which is now called go-amino) + +FEATURES: +- [config] exposed `auth_enc` flag to enable/disable encryption +- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description) +- [rpc] added `/health` endpoint, which returns empty result for now +- [types/priv_validator] new format and socket client, allowing for remote signing + +BUG FIXES: +- [consensus] fix liveness bug by introducing ValidBlock mechanism + +## 0.16.0 (February 20th, 2018) + +BREAKING CHANGES: +- [config] use $TMHOME/config for all config and json files +- [p2p] old `--p2p.seeds` is now `--p2p.persistent_peers` (persistent peers to which TM will always connect to) +- [p2p] now `--p2p.seeds` only used for getting addresses (if addrbook is empty; not persistent) +- [p2p] NodeInfo: remove RemoteAddr and add Channels + - we must have at least one overlapping channel with peer + - we only send msgs for channels the peer advertised +- [p2p/conn] pong timeout +- [lite] comment out IAVL related code + +FEATURES: +- [p2p] added new `/dial_peers&persistent=_` **unsafe** endpoint +- [p2p] persistent node key in `$THMHOME/config/node_key.json` +- [p2p] introduce peer ID and authenticate peers by ID using addresses like `ID@IP:PORT` +- [p2p/pex] new seed mode crawls the network and serves as a seed. 
+- [config] MempoolConfig.CacheSize +- [config] P2P.SeedMode (`--p2p.seed_mode`) + +IMPROVEMENT: +- [p2p/pex] stricter rules in the PEX reactor for better handling of abuse +- [p2p] various improvements to code structure including subpackages for `pex` and `conn` +- [docs] new spec! +- [all] speed up the tests! + +BUG FIX: +- [blockchain] StopPeerForError on timeout +- [consensus] StopPeerForError on a bad Maj23 message +- [state] flush mempool conn before calling commit +- [types] fix priv val signing things that only differ by timestamp +- [mempool] fix memory leak causing zombie peers +- [p2p/conn] fix potential deadlock + +## 0.15.0 (December 29, 2017) + +BREAKING CHANGES: +- [p2p] enable the Peer Exchange reactor by default +- [types] add Timestamp field to Proposal/Vote +- [types] add new fields to Header: TotalTxs, ConsensusParamsHash, LastResultsHash, EvidenceHash +- [types] add Evidence to Block +- [types] simplify ValidateBasic +- [state] updates to support changes to the header +- [state] Enforce <1/3 of validator set can change at a time + +FEATURES: +- [state] Send indices of absent validators and addresses of byzantine validators in BeginBlock +- [state] Historical ConsensusParams and ABCIResponses +- [docs] Specification for the base Tendermint data structures. +- [evidence] New evidence reactor for gossiping and managing evidence +- [rpc] `/block_results?height=X` returns the DeliverTx results for a given height. + +IMPROVEMENTS: +- [consensus] Better handling of corrupt WAL file + +BUG FIXES: +- [lite] fix race +- [state] validate block.Header.ValidatorsHash +- [p2p] allow seed addresses to be prefixed with eg. `tcp://` +- [p2p] use consistent key to refer to peers so we dont try to connect to existing peers +- [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. 
+ +## 0.14.0 (December 11, 2017) + +BREAKING CHANGES: +- consensus/wal: removed separator +- rpc/client: changed Subscribe/Unsubscribe/UnsubscribeAll funcs signatures to be identical to event bus. + +FEATURES: +- new `tendermint lite` command (and `lite/proxy` pkg) for running a light-client RPC proxy. + NOTE it is currently insecure and its APIs are not yet covered by semver + +IMPROVEMENTS: +- rpc/client: can act as event bus subscriber (See https://github.com/tendermint/tendermint/issues/945). +- p2p: use exponential backoff from seconds to hours when attempting to reconnect to persistent peer +- config: moniker defaults to the machine's hostname instead of "anonymous" + +BUG FIXES: +- p2p: no longer exit if one of the seed addresses is incorrect + +## 0.13.0 (December 6, 2017) + +BREAKING CHANGES: +- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc. +- types: block heights are now `int64` everywhere +- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled +- node: EventSwitch methods now refer to EventBus +- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified +- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch +- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe +- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery +- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event` +- rpc: `/status` returns the LatestBlockTime in human readable form instead of in nanoseconds +- mempool: cached transactions return an error instead of an ABCI response with BadNonce + +FEATURES: +- rpc: new `/unsubscribe_all` WebSocket RPC endpoint +- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries +- p2p/trust: new trust metric for tracking peers. 
See ADR-006 +- config: TxIndexConfig allows to set what DeliverTx tags to index + +IMPROVEMENTS: +- New asynchronous events system using `tmlibs/pubsub` +- logging: Various small improvements +- consensus: Graceful shutdown when app crashes +- tests: Fix various non-deterministic errors +- p2p: more defensive programming + +BUG FIXES: +- consensus: fix panic where prs.ProposalBlockParts is not initialized +- p2p: fix panic on bad channel + +## 0.12.1 (November 27, 2017) + +BUG FIXES: +- upgrade tmlibs dependency to enable Windows builds for Tendermint + +## 0.12.0 (October 27, 2017) + +BREAKING CHANGES: + - rpc/client: websocket ResultsCh and ErrorsCh unified in ResponsesCh. + - rpc/client: ABCIQuery no longer takes `prove` + - state: remove GenesisDoc from state. + - consensus: new binary WAL format provides efficiency and uses checksums to detect corruption + - use scripts/wal2json to convert to json for debugging + +FEATURES: + - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! + - rpc: `/genesis` includes the `app_options` . + - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. + - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. + +IMPROVEMENTS: + - rpc: `/genesis` result includes `app_options` + - rpc/lib/client: add jitter to reconnects. + - rpc/lib/types: `RPCError` satisfies the `error` interface. 
+ +BUG FIXES: + - rpc/client: fix ws deadlock after stopping + - blockchain: fix panic on AddBlock when peer is nil + - mempool: fix sending on TxsAvailable when a tx has been invalidated + - consensus: dont run WAL catchup if we fast synced + +## 0.11.1 (October 10, 2017) + +IMPROVEMENTS: + - blockchain/reactor: respondWithNoResponseMessage for missing height + +BUG FIXES: + - rpc: fixed client WebSocket timeout + - rpc: client now resubscribes on reconnection + - rpc: fix panics on missing params + - rpc: fix `/dump_consensus_state` to have normal json output (NOTE: technically breaking, but worth a bug fix label) + - types: fixed out of range error in VoteSet.addVote + - consensus: fix wal autofile via https://github.com/tendermint/tmlibs/blob/master/CHANGELOG.md#032-october-2-2017 + +## 0.11.0 (September 22, 2017) + +BREAKING: + - genesis file: validator `amount` is now `power` + - abci: Info, BeginBlock, InitChain all take structs + - rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones: + - requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC. + - `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`. + + - cmd: if there is no genesis, exit immediately instead of waiting around for one to show. + - types: `Signer.Sign` returns an error. + - state: every validator set change is persisted to disk, which required some changes to the `State` structure. + - p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct). + +FEATURES: + - rpc: `/validators?height=X` allows querying of validators at previous heights. + - rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height. 
+ +IMPROVEMENTS: + - docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io` + +BUG FIXES: + - fix WAL openning issue on Windows + +## 0.10.4 (September 5, 2017) + +IMPROVEMENTS: +- docs: Added Slate docs to each rpc function (see rpc/core) +- docs: Ported all website docs to Read The Docs +- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize +- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong + +BUG FIXES: +- consensus: fix panic on getVoteBitArray +- consensus: hang instead of panicking on byzantine consensus failures +- cmd: dont load config for version command + +## 0.10.3 (August 10, 2017) + +FEATURES: +- control over empty block production: + - new flag, `--consensus.create_empty_blocks`; when set to false, blocks are only created when there are txs or when the AppHash changes. + - new config option, `consensus.create_empty_blocks_interval`; an empty block is created after this many seconds. + - in normal operation, `create_empty_blocks = true` and `create_empty_blocks_interval = 0`, so blocks are being created all the time (as in all previous versions of tendermint). The number of empty blocks can be reduced by increasing `create_empty_blocks_interval` or by setting `create_empty_blocks = false`. + - new `TxsAvailable()` method added to Mempool that returns a channel which fires when txs are available. + - new heartbeat message added to consensus reactor to notify peers that a node is waiting for txs before entering propose step. +- rpc: Add `syncing` field to response returned by `/status`. Is `true` while in fast-sync mode. 
+ +IMPROVEMENTS: +- various improvements to documentation and code comments + +BUG FIXES: +- mempool: pass height into constructor so it doesn't always start at 0 + +## 0.10.2 (July 10, 2017) + +FEATURES: +- Enable lower latency block commits by adding consensus reactor sleep durations and p2p flush throttle timeout to the config + +IMPROVEMENTS: +- More detailed logging in the consensus reactor and state machine +- More in-code documentation for many exposed functions, especially in consensus/reactor.go and p2p/switch.go +- Improved readability for some function definitions and code blocks with long lines + +## 0.10.1 (June 28, 2017) + +FEATURES: +- Use `--trace` to get stack traces for logged errors +- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set +- types: GenesisDocFromFile parses a GenesiDoc from a JSON file + +IMPROVEMENTS: +- Add a Code of Conduct +- Variety of improvements as suggested by `megacheck` tool +- rpc: deduplicate tests between rpc/client and rpc/tests +- rpc: addresses without a protocol prefix default to `tcp://`. `http://` is also accepted as an alias for `tcp://` +- cmd: commands are more easily reuseable from other tools +- DOCKER: automate build/push + +BUG FIXES: +- Fix log statements using keys with spaces (logger does not currently support spaces) +- rpc: set logger on websocket connection +- rpc: fix ws connection stability by setting write deadline on pings + +## 0.10.0 (June 2, 2017) + +Includes major updates to configuration, logging, and json serialization. +Also includes the Grand Repo-Merge of 2017. 
+

BREAKING CHANGES:

- Config and Flags:
  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
  - This affects the following flags:
    - `--seeds` is now `--p2p.seeds`
    - `--node_laddr` is now `--p2p.laddr`
    - `--pex` is now `--p2p.pex`
    - `--skip_upnp` is now `--p2p.skip_upnp`
    - `--rpc_laddr` is now `--rpc.laddr`
    - `--grpc_laddr` is now `--rpc.grpc_laddr`
  - Any configuration option now within a substruct must come under that heading in the `config.toml`, for instance:
  ```
  [p2p]
  laddr="tcp://1.2.3.4:46656"

  [consensus]
  timeout_propose=1000
  ```
  - Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test`
  - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config

- Logger
  - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
  - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
  - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger

- JSON serialization:
  - Replace `[TypeByte, Xxx]` with `{"type": "some-type", "data": Xxx}` in RPC and all `.json` files by using `go-wire/data`.
For instance, a public key is now:
  ```
  "pub_key": {
    "type": "ed25519",
    "data": "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0"
  }
  ```
  - Remove type information about RPC responses, so `[TypeByte, {"jsonrpc": "2.0", ... }]` is now just `{"jsonrpc": "2.0", ... }`
  - Change `[]byte` to `data.Bytes` in all serialized types (for hex encoding)
  - Lowercase the JSON tags in `ValidatorSet` fields
  - Introduce `EventDataInner` for serializing events

- Other:
  - Send InitChain message in handshake if `appBlockHeight == 0`
  - Do not include the `Accum` field when computing the validator hash. This makes the ValidatorSetHash unique for a given validator set, rather than changing with every block (as the Accum changes)
  - Unsafe RPC calls are not enabled by default. This includes `/dial_seeds`, and all calls prefixed with `unsafe`. Use the `--rpc.unsafe` flag to enable.


FEATURES:

- Per-module log levels. For instance, the new default is `state:info,*:error`, which means the `state` package logs at `info` level, and everything else logs at `error` level
- Log if a node is validator or not in every consensus round
- Use ldflags to set git hash as part of the version
- Ignore `address` and `pub_key` fields in `priv_validator.json` and overwrite them with the values derived from the `priv_key`

IMPROVEMENTS:

- Merge `tendermint/go-p2p -> tendermint/tendermint/p2p` and `tendermint/go-rpc -> tendermint/tendermint/rpc/lib`
- Update paths for grand repo merge:
  - `go-common -> tmlibs/common`
  - `go-data -> go-wire/data`
  - All other `go-` libs, except `go-crypto` and `go-wire`, are merged under `tmlibs`
- No global loggers (loggers are passed into constructors, or preferably set with a SetLogger method)
- Return HTTP status codes with errors for RPC responses
- Limit `/blockchain_info` call to return a maximum of 20 blocks
- Use `.Wrap()` and `.Unwrap()` instead of eg.
`PubKeyS` for `go-crypto` types +- RPC JSON responses use pretty printing (via `json.MarshalIndent`) +- Color code different instances of the consensus for tests +- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests + + +## 0.9.2 (April 26, 2017) + +BUG FIXES: + +- Fix bug in `ResetPrivValidator` where we were using the global config and log (causing external consumers, eg. basecoin, to fail). + +## 0.9.1 (April 21, 2017) + +FEATURES: + +- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers +- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash. Optionally returns a proof of the tx's inclusion in the block +- `tendermint testnet` command initializes files for a testnet + +IMPROVEMENTS: + +- CLI now uses Cobra framework +- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0) +- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx) +- `/broadcast_tx_commit` also returns the height the block was committed in +- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner +- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0) +- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks + +BUG FIXES: + +- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later +- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save() + +## 0.9.0 (March 6, 2017) + +BREAKING CHANGES: + +- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights: + +``` +message RequestQuery{ + bytes data = 1; + string path = 2; + uint64 height = 3; + bool prove = 4; +} + +message ResponseQuery{ + 
CodeType code = 1;
  int64 index = 2;
  bytes key = 3;
  bytes value = 4;
  bytes proof = 5;
  uint64 height = 6;
  string log = 7;
}
```


- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`:

```
type BlockMeta struct {
  BlockID BlockID `json:"block_id"` // the block hash and partsethash
  Header *Header `json:"header"` // The block's Header
}
```

- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.

- `tendermint gen_validator` command output is now pure JSON

FEATURES:

- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X`
- Client API for each endpoint, including mocks for testing

IMPROVEMENTS:

- `Node` is now a `BaseService`
- Simplified starting Tendermint in-process from another application
- Better organized Makefile
- Scripts for auto-building binaries across platforms
- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint
- New repo files: `CONTRIBUTING.md`, Github `ISSUE_TEMPLATE`, `CHANGELOG.md`
- Improvements on CircleCI for managing build/test artifacts
- Handshake replay is done through the consensus package, possibly using a mockApp
- Graceful shutdown of RPC listeners
- Tests for the PEX reactor and DialSeeds

BUG FIXES:

- Check peer.Send for failure before updating PeerState in consensus
- Fix panic in `/dial_seeds` with invalid addresses
- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable`
- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State`


## 0.8.0 (January 13, 2017)

BREAKING CHANGES:

- New data type `BlockID` to represent blocks:

```
type BlockID struct {
  Hash []byte `json:"hash"`
  PartsHeader PartSetHeader `json:"parts"`
}
```

- `Vote` data type now includes
validator address and index: + +``` +type Vote struct { + ValidatorAddress []byte `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Height int `json:"height"` + Round int `json:"round"` + Type byte `json:"type"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. + Signature crypto.Signature `json:"signature"` +} +``` + +- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx +- Hex strings in the RPC are now "0x" prefixed + + +FEATURES: + +- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23, +in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts: + +``` +type VoteSetMaj23Message struct { + Height int + Round int + Type byte + BlockID types.BlockID +} +``` + +- Configurable block part set size +- Validator set changes +- Optionally skip TimeoutCommit if we have all the votes +- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes +- GRPC server for BroadcastTx endpoint + +IMPROVEMENTS: + +- Less verbose logging +- Better test coverage (37% -> 49%) +- Canonical SignBytes for signable types +- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile +- Better in-process testing for the consensus reactor and byzantine faults +- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points +- Better abstraction over timeout mechanics + +BUG FIXES: + +- Fix memory leak in mempool peer +- Fix panic on POLRound=-1 +- Actually set the CommitTime +- Actually send BeginBlock message +- Fix a liveness issues caused by Byzantine proposals/votes. Uses the new `Maj23Msg`. 
+ + +## 0.7.4 (December 14, 2016) + +FEATURES: + +- Enable the Peer Exchange reactor with the `--pex` flag for more resilient gossip network (feature still in development, beware dragons) + +IMPROVEMENTS: + +- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration + +## 0.7.3 (October 20, 2016) + +IMPROVEMENTS: + +- Type safe FireEvent +- More WAL/replay tests +- Cleanup some docs + +BUG FIXES: + +- Fix deadlock in mempool for synchronous apps +- Replay handles non-empty blocks +- Fix race condition in HeightVoteSet + +## 0.7.2 (September 11, 2016) + +BUG FIXES: + +- Set mustConnect=false so tendermint will retry connecting to the app + +## 0.7.1 (September 10, 2016) + +FEATURES: + +- New TMSP connection for Query/Info +- New RPC endpoints: + - `tmsp_query` + - `tmsp_info` +- Allow application to filter peers through Query (off by default) + +IMPROVEMENTS: + +- TMSP connection type enforced at compile time +- All listen/client urls use a "tcp://" or "unix://" prefix + +BUG FIXES: + +- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery +- Fix event unsubscribe +- Fix fastsync/blockchain reactor + +## 0.7.0 (August 7, 2016) + +BREAKING CHANGES: + +- Strict SemVer starting now! 
+
- Update to ABCI v0.2.0
- Validation types now called Commit
- NewBlock event only returns the block header


FEATURES:

- TMSP and RPC support TCP and UNIX sockets
- Additional config options including block size and consensus parameters
- New WAL mode `cswal_light`; logs only the validator's own votes
- New RPC endpoints:
  - for starting/stopping profilers, and for updating config
  - `/broadcast_tx_commit`, returns when tx is included in a block, else an error
  - `/unsafe_flush_mempool`, empties the mempool


IMPROVEMENTS:

- Various optimizations
- Remove bad or invalidated transactions from the mempool cache (allows later duplicates)
- More elaborate testing using CircleCI including benchmarking throughput on 4 digitalocean droplets

BUG FIXES:

- Various fixes to WAL and replay logic
- Various race conditions

## PreHistory

Strict versioning only began with the release of v0.7.0, in late summer 2016.
The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year.
Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine.
That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow).
In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation.

By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP).
The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine
driving an application running in another process.
+The ABCI interface and implementation were iterated on and improved over the course of 2016, +until versioned history kicked in with v0.7.0. diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..e2fadfadc --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,429 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + branch = "master" + name = "github.com/btcsuite/btcd" + packages = ["btcec"] + revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" + +[[projects]] + branch = "master" + name = "github.com/btcsuite/btcutil" + packages = ["base58"] + revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/ebuchman/fail-test" + packages = ["."] + revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" + +[[projects]] + branch = "master" + name = "github.com/fortytw2/leaktest" + packages = ["."] + revision = "b008db64ef8daabb22ff6daa557f33b41d8f6ccd" + +[[projects]] + name = "github.com/fsnotify/fsnotify" + packages = ["."] + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + name = "github.com/go-kit/kit" + packages = [ + "log", + "log/level", + "log/term", + "metrics", + "metrics/discard", + "metrics/internal/lv", + "metrics/prometheus" + ] + revision = "4dc7be5d2d12881735283bcab7352178e190fc71" + version = "v0.6.0" + +[[projects]] + name = "github.com/go-logfmt/logfmt" + packages = ["."] + revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" + version = "v0.3.0" + +[[projects]] + name = "github.com/go-stack/stack" + packages = ["."] + revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" + version = 
"v1.7.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "jsonpb", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types" + ] + revision = "1adfc126b41513cc696b209667c8656ea7aac67c" + version = "v1.0.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + +[[projects]] + name = "github.com/gorilla/websocket" + packages = ["."] + revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/printer", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token" + ] + revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + +[[projects]] + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/jmhodges/levigo" + packages = ["."] + revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" + +[[projects]] + branch = "master" + name = "github.com/kr/logfmt" + packages = ["."] + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + name = "github.com/magiconair/properties" + packages = ["."] + revision = "c2353362d570a7bfa228149c62842019201cfb71" + version = "v1.8.0" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = 
"bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + +[[projects]] + name = "github.com/pelletier/go-toml" + packages = ["."] + revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" + version = "v1.2.0" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "40f013a808ec4fa79def444a1a56de4d1727efcb" + +[[projects]] + branch = "master" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" + +[[projects]] + name = "github.com/spf13/afero" + packages = [ + ".", + "mem" + ] + revision = "787d034dfe70e44075ccc060d346146ef53270ad" + version = "v1.1.1" + +[[projects]] + name = "github.com/spf13/cast" + packages = ["."] + revision = "8965335b8c7107321228e3e3702cab9832751bac" + version = "v1.2.0" + +[[projects]] + name = "github.com/spf13/cobra" + packages = ["."] + revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + revision = 
"7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" + +[[projects]] + name = "github.com/spf13/viper" + packages = ["."] + revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" + version = "v1.0.2" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util" + ] + revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697" + +[[projects]] + branch = "master" + name = "github.com/tendermint/ed25519" + packages = [ + ".", + "edwards25519", + "extra25519" + ] + revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" + +[[projects]] + name = "github.com/tendermint/go-amino" + packages = ["."] + revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" + version = "0.10.1" + +[[projects]] + name = "github.com/tendermint/tmlibs" + packages = [ + "autofile", + "cli", + "cli/flags", + "clist", + "common", + "db", + "flowrate", + "log", + "merkle", + "merkle/tmhash", + "test" + ] + revision = "49596e0a1f48866603813df843c9409fc19805c6" + version = "v0.9.0" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "chacha20poly1305", + "curve25519", + "hkdf", + "internal/chacha20", + "internal/subtle", + "nacl/box", + "nacl/secretbox", + "openpgp/armor", + "openpgp/errors", + "poly1305", + "ripemd160", + "salsa20/salsa" + ] + revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + 
"http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "netutil", + "trace" + ] + revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix" + ] + revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "codes", + "connectivity", + "credentials", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "stats", + "status", + "tap", + "transport" + ] + revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + version = "v1.7.5" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "c25289282b94abc7f0c390e592e5e1636b7f26cb4773863ac39cde7fdc7b5bdf" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..18e2767a9 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,99 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/ebuchman/fail-test" + branch = "master" + +[[constraint]] + name = "github.com/fortytw2/leaktest" + branch = "master" + +[[constraint]] + name = "github.com/go-kit/kit" + version = "~0.6.0" + +[[constraint]] + name = "github.com/gogo/protobuf" + version = "~1.0.0" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "~1.0.0" + +[[constraint]] + name = "github.com/gorilla/websocket" + version = "~1.2.0" + +[[constraint]] + name = "github.com/pkg/errors" + version = "~0.8.0" + +[[constraint]] + name = "github.com/rcrowley/go-metrics" + branch = "master" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "~0.0.1" + +[[constraint]] + name = "github.com/spf13/viper" + version = "~1.0.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "~1.2.1" + +[[constraint]] + name = "github.com/tendermint/go-amino" + version = "~0.10.1" + +[[override]] + name = "github.com/tendermint/tmlibs" + version = "~0.9.0" + +[[constraint]] + name = "google.golang.org/grpc" + version = "~1.7.3" + +# this got updated and broke, so locked to an old working commit ... 
+[[override]] + name = "google.golang.org/genproto" + revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = "0.8.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..bb66bb350 --- /dev/null +++ b/LICENSE @@ -0,0 +1,204 @@ +Tendermint Core +License: Apache2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016 All in Bits, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..079c58f90 --- /dev/null +++ b/Makefile @@ -0,0 +1,236 @@ +GOTOOLS = \ + github.com/golang/dep/cmd/dep \ + gopkg.in/alecthomas/gometalinter.v2 +PACKAGES=$(shell go list ./... | grep -v '/vendor/') +BUILD_TAGS?=tendermint +BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" + +all: check build test install + +check: check_tools ensure_deps + + +######################################## +### Build + +build: + CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ + +build_race: + CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint + +install: + CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint + +######################################## +### Distribution + +# dist builds binaries for all platforms and packages them for distribution +dist: + @BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" + +######################################## +### Tools & dependencies + +check_tools: + @# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" + +get_tools: + @echo "--> Installing tools" + go get -u -v $(GOTOOLS) + @gometalinter.v2 --install 
+ +update_tools: + @echo "--> Updating tools" + @go get -u $(GOTOOLS) + +#Run this from CI +get_vendor_deps: + @rm -rf vendor/ + @echo "--> Running dep" + @dep ensure -vendor-only + + +#Run this locally. +ensure_deps: + @rm -rf vendor/ + @echo "--> Running dep" + @dep ensure + +draw_deps: + @# requires brew install graphviz or apt-get install graphviz + go get github.com/RobotsAndPencils/goviz + @goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png + +get_deps_bin_size: + @# Copy of build recipe with additional flags to perform binary size analysis + $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1)) + @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log + @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" + +######################################## +### Testing + +## required to be run first by most tests +build_docker_test_image: + docker build -t tester -f ./test/docker/Dockerfile . + +### coverage, app, persistence, and libs tests +test_cover: + # run the go unit tests with coverage + bash test/test_cover.sh + +test_apps: + # run the app tests using bash + # requires `abci-cli` and `tendermint` binaries installed + bash test/app/test.sh + +test_persistence: + # run the persistence tests using bash + # requires `abci-cli` installed + docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh + + # TODO undockerize + # bash test/persist/test_failure_indices.sh + +test_p2p: + docker rm -f rsyslog || true + rm -rf test/logs || true + mkdir test/logs + cd test/ + docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog + cd .. 
+ # requires 'tester' the image from above + bash test/p2p/test.sh tester + +need_abci: + bash scripts/install_abci_apps.sh + +test_integrations: + make build_docker_test_image + make get_tools + make get_vendor_deps + make install + make need_abci + make test_cover + make test_apps + make test_persistence + make test_p2p + +test_release: + @go test -tags release $(PACKAGES) + +test100: + @for i in {1..100}; do make test; done + +vagrant_test: + vagrant up + vagrant ssh -c 'make test_integrations' + +### go tests +test: + @echo "--> Running go test" + @go test $(PACKAGES) + +test_race: + @echo "--> Running go test --race" + @go test -v -race $(PACKAGES) + + +######################################## +### Formatting, linting, and vetting + +fmt: + @go fmt ./... + +metalinter: + @echo "--> Running linter" + @gometalinter.v2 --vendor --deadline=600s --disable-all \ + --enable=deadcode \ + --enable=gosimple \ + --enable=misspell \ + --enable=safesql \ + ./... + #--enable=gas \ + #--enable=maligned \ + #--enable=dupl \ + #--enable=errcheck \ + #--enable=goconst \ + #--enable=gocyclo \ + #--enable=goimports \ + #--enable=golint \ <== comments on anything exported + #--enable=gotype \ + #--enable=ineffassign \ + #--enable=interfacer \ + #--enable=megacheck \ + #--enable=staticcheck \ + #--enable=structcheck \ + #--enable=unconvert \ + #--enable=unparam \ + #--enable=unused \ + #--enable=varcheck \ + #--enable=vet \ + #--enable=vetshadow \ + +metalinter_all: + @echo "--> Running linter (all)" + gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... 
+ +########################################################### +### Docker image + +build-docker: + cp build/tendermint DOCKER/tendermint + docker build --label=tendermint --tag="tendermint/tendermint" DOCKER + rm -rf DOCKER/tendermint + +########################################################### +### Local testnet using docker + +# Build linux binary on other platforms +build-linux: + GOOS=linux GOARCH=amd64 $(MAKE) build + +build-docker-localnode: + cd networks/local + make + +# Run a 4-node testnet locally +localnet-start: localnet-stop + @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi + docker-compose up + +# Stop testnet +localnet-stop: + docker-compose down + +########################################################### +### Remote full-nodes (sentry) using terraform and ansible + +# Server management +sentry-start: + @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi + @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi + cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" + @if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi + cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml + @echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". 
(Public key of validator, chain ID, peer IP and node ID.)" + +# Configuration management +sentry-config: + cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build + +sentry-stop: + @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi + cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" + +# meant for the CI, inspect script & adapt accordingly +build-slate: + bash scripts/slate.sh + +# To avoid unintended conflicts with file names, always add to .PHONY +# unless there is a reason not to. +# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html +.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate diff --git a/README.md b/README.md new file mode 100644 index 000000000..daba4f59a --- /dev/null +++ b/README.md @@ -0,0 +1,138 @@ +# Tendermint + +[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) +[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication). +Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short. 
+ +[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) +[![API Reference]( +https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +)](https://godoc.org/github.com/tendermint/tendermint) +[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm) +[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) +[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) +[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) + + +Branch | Tests | Coverage +----------|-------|---------- +master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) +develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) + +Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - +and securely replicates it on many machines. + +For protocol details, see [the specification](/docs/spec). 
+ +## A Note on Production Readiness + +While Tendermint is being used in production in private, permissioned +environments, we are still working actively to harden and audit it in preparation +for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/). +We are also still making breaking changes to the protocol and the APIs. +Thus we tag the releases as *alpha software*. + +In any case, if you intend to run Tendermint in production, +please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :) + +## Security + +To report a security vulnerability, see our [bug bounty +program](https://tendermint.com/security). + +For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md) + +## Minimum requirements + +Requirement|Notes +---|--- +Go version | Go1.9 or higher + +## Install + +See the [install instructions](/docs/install.rst) + +## Quick Start + +- [Single node](/docs/using-tendermint.rst) +- [Local cluster using docker-compose](/networks/local) +- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md) +- [Join the public testnet](https://cosmos.network/testnet) + +## Resources + +### Tendermint Core + +For details about the blockchain data structures and the p2p protocols, see the +the [Tendermint specification](/docs/spec). + +For details on using the software, [Read The Docs](https://tendermint.readthedocs.io/en/master/). +Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs. 
+ + +### Sub-projects + +* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface +* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library +* [Go-Crypto](http://github.com/tendermint/tendermint/crypto), an elliptic curve cryptography library +* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally +* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation + +### Tools +* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools) + +### Applications + +* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework +* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint +* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications) + +### More + +* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) +* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf) +* [Tendermint Blog](https://blog.cosmos.network/tendermint/home) +* [Cosmos Blog](https://blog.cosmos.network) + +## Contributing + +Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md). + +## Versioning + +### SemVer + +Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes. +According to SemVer, anything in the public API can change at any time before version 1.0.0 + +To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used +to signal breaking changes across a subset of the total public API. This subset includes all +interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not +include the in-process Go APIs. 
+ +That said, breaking changes in the following packages will be documented in the +CHANGELOG even if they don't lead to MINOR version bumps: + +- types +- rpc/client +- config +- node + +Exported objects in these packages that are not covered by the versioning scheme +are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time. +Functions, types, and values in any other package may also change at any time. + +### Upgrades + +In an effort to avoid accumulating technical debt prior to 1.0.0, +we do not guarantee that breaking changes (ie. bumps in the MINOR version) +will work with existing tendermint blockchains. In these cases you will +have to start a new blockchain, or write something custom to get the old +data into the new chain. + +However, any bump in the PATCH version should be compatible with existing histories +(if not please open an [issue](https://github.com/tendermint/tendermint/issues)). + +## Code of Conduct + +Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md). diff --git a/version/version.go b/version/version.go new file mode 100644 index 000000000..9be4c9d82 --- /dev/null +++ b/version/version.go @@ -0,0 +1,23 @@ +package version + +// Version components +const ( + Maj = "0" + Min = "22" + Fix = "0" +) + +var ( + // Version is the current version of Tendermint + // Must be a string because scripts like dist.sh read this file. + Version = "0.22.0" + + // GitCommit is the current HEAD set using ldflags. 
+ GitCommit string +) + +func init() { + if GitCommit != "" { + Version += "-" + GitCommit + } +} From d55243f0e640de98f7adf0ad5fed4855e0942c1e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:36:49 -0400 Subject: [PATCH 503/515] fix import paths --- abci/client/client.go | 2 +- abci/client/grpc_client.go | 2 +- abci/client/local_client.go | 2 +- abci/client/socket_client.go | 2 +- abci/cmd/abci-cli/abci-cli.go | 4 ++-- abci/example/counter/counter.go | 2 +- abci/example/example_test.go | 4 ++-- abci/example/kvstore/helpers.go | 2 +- abci/example/kvstore/kvstore.go | 4 ++-- abci/example/kvstore/kvstore_test.go | 4 ++-- abci/example/kvstore/persistent_kvstore.go | 6 +++--- abci/server/grpc_server.go | 2 +- abci/server/server.go | 2 +- abci/server/socket_server.go | 2 +- abci/tests/benchmarks/parallel/parallel.go | 2 +- abci/tests/benchmarks/simple/simple.go | 2 +- abci/tests/server/client.go | 2 +- abci/tests/test_app/app.go | 2 +- abci/types/messages_test.go | 2 +- abci/types/types.pb.go | 2 +- abci/types/util.go | 2 +- benchmarks/map_test.go | 2 +- benchmarks/os_test.go | 2 +- benchmarks/simu/counter.go | 2 +- blockchain/pool.go | 6 +++--- blockchain/pool_test.go | 4 ++-- blockchain/reactor.go | 4 ++-- blockchain/reactor_test.go | 6 +++--- blockchain/store.go | 4 ++-- blockchain/store_test.go | 4 ++-- cmd/priv_val_server/main.go | 4 ++-- cmd/tendermint/commands/gen_node_key.go | 2 +- cmd/tendermint/commands/init.go | 2 +- cmd/tendermint/commands/lite.go | 2 +- cmd/tendermint/commands/reset_priv_validator.go | 2 +- cmd/tendermint/commands/root.go | 6 +++--- cmd/tendermint/commands/root_test.go | 4 ++-- cmd/tendermint/commands/testnet.go | 2 +- cmd/tendermint/main.go | 2 +- config/toml.go | 2 +- consensus/byzantine_test.go | 2 +- consensus/common_test.go | 6 +++--- consensus/mempool_test.go | 2 +- consensus/reactor.go | 4 ++-- consensus/reactor_test.go | 4 ++-- consensus/replay.go | 8 ++++---- consensus/replay_file.go | 6 +++--- 
consensus/replay_test.go | 8 ++++---- consensus/state.go | 4 ++-- consensus/state_test.go | 4 ++-- consensus/ticker.go | 4 ++-- consensus/types/height_vote_set.go | 2 +- consensus/types/height_vote_set_test.go | 2 +- consensus/types/peer_round_state.go | 2 +- consensus/types/round_state.go | 2 +- consensus/types/round_state_test.go | 2 +- consensus/version.go | 2 +- consensus/wal.go | 4 ++-- consensus/wal_generator.go | 8 ++++---- consensus/wal_test.go | 2 +- crypto/merkle/simple_map.go | 2 +- crypto/merkle/simple_tree_test.go | 4 ++-- crypto/pub_key.go | 2 +- crypto/random.go | 2 +- crypto/signature.go | 2 +- crypto/symmetric.go | 2 +- evidence/pool.go | 6 +++--- evidence/pool_test.go | 2 +- evidence/reactor.go | 4 ++-- evidence/reactor_test.go | 4 ++-- evidence/store.go | 2 +- evidence/store_test.go | 2 +- libs/autofile/autofile.go | 2 +- libs/autofile/autofile_test.go | 2 +- libs/autofile/cmd/logjack.go | 4 ++-- libs/autofile/group.go | 2 +- libs/autofile/group_test.go | 2 +- libs/bech32/bech32_test.go | 2 +- libs/cli/flags/log_level.go | 2 +- libs/cli/flags/log_level_test.go | 4 ++-- libs/common/repeat_timer_test.go | 2 +- libs/common/service.go | 2 +- libs/db/backend_test.go | 2 +- libs/db/c_level_db_test.go | 2 +- libs/db/common_test.go | 2 +- libs/db/debug_db.go | 2 +- libs/db/fsdb.go | 2 +- libs/db/go_level_db.go | 2 +- libs/db/go_level_db_test.go | 2 +- libs/db/mem_db.go | 2 +- libs/db/remotedb/grpcdb/client.go | 2 +- libs/db/remotedb/grpcdb/example_test.go | 4 ++-- libs/db/remotedb/grpcdb/server.go | 6 +++--- libs/db/remotedb/remotedb.go | 6 +++--- libs/db/remotedb/remotedb_test.go | 4 ++-- libs/events/events.go | 2 +- libs/log/filter_test.go | 2 +- libs/log/tm_logger_test.go | 2 +- libs/log/tmfmt_logger_test.go | 2 +- libs/log/tracing_logger_test.go | 2 +- libs/merkle/simple_map.go | 4 ++-- libs/merkle/simple_tree.go | 2 +- libs/merkle/simple_tree_test.go | 4 ++-- libs/merkle/tmhash/hash_test.go | 2 +- libs/pubsub/example_test.go | 2 +- 
libs/pubsub/pubsub.go | 2 +- libs/pubsub/pubsub_test.go | 2 +- libs/test/mutate.go | 2 +- lite/files/commit_test.go | 2 +- lite/proxy/proxy.go | 2 +- lite/proxy/query.go | 2 +- lite/proxy/wrapper.go | 2 +- mempool/mempool.go | 8 ++++---- mempool/mempool_test.go | 4 ++-- mempool/reactor.go | 4 ++-- mempool/reactor_test.go | 2 +- node/node.go | 6 +++--- node/node_test.go | 2 +- p2p/base_reactor.go | 2 +- p2p/conn/connection.go | 6 +++--- p2p/conn/connection_test.go | 2 +- p2p/conn/secret_connection.go | 2 +- p2p/conn/secret_connection_test.go | 2 +- p2p/dummy/peer.go | 2 +- p2p/fuzz.go | 2 +- p2p/key.go | 2 +- p2p/key_test.go | 2 +- p2p/listener.go | 4 ++-- p2p/listener_test.go | 2 +- p2p/netaddress.go | 2 +- p2p/node_info.go | 2 +- p2p/peer.go | 4 ++-- p2p/peer_set_test.go | 2 +- p2p/peer_test.go | 4 ++-- p2p/pex/addrbook.go | 2 +- p2p/pex/addrbook_test.go | 4 ++-- p2p/pex/file.go | 2 +- p2p/pex/pex_reactor.go | 2 +- p2p/pex/pex_reactor_test.go | 4 ++-- p2p/switch.go | 2 +- p2p/switch_test.go | 2 +- p2p/test_util.go | 4 ++-- p2p/trust/metric.go | 2 +- p2p/trust/store.go | 4 ++-- p2p/trust/store_test.go | 4 ++-- p2p/upnp/probe.go | 4 ++-- privval/priv_validator.go | 2 +- privval/priv_validator_test.go | 2 +- privval/socket.go | 4 ++-- privval/socket_test.go | 4 ++-- proxy/app_conn_test.go | 4 ++-- proxy/multi_app_conn.go | 2 +- rpc/client/event_test.go | 2 +- rpc/client/httpclient.go | 2 +- rpc/client/interface.go | 2 +- rpc/client/localclient.go | 2 +- rpc/client/mock/abci.go | 2 +- rpc/client/mock/abci_test.go | 2 +- rpc/client/mock/client.go | 2 +- rpc/client/mock/status_test.go | 2 +- rpc/core/abci.go | 2 +- rpc/core/blocks.go | 2 +- rpc/core/mempool.go | 2 +- rpc/core/pipe.go | 4 ++-- rpc/core/status.go | 2 +- rpc/core/tx.go | 2 +- rpc/core/types/responses.go | 2 +- rpc/grpc/client_server.go | 2 +- rpc/lib/client/integration_test.go | 2 +- rpc/lib/client/ws_client.go | 2 +- rpc/lib/client/ws_client_test.go | 2 +- rpc/lib/rpc_test.go | 4 ++-- 
rpc/lib/server/handlers.go | 4 ++-- rpc/lib/server/handlers_test.go | 2 +- rpc/lib/server/http_server.go | 2 +- rpc/lib/server/http_server_test.go | 2 +- rpc/lib/server/parse_test.go | 2 +- rpc/lib/test/main.go | 4 ++-- rpc/test/helpers.go | 4 ++-- scripts/wire2amino.go | 2 +- state/errors.go | 2 +- state/execution.go | 4 ++-- state/execution_test.go | 6 +++--- state/state_test.go | 4 ++-- state/store.go | 4 ++-- state/txindex/indexer_service.go | 2 +- state/txindex/kv/kv.go | 4 ++-- state/txindex/kv/kv_test.go | 4 ++-- state/validation.go | 2 +- state/validation_test.go | 4 ++-- types/block.go | 4 ++-- types/block_test.go | 2 +- types/canonical_json.go | 2 +- types/event_bus.go | 4 ++-- types/event_bus_test.go | 2 +- types/evidence.go | 2 +- types/genesis.go | 2 +- types/heartbeat.go | 2 +- types/params.go | 4 ++-- types/part_set.go | 4 ++-- types/part_set_test.go | 2 +- types/results.go | 4 ++-- types/tx.go | 4 ++-- types/tx_test.go | 4 ++-- types/validator.go | 2 +- types/validator_set.go | 4 ++-- types/validator_set_test.go | 2 +- types/vote.go | 2 +- types/vote_set.go | 2 +- types/vote_set_test.go | 4 ++-- 210 files changed, 305 insertions(+), 305 deletions(-) diff --git a/abci/client/client.go b/abci/client/client.go index cdf2c60e7..558588107 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index e64fcb4d6..502ee0fcd 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -10,7 +10,7 @@ import ( grpc "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var _ Client = (*grpcClient)(nil) diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 
225273a96..3d1f8d8e4 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -4,7 +4,7 @@ import ( "sync" types "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var _ Client = (*localClient)(nil) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 77c3d966a..c3f88725c 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -11,7 +11,7 @@ import ( "time" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const reqQueueSize = 256 // TODO make configurable diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 0e7b908e7..e20244011 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -11,8 +11,8 @@ import ( "github.com/spf13/cobra" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 87fc7b188..857e82baf 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -6,7 +6,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type CounterApplication struct { diff --git a/abci/example/example_test.go b/abci/example/example_test.go index a3d161a2f..bbb53b5af 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -11,8 +11,8 @@ import ( "golang.org/x/net/context" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" 
+ cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 7ddacb5bf..0e69fab9f 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -2,7 +2,7 @@ package kvstore import ( "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // RandVal creates one random validator, with a key derived diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index e078d87d1..0f72b44ea 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -8,8 +8,8 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" ) var ( diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 46c28c99e..2d8f81272 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index a9067ac14..12ccbab78 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -9,9 +9,9 @@ import ( 
"github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 3f8b599e9..ccbe609cc 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -6,7 +6,7 @@ import ( "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type GRPCServer struct { diff --git a/abci/server/server.go b/abci/server/server.go index 49dde4280..ada514fa8 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -13,7 +13,7 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) { diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index e7293ffd7..4b92f04cf 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // var maxNumberConnections = 2 diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go index 0b4634492..78b69ed12 100644 --- a/abci/tests/benchmarks/parallel/parallel.go +++ b/abci/tests/benchmarks/parallel/parallel.go @@ -6,7 +6,7 @@ import ( "log" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func main() { diff 
--git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go index 77b98d57d..b0819799b 100644 --- a/abci/tests/benchmarks/simple/simple.go +++ b/abci/tests/benchmarks/simple/simple.go @@ -8,7 +8,7 @@ import ( "reflect" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func main() { diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 06db13d9a..f67297cd7 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -7,7 +7,7 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func InitChain(client abcicli.Client) error { diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go index 42092345a..a33f4ee9e 100644 --- a/abci/tests/test_app/app.go +++ b/abci/tests/test_app/app.go @@ -7,7 +7,7 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func startClient(abciType string) abcicli.Client { diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 21d3595f0..da6595a46 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -8,7 +8,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestMarshalJSON(t *testing.T) { diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index a6b806fe6..8135db50f 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -50,7 +50,7 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" -import common 
"github.com/tendermint/tmlibs/common" +import common "github.com/tendermint/tendermint/libs/common" import context "golang.org/x/net/context" import grpc "google.golang.org/grpc" diff --git a/abci/types/util.go b/abci/types/util.go index 0924ab5ff..458024c58 100644 --- a/abci/types/util.go +++ b/abci/types/util.go @@ -5,7 +5,7 @@ import ( "encoding/json" "sort" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //------------------------------------------------------------------------------ diff --git a/benchmarks/map_test.go b/benchmarks/map_test.go index 2d9789026..d13a19edf 100644 --- a/benchmarks/map_test.go +++ b/benchmarks/map_test.go @@ -3,7 +3,7 @@ package benchmarks import ( "testing" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func BenchmarkSomething(b *testing.B) { diff --git a/benchmarks/os_test.go b/benchmarks/os_test.go index dfadc3128..406038b9d 100644 --- a/benchmarks/os_test.go +++ b/benchmarks/os_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func BenchmarkFileWrite(b *testing.B) { diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index dd00408cb..b7d2c4d63 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -7,7 +7,7 @@ import ( "time" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func main() { diff --git a/blockchain/pool.go b/blockchain/pool.go index 8b964e81a..e379d846a 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -8,9 +8,9 @@ import ( "sync/atomic" "time" - cmn "github.com/tendermint/tmlibs/common" - flow "github.com/tendermint/tmlibs/flowrate" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + flow 
"github.com/tendermint/tendermint/libs/flowrate" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index 82120eaef..c2f615f94 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 33dfdd288..70a599bab 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -9,8 +9,8 @@ import ( "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index c7f7e9afd..2bb6e9762 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -4,9 +4,9 @@ import ( "net" "testing" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/blockchain/store.go b/blockchain/store.go index e7608b2cc..f02d4facb 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn 
"github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/store_test.go b/blockchain/store_test.go index b74c2b35f..888040bdf 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index ac7dd91ba..20c23f4c4 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -5,8 +5,8 @@ import ( "os" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" ) diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index 4990be477..7aedcd0dc 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // GenNodeKeyCmd allows the generation of a node key. 
It prints node's ID to diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index ad39cd20b..a44c73ebf 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // InitFilesCmd initialises a fresh Tendermint Core instance. diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 6987b7f19..d57598816 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite/proxy" rpcclient "github.com/tendermint/tendermint/rpc/client" diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 32d7b1433..ef0ba3019 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) // ResetAllCmd removes the database of this Tendermint core diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index f229a7889..3c67ddc14 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -7,9 +7,9 @@ import ( "github.com/spf13/viper" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tmlibs/cli" - tmflags "github.com/tendermint/tmlibs/cli/flags" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/cli" + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + 
"github.com/tendermint/tendermint/libs/log" ) var ( diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index 59d258af7..e8095b387 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tmlibs/cli" - cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tendermint/libs/cli" + cmn "github.com/tendermint/tendermint/libs/common" ) var ( diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index 29d29502e..f7639fb27 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var ( diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 8c7f0cd17..a5a8d2d80 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -4,7 +4,7 @@ import ( "os" "path/filepath" - "github.com/tendermint/tmlibs/cli" + "github.com/tendermint/tendermint/libs/cli" cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" cfg "github.com/tendermint/tendermint/config" diff --git a/config/toml.go b/config/toml.go index 4569291d4..37ff4d7c1 100644 --- a/config/toml.go +++ b/config/toml.go @@ -6,7 +6,7 @@ import ( "path/filepath" "text/template" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var configTemplate *template.Template diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index d3be8c358..5360a92c9 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/p2p" 
"github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func init() { diff --git a/consensus/common_test.go b/consensus/common_test.go index b990f525c..2df226ba1 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -22,9 +22,9 @@ import ( "github.com/tendermint/tendermint/privval" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 032cf2f32..a811de731 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" ) diff --git a/consensus/reactor.go b/consensus/reactor.go index 2034ad344..54407ae1e 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -9,8 +9,8 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cstypes "github.com/tendermint/tendermint/consensus/types" tmevents "github.com/tendermint/tendermint/libs/events" diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 498a857b9..9e2aa0a0b 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -11,8 +11,8 @@ 
import ( "time" "github.com/tendermint/tendermint/abci/example/kvstore" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/consensus/replay.go b/consensus/replay.go index f681828cf..3035f75d8 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -11,10 +11,10 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" - //auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + //auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 57204b01a..0c0b0dcb1 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -16,9 +16,9 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/consensus/replay_test.go b/consensus/replay_test.go index f76651d72..da526d249 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -19,16 +19,16 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" - auto 
"github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) var consensusReplayConfig *cfg.Config diff --git a/consensus/state.go b/consensus/state.go index a3196a2f4..d013f4231 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -10,8 +10,8 @@ import ( "time" fail "github.com/ebuchman/fail-test" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" diff --git a/consensus/state_test.go b/consensus/state_test.go index d0def6309..6a14e17b5 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -10,8 +10,8 @@ import ( cstypes "github.com/tendermint/tendermint/consensus/types" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) func init() { diff --git a/consensus/ticker.go b/consensus/ticker.go index b37b7c495..a1e2174c3 100644 --- a/consensus/ticker.go +++ b/consensus/ticker.go @@ -3,8 +3,8 @@ package consensus import ( "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + 
"github.com/tendermint/tendermint/libs/log" ) var ( diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 3c9867940..70a38668f 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type RoundVoteSet struct { diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 678d34759..0de656000 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -6,7 +6,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var config *cfg.Config // NOTE: must be reset for each _test.go file diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go index dcb6c8e02..7a5d69b8e 100644 --- a/consensus/types/peer_round_state.go +++ b/consensus/types/peer_round_state.go @@ -5,7 +5,7 @@ import ( "time" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index 14da1f149..cca560ccf 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index 042d8de7f..080178f24 100644 
--- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func BenchmarkRoundStateDeepCopy(b *testing.B) { diff --git a/consensus/version.go b/consensus/version.go index 2c137bf7f..5c74a16db 100644 --- a/consensus/version.go +++ b/consensus/version.go @@ -1,7 +1,7 @@ package consensus import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // kind of arbitrary diff --git a/consensus/wal.go b/consensus/wal.go index 3d9bf8afc..8c4c10bc7 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -12,8 +12,8 @@ import ( amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 1a61c3405..f3a365809 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -17,10 +17,10 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) // WALWithNBlocks generates a consensus WAL. 
It does this by spining up a diff --git a/consensus/wal_test.go b/consensus/wal_test.go index eebbc85a2..3ecb4fe8f 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/consensus/types" tmtypes "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index 86a9bad9c..ba4b9309a 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -2,7 +2,7 @@ package merkle import ( "github.com/tendermint/tendermint/crypto/tmhash" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Merkle tree from a map. diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index 6eef93623..488e0c907 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -3,8 +3,8 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tmlibs/common" - . "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + . "github.com/tendermint/tendermint/libs/test" "github.com/tendermint/tendermint/crypto/tmhash" "testing" diff --git a/crypto/pub_key.go b/crypto/pub_key.go index b7f954307..588c54113 100644 --- a/crypto/pub_key.go +++ b/crypto/pub_key.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/ed25519" "github.com/tendermint/ed25519/extra25519" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/crypto/tmhash" ) diff --git a/crypto/random.go b/crypto/random.go index 66da035a9..5c5057d30 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -9,7 +9,7 @@ import ( "io" "sync" - . "github.com/tendermint/tmlibs/common" + . 
"github.com/tendermint/tendermint/libs/common" ) var gRandInfo *randInfo diff --git a/crypto/signature.go b/crypto/signature.go index 728a2a04d..ae447da64 100644 --- a/crypto/signature.go +++ b/crypto/signature.go @@ -5,7 +5,7 @@ import ( "crypto/subtle" - . "github.com/tendermint/tmlibs/common" + . "github.com/tendermint/tendermint/libs/common" ) func SignatureFromBytes(pubKeyBytes []byte) (pubKey Signature, err error) { diff --git a/crypto/symmetric.go b/crypto/symmetric.go index d4ac9b55b..62379c15f 100644 --- a/crypto/symmetric.go +++ b/crypto/symmetric.go @@ -3,7 +3,7 @@ package crypto import ( "errors" - . "github.com/tendermint/tmlibs/common" + . "github.com/tendermint/tendermint/libs/common" "golang.org/x/crypto/nacl/secretbox" ) diff --git a/evidence/pool.go b/evidence/pool.go index 4bad355f7..247629b6b 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -4,9 +4,9 @@ import ( "fmt" "sync" - clist "github.com/tendermint/tmlibs/clist" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + clist "github.com/tendermint/tendermint/libs/clist" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 019076234..915cba327 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -9,7 +9,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) var mockState = sm.State{} diff --git a/evidence/reactor.go b/evidence/reactor.go index 5159572e3..7b22b8dba 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -6,8 +6,8 @@ import ( "time" "github.com/tendermint/go-amino" - clist "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" + clist "github.com/tendermint/tendermint/libs/clist" + 
"github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 2f1c34e6e..1687f25a3 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -10,8 +10,8 @@ import ( "github.com/go-kit/kit/log/term" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/evidence/store.go b/evidence/store.go index 6af5d75d8..20b37bdb2 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) /* diff --git a/evidence/store_test.go b/evidence/store_test.go index 3fdb3ba6e..30dc1c4d5 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) //------------------------------------------- diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go index 790be5224..313da6789 100644 --- a/libs/autofile/autofile.go +++ b/libs/autofile/autofile.go @@ -5,7 +5,7 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* AutoFile usage diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index 8f453dd07..b39fb7cf3 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestSIGHUP(t *testing.T) { diff --git 
a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index f2739a7e5..aeb810252 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -8,8 +8,8 @@ import ( "strconv" "strings" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" ) const Version = "0.0.1" diff --git a/libs/autofile/group.go b/libs/autofile/group.go index 1ae545032..b4368ed9e 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -15,7 +15,7 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index 2ffedcc27..72581f9e2 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // NOTE: Returned group has ticker stopped diff --git a/libs/bech32/bech32_test.go b/libs/bech32/bech32_test.go index 7cdebba2b..a2c6c83fb 100644 --- a/libs/bech32/bech32_test.go +++ b/libs/bech32/bech32_test.go @@ -5,7 +5,7 @@ import ( "crypto/sha256" "testing" - "github.com/tendermint/tmlibs/bech32" + "github.com/tendermint/tendermint/libs/bech32" ) func TestEncodeAndDecode(t *testing.T) { diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go index ee4825cf7..156106a5a 100644 --- a/libs/cli/flags/log_level.go +++ b/libs/cli/flags/log_level.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/libs/cli/flags/log_level_test.go b/libs/cli/flags/log_level_test.go index faf9b19db..1503ec281 100644 --- a/libs/cli/flags/log_level_test.go +++ 
b/libs/cli/flags/log_level_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - tmflags "github.com/tendermint/tmlibs/cli/flags" - "github.com/tendermint/tmlibs/log" + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/libs/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go index 160f4394a..b81720c85 100644 --- a/libs/common/repeat_timer_test.go +++ b/libs/common/repeat_timer_test.go @@ -50,7 +50,7 @@ func TestRepeatTimer(t *testing.T) { // TODO detect number of running // goroutines to ensure that // no other times will fire. - // See https://github.com/tendermint/tmlibs/issues/120. + // See https://github.com/tendermint/tmlibs/issues/120. time.Sleep(time.Millisecond * 100) done := true select { diff --git a/libs/common/service.go b/libs/common/service.go index 2f90fa4f9..b6f166e77 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -5,7 +5,7 @@ import ( "fmt" "sync/atomic" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) var ( diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go index d451b7c59..493ed83f9 100644 --- a/libs/db/backend_test.go +++ b/libs/db/backend_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func cleanupDBDir(dir, name string) { diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go index 34bb72273..2d30500dd 100644 --- a/libs/db/c_level_db_test.go +++ b/libs/db/c_level_db_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func BenchmarkRandomReadsWrites2(b *testing.B) { diff --git a/libs/db/common_test.go b/libs/db/common_test.go index 6af6e15e6..027b8ee53 100644 ---
a/libs/db/common_test.go +++ b/libs/db/common_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //---------------------------------------- diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go index a3e785c24..4619a83dd 100644 --- a/libs/db/debug_db.go +++ b/libs/db/debug_db.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func _fmt(f string, az ...interface{}) string { diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go index b5711ba38..fc861decc 100644 --- a/libs/db/fsdb.go +++ b/libs/db/fsdb.go @@ -10,7 +10,7 @@ import ( "sync" "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index eca8a07ff..349e447b2 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -10,7 +10,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func init() { diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go index 266add8b5..47be216a6 100644 --- a/libs/db/go_level_db_test.go +++ b/libs/db/go_level_db_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func BenchmarkRandomReadsWrites(b *testing.B) { diff --git a/libs/db/mem_db.go b/libs/db/mem_db.go index 1521f87ac..580123017 100644 --- a/libs/db/mem_db.go +++ b/libs/db/mem_db.go @@ -114,7 +114,7 @@ func (db *MemDB) Close() { // database, we don't have a destination // to flush contents to nor do we want // any data loss on invoking Close() - // See the discussion in 
https://github.com/tendermint/tmlibs/pull/56 + // See the discussion in https://github.com/tendermint/tmlibs/pull/56 } // Implements DB. diff --git a/libs/db/remotedb/grpcdb/client.go b/libs/db/remotedb/grpcdb/client.go index 86aa12c7f..e11b7839b 100644 --- a/libs/db/remotedb/grpcdb/client.go +++ b/libs/db/remotedb/grpcdb/client.go @@ -4,7 +4,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - protodb "github.com/tendermint/tmlibs/db/remotedb/proto" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" ) // Security defines how the client will talk to the gRPC server. diff --git a/libs/db/remotedb/grpcdb/example_test.go b/libs/db/remotedb/grpcdb/example_test.go index 827a1cf36..eba0d6914 100644 --- a/libs/db/remotedb/grpcdb/example_test.go +++ b/libs/db/remotedb/grpcdb/example_test.go @@ -5,8 +5,8 @@ import ( "context" "log" - grpcdb "github.com/tendermint/tmlibs/db/remotedb/grpcdb" - protodb "github.com/tendermint/tmlibs/db/remotedb/proto" + grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" ) func Example() { diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go index 8320c0517..3a9955ddf 100644 --- a/libs/db/remotedb/grpcdb/server.go +++ b/libs/db/remotedb/grpcdb/server.go @@ -9,8 +9,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - "github.com/tendermint/tmlibs/db" - protodb "github.com/tendermint/tmlibs/db/remotedb/proto" + "github.com/tendermint/tendermint/libs/db" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" ) // ListenAndServe is a blocking function that sets up a gRPC based @@ -58,7 +58,7 @@ var _ protodb.DBServer = (*server)(nil) // * fsdb // * memdB // * leveldb -// See https://godoc.org/github.com/tendermint/tmlibs/db#DBBackendType +// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType func (s *server) Init(ctx
context.Context, in *protodb.Init) (*protodb.Entity, error) { s.mu.Lock() defer s.mu.Unlock() diff --git a/libs/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go index 5332bd68e..2b60d8159 100644 --- a/libs/db/remotedb/remotedb.go +++ b/libs/db/remotedb/remotedb.go @@ -4,9 +4,9 @@ import ( "context" "fmt" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/db/remotedb/grpcdb" - protodb "github.com/tendermint/tmlibs/db/remotedb/proto" + "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" + protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" ) type RemoteDB struct { diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go index 3cf698a65..bc980a238 100644 --- a/libs/db/remotedb/remotedb_test.go +++ b/libs/db/remotedb/remotedb_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/db/remotedb" - "github.com/tendermint/tmlibs/db/remotedb/grpcdb" + "github.com/tendermint/tendermint/libs/db/remotedb" + "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" ) func TestRemoteDB(t *testing.T) { diff --git a/libs/events/events.go b/libs/events/events.go index 075f9b42b..9c7f0fd05 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -6,7 +6,7 @@ package events import ( "sync" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Generic event data can be typed and registered with tendermint/go-amino diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go index 8d8b3b27c..f9957f043 100644 --- a/libs/log/filter_test.go +++ b/libs/log/filter_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestVariousLevels(t *testing.T) { diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go index b2b600ad2..1f890cef1 100644 --- a/libs/log/tm_logger_test.go 
+++ b/libs/log/tm_logger_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/go-logfmt/logfmt" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestLoggerLogsItsErrors(t *testing.T) { diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index a07b323c6..d6f039ce4 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -10,7 +10,7 @@ import ( kitlog "github.com/go-kit/kit/log" "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestTMFmtLogger(t *testing.T) { diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go index 6b0838ca8..1abc6440f 100644 --- a/libs/log/tracing_logger_test.go +++ b/libs/log/tracing_logger_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/pkg/errors" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestTracingLogger(t *testing.T) { diff --git a/libs/merkle/simple_map.go b/libs/merkle/simple_map.go index bd5c88d85..65653e3c8 100644 --- a/libs/merkle/simple_map.go +++ b/libs/merkle/simple_map.go @@ -1,8 +1,8 @@ package merkle import ( - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle/tmhash" ) type SimpleMap struct { diff --git a/libs/merkle/simple_tree.go b/libs/merkle/simple_tree.go index 6bd80f55f..c7bc17db7 100644 --- a/libs/merkle/simple_tree.go +++ b/libs/merkle/simple_tree.go @@ -25,7 +25,7 @@ For larger datasets, use IAVLTree. 
package merkle import ( - "github.com/tendermint/tmlibs/merkle/tmhash" + "github.com/tendermint/tendermint/libs/merkle/tmhash" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { diff --git a/libs/merkle/simple_tree_test.go b/libs/merkle/simple_tree_test.go index 8c4ed01f8..f5c04af70 100644 --- a/libs/merkle/simple_tree_test.go +++ b/libs/merkle/simple_tree_test.go @@ -3,8 +3,8 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tmlibs/common" - . "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + . "github.com/tendermint/tendermint/libs/test" "testing" ) diff --git a/libs/merkle/tmhash/hash_test.go b/libs/merkle/tmhash/hash_test.go index c9e80f2bc..9744a8930 100644 --- a/libs/merkle/tmhash/hash_test.go +++ b/libs/merkle/tmhash/hash_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/merkle/tmhash" + "github.com/tendermint/tendermint/libs/merkle/tmhash" ) func TestHash(t *testing.T) { diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index 260521cd9..4e4634de5 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 776e0653b..4280ca1ea 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -16,7 +16,7 @@ import ( "errors" "sync" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type operation int diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index fd6c11cf4..5e9931e40 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/libs/test/mutate.go b/libs/test/mutate.go index 76534e8b1..3bbbbd217 100644 --- a/libs/test/mutate.go +++ b/libs/test/mutate.go @@ -1,7 +1,7 @@ package test import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Contract: !bytes.Equal(input, output) && len(input) >= len(output) diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go index e0235ba29..2891e5809 100644 --- a/lite/files/commit_test.go +++ b/lite/files/commit_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" ) diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 2f068f160..0294ddf68 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -4,7 +4,7 @@ import ( "net/http" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 9c9557f8f..0ca5be174 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -3,7 +3,7 @@ package proxy import ( "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/client" diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 5fb12a40a..f0eb6b41e 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -1,7 +1,7 @@ package proxy import ( - cmn "github.com/tendermint/tmlibs/common" + cmn 
"github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" certclient "github.com/tendermint/tendermint/lite/client" diff --git a/mempool/mempool.go b/mempool/mempool.go index 4ae179c97..06852c9af 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -11,10 +11,10 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" - auto "github.com/tendermint/tmlibs/autofile" - "github.com/tendermint/tmlibs/clist" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + auto "github.com/tendermint/tendermint/libs/autofile" + "github.com/tendermint/tendermint/libs/clist" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index fb664ddec..1a91de4f9 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" diff --git a/mempool/reactor.go b/mempool/reactor.go index 066118036..e63ff58e8 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -7,8 +7,8 @@ import ( amino "github.com/tendermint/go-amino" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git 
a/mempool/reactor_test.go b/mempool/reactor_test.go index c6844dbb0..b4362032a 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -13,7 +13,7 @@ import ( "github.com/go-kit/kit/log/term" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/node/node.go b/node/node.go index 7e4a986f7..fc05fc32c 100644 --- a/node/node.go +++ b/node/node.go @@ -12,9 +12,9 @@ import ( amino "github.com/tendermint/go-amino" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" diff --git a/node/node_test.go b/node/node_test.go index cdabdbb3f..80f6f02c2 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index 83c8efa4b..da1296da0 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -2,7 +2,7 @@ package p2p import ( "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type Reactor interface { diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index b19a1ca19..9672e0117 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -12,9 +12,9 @@ import ( "time" amino "github.com/tendermint/go-amino" - cmn 
"github.com/tendermint/tmlibs/common" - flow "github.com/tendermint/tmlibs/flowrate" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + flow "github.com/tendermint/tendermint/libs/flowrate" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 8006b37a8..19e05fbc7 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) const maxPingPongPacketSize = 1024 // bytes diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 43f84f0bf..a2cbe008d 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -21,7 +21,7 @@ import ( "golang.org/x/crypto/ripemd160" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // 4 + 1024 == 1028 total frame size diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 7f862fecd..7274dfaf7 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type kvstoreConn struct { diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go index fc2242366..d18a9f99d 100644 --- a/p2p/dummy/peer.go +++ b/p2p/dummy/peer.go @@ -5,7 +5,7 @@ import ( p2p "github.com/tendermint/tendermint/p2p" tmconn "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type peer struct { diff --git a/p2p/fuzz.go b/p2p/fuzz.go 
index 8d00ba40d..80e4fed6a 100644 --- a/p2p/fuzz.go +++ b/p2p/fuzz.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // FuzzedConnection wraps any net.Conn and depending on the mode either delays diff --git a/p2p/key.go b/p2p/key.go index 7e242bfc3..9548d34f0 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -7,7 +7,7 @@ import ( "io/ioutil" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // ID is a hex-encoded crypto.Address diff --git a/p2p/key_test.go b/p2p/key_test.go index c2e1f3e0e..51e1c0787 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestLoadOrGenNodeKey(t *testing.T) { diff --git a/p2p/listener.go b/p2p/listener.go index bf50d2a35..cd548866b 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -8,8 +8,8 @@ import ( "time" "github.com/tendermint/tendermint/p2p/upnp" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) // Listener is a network listener for stream-oriented protocols, providing diff --git a/p2p/listener_test.go b/p2p/listener_test.go index 1aa0a93a8..3d8e40731 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestListener(t *testing.T) { diff --git a/p2p/netaddress.go b/p2p/netaddress.go index 3e0d99d69..ebac8cc82 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -13,7 +13,7 @@ import ( "strings" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn 
"github.com/tendermint/tendermint/libs/common" ) // NetAddress defines information about a peer on the network diff --git a/p2p/node_info.go b/p2p/node_info.go index 60383bc5e..5e8160a3b 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -2,7 +2,7 @@ package p2p import ( "fmt" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "strings" ) diff --git a/p2p/peer.go b/p2p/peer.go index cf96354e6..5c615275b 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -7,8 +7,8 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 32d312437..aa63ef949 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Returns an empty kvstore peer diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 73c0db825..281b218d9 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 592269957..421aa135a 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -14,7 +14,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" 
"github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 2e2604286..dd983f76f 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) func createTempFileName(prefix string) string { diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 38142dd9d..3237e1253 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -4,7 +4,7 @@ import ( "encoding/json" "os" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* Loading & Saving */ diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 48b6d43e7..2d93783df 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -8,7 +8,7 @@ import ( "time" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/conn" diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index e8231c180..cdef5440a 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -13,8 +13,8 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" diff --git a/p2p/switch.go b/p2p/switch.go index bf5f9747f..d1e2ef23b 100644 --- a/p2p/switch.go +++ 
b/p2p/switch.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/p2p/switch_test.go b/p2p/switch_test.go index afccfd585..97539112e 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" diff --git a/p2p/test_util.go b/p2p/test_util.go index b0b801487..467532f0f 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,8 +5,8 @@ import ( "net" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go index 47c0ca74d..c0175a93f 100644 --- a/p2p/trust/metric.go +++ b/p2p/trust/metric.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //--------------------------------------------------------------------------------------- diff --git a/p2p/trust/store.go b/p2p/trust/store.go index bbb4592a4..31f659a43 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -8,8 +8,8 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" ) const defaultStorePeriodicSaveInterval = 1 * time.Minute diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 
4e5553961..e1bea8636 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) func TestTrustMetricStoreSaveLoad(t *testing.T) { diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 55479415f..2de5e7905 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -5,8 +5,8 @@ import ( "net" "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) type UPNPCapabilities struct { diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 8a54b5ccf..2abcf5590 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // TODO: type ? 
diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 345b51438..5889c0d68 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestGenLoadValidator(t *testing.T) { diff --git a/privval/socket.go b/privval/socket.go index d0be3ba4e..1e8a3807b 100644 --- a/privval/socket.go +++ b/privval/socket.go @@ -9,8 +9,8 @@ import ( "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" p2pconn "github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/types" diff --git a/privval/socket_test.go b/privval/socket_test.go index 1813893af..7bcacd6e1 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" p2pconn "github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/types" diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index a50071fea..3c556d4f0 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -8,8 +8,8 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + 
"github.com/tendermint/tendermint/libs/log" ) //---------------------------------------- diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 5d89ef195..279fa42ee 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -3,7 +3,7 @@ package proxy import ( "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------- diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 844d2b88a..79c452fc9 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -10,7 +10,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var waitForEventTimeout = 5 * time.Second diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 79967bd0c..4b85bf01d 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -11,7 +11,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* diff --git a/rpc/client/interface.go b/rpc/client/interface.go index afe2d8fa0..f939c855b 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -23,7 +23,7 @@ implementation. 
import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIClient groups together the functionality that principally diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index d89ec3b22..df3daf907 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) /* diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 244855c6b..c8ca060c6 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -6,7 +6,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIApp will send all abci related request to the named app, diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 323a42a47..bcf443cf0 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestABCIMock(t *testing.T) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 6af9abb27..955df6277 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -20,7 +20,7 @@ import ( "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" 
"github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Client wraps arbitrary implementations of the various interfaces. diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index dafd35080..8e3c15061 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestStatus(t *testing.T) { diff --git a/rpc/core/abci.go b/rpc/core/abci.go index c07724d58..a5eede3fc 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -4,7 +4,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Query the application for some information. diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a5ad5b4cb..0e8873152 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -6,7 +6,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Get block headers for minHeight <= height <= maxHeight. 
diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 437f5965a..ecc41ce12 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -10,7 +10,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index bf32c9c66..128b3e9a7 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -10,8 +10,8 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) const ( diff --git a/rpc/core/status.go b/rpc/core/status.go index 2c54d0a94..63e62b2c7 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -7,7 +7,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Get Tendermint status including node info, pubkey, latest block diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 2fa7825fd..f53d82f14 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -3,7 +3,7 @@ package core import ( "fmt" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 27302be13..4fec416ed 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -7,7 
+7,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/state" diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index c06f1cf46..c88989685 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -9,7 +9,7 @@ import ( "golang.org/x/net/netutil" "google.golang.org/grpc" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Config is an gRPC server configuration. diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go index d3d993374..93a32388f 100644 --- a/rpc/lib/client/integration_test.go +++ b/rpc/lib/client/integration_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestWSClientReconnectWithJitter(t *testing.T) { diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 6928dff36..9a07c8676 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/go-amino" types "github.com/tendermint/tendermint/rpc/lib/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 73f671609..e902fe21a 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -12,7 +12,7 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index fe765473d..31839dcab 
100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -18,8 +18,8 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" client "github.com/tendermint/tendermint/rpc/lib/client" server "github.com/tendermint/tendermint/rpc/lib/server" diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index dcacfb663..1bfe52536 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -19,8 +19,8 @@ import ( amino "github.com/tendermint/go-amino" types "github.com/tendermint/tendermint/rpc/lib/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) // RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions. 
diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index af5665138..3471eb791 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -16,7 +16,7 @@ import ( amino "github.com/tendermint/go-amino" rs "github.com/tendermint/tendermint/rpc/lib/server" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) ////////////////////////////////////////////////////////////////////////////// diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 6223d205c..5d816ef22 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -15,7 +15,7 @@ import ( "golang.org/x/net/netutil" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) // Config is an RPC server configuration. diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go index 22fd8a23b..3cbe0d906 100644 --- a/rpc/lib/server/http_server_test.go +++ b/rpc/lib/server/http_server_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" ) func TestMaxOpenConnections(t *testing.T) { diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go index d4a59c622..f71316906 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/lib/server/parse_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestParseJSONMap(t *testing.T) { diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go index 4dd95ce05..cb9560e12 100644 --- a/rpc/lib/test/main.go +++ b/rpc/lib/test/main.go @@ -7,8 +7,8 @@ import ( amino "github.com/tendermint/go-amino" rpcserver 
"github.com/tendermint/tendermint/rpc/lib/server" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) var routes = map[string]*rpcserver.RPCFunc{ diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 1d6f865c2..915911818 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -8,10 +8,10 @@ import ( "strings" "time" - "github.com/tendermint/tmlibs/log" + "github.com/tendermint/tendermint/libs/log" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" cfg "github.com/tendermint/tendermint/config" nm "github.com/tendermint/tendermint/node" diff --git a/scripts/wire2amino.go b/scripts/wire2amino.go index 72f472dac..867c5735a 100644 --- a/scripts/wire2amino.go +++ b/scripts/wire2amino.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/go-amino" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" diff --git a/state/errors.go b/state/errors.go index afb5737d7..d40c7e141 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,7 +1,7 @@ package state import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type ( diff --git a/state/execution.go b/state/execution.go index 0d6ee81bf..1c0af17a8 100644 --- a/state/execution.go +++ b/state/execution.go @@ -7,8 +7,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) 
//----------------------------------------------------------------------------- diff --git a/state/execution_test.go b/state/execution_test.go index 71fbe3a4d..9c0635dcc 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" diff --git a/state/state_test.go b/state/state_test.go index 30a87fb05..bf0c910fa 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" diff --git a/state/store.go b/state/store.go index 798932541..8db0689de 100644 --- a/state/store.go +++ b/state/store.go @@ -5,8 +5,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" ) //------------------------------------------------------------------------ diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 264be1fd8..088252f5e 100644 --- 
a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -3,7 +3,7 @@ package txindex import ( "context" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" ) diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 718a55d15..707325929 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -10,8 +10,8 @@ import ( "time" "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index cb718a5fa..1272f4a73 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - db "github.com/tendermint/tmlibs/db" + cmn "github.com/tendermint/tendermint/libs/common" + db "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" diff --git a/state/validation.go b/state/validation.go index 84a4cc824..c36339203 100644 --- a/state/validation.go +++ b/state/validation.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tendermint/libs/db" ) //----------------------------------------------------- diff --git a/state/validation_test.go b/state/validation_test.go index b4695b077..362a40737 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" - dbm 
"github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" ) func TestValidateBlock(t *testing.T) { diff --git a/types/block.go b/types/block.go index 43856f1f4..c3a399f67 100644 --- a/types/block.go +++ b/types/block.go @@ -8,8 +8,8 @@ import ( "sync" "time" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" "golang.org/x/crypto/ripemd160" ) diff --git a/types/block_test.go b/types/block_test.go index 1132a6f5f..0948e7b21 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestValidateBlock(t *testing.T) { diff --git a/types/canonical_json.go b/types/canonical_json.go index 258f7714b..189a8a7a2 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -3,7 +3,7 @@ package types import ( "time" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Canonical json is amino's json for structs with fields in alphabetical order diff --git a/types/event_bus.go b/types/event_bus.go index cb4b17d51..54fc60c7b 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -5,8 +5,8 @@ import ( "fmt" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) const defaultCapacity = 0 diff --git a/types/event_bus_test.go b/types/event_bus_test.go index a5de2e84d..81903004d 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -13,7 +13,7 @@ import ( abci 
"github.com/tendermint/tendermint/abci/types" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestEventBusPublishEventTx(t *testing.T) { diff --git a/types/evidence.go b/types/evidence.go index 4d8b59272..a83e2cd00 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -6,7 +6,7 @@ import ( "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tmlibs/merkle" + "github.com/tendermint/tendermint/libs/merkle" ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. diff --git a/types/genesis.go b/types/genesis.go index 099bb499c..0367c6b2f 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //------------------------------------------------------------ diff --git a/types/heartbeat.go b/types/heartbeat.go index fcf545f2c..cebe2864c 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Heartbeat is a simple vote-like structure so validators can diff --git a/types/params.go b/types/params.go index 0654d07b9..e2117ed4c 100644 --- a/types/params.go +++ b/types/params.go @@ -2,8 +2,8 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" ) const ( diff --git a/types/part_set.go b/types/part_set.go index 18cfe802c..7116176d3 100644 --- a/types/part_set.go +++ 
b/types/part_set.go @@ -9,8 +9,8 @@ import ( "golang.org/x/crypto/ripemd160" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" ) var ( diff --git a/types/part_set_test.go b/types/part_set_test.go index 545b4d42b..01437f05e 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "testing" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) const ( diff --git a/types/results.go b/types/results.go index 9f4f33c36..d9381420a 100644 --- a/types/results.go +++ b/types/results.go @@ -2,8 +2,8 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" ) //----------------------------------------------------------------------------- diff --git a/types/tx.go b/types/tx.go index cad7dda3a..d02dc7a17 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,8 +6,8 @@ import ( "fmt" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" ) // Tx is an arbitrary byte array. 
diff --git a/types/tx_test.go b/types/tx_test.go index 2a93ceb31..67df5c5f3 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" - ctest "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + ctest "github.com/tendermint/tendermint/libs/test" ) func makeTxs(cnt, size int) Txs { diff --git a/types/validator.go b/types/validator.go index bea975a4f..e43acf09d 100644 --- a/types/validator.go +++ b/types/validator.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // Volatile state for each Validator diff --git a/types/validator_set.go b/types/validator_set.go index f2fac2929..6c39f5be2 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -7,8 +7,8 @@ import ( "sort" "strings" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/merkle" ) // ValidatorSet represent a set of *Validator at a given height. 
diff --git a/types/validator_set_test.go b/types/validator_set_test.go index c78a36063..61f4dada9 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) func TestCopy(t *testing.T) { diff --git a/types/vote.go b/types/vote.go index 1e7b263b6..ed4ebd73e 100644 --- a/types/vote.go +++ b/types/vote.go @@ -7,7 +7,7 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var ( diff --git a/types/vote_set.go b/types/vote_set.go index a60d95daf..c51681053 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -8,7 +8,7 @@ import ( "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) // UNSTABLE diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 9efef41ba..32ceb7b16 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -6,8 +6,8 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tmlibs/common" - tst "github.com/tendermint/tmlibs/test" + cmn "github.com/tendermint/tendermint/libs/common" + tst "github.com/tendermint/tendermint/libs/test" ) // NOTE: privValidators are in order From 965a55d2a83f31895a2f67a290bb14356912cdbd Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:39:41 -0400 Subject: [PATCH 504/515] remove tmlibs from dep --- Gopkg.lock | 25 +++++-------------------- Gopkg.toml | 4 ---- 2 files changed, 5 insertions(+), 24 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index e2fadfadc..be930bdf9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -16,7 +16,10 @@ [[projects]] branch = "master" name = "github.com/btcsuite/btcutil" - packages = ["base58"] + packages = [ + 
"base58", + "bech32" + ] revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" [[projects]] @@ -302,24 +305,6 @@ revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" version = "0.10.1" -[[projects]] - name = "github.com/tendermint/tmlibs" - packages = [ - "autofile", - "cli", - "cli/flags", - "clist", - "common", - "db", - "flowrate", - "log", - "merkle", - "merkle/tmhash", - "test" - ] - revision = "49596e0a1f48866603813df843c9409fc19805c6" - version = "v0.9.0" - [[projects]] branch = "master" name = "golang.org/x/crypto" @@ -424,6 +409,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "c25289282b94abc7f0c390e592e5e1636b7f26cb4773863ac39cde7fdc7b5bdf" + inputs-digest = "520cd9a034e6be26dc6e0864dae3fb294b81850d8de41ca6c2c8cd6295425f28" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 18e2767a9..a3715e120 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -73,10 +73,6 @@ name = "github.com/tendermint/go-amino" version = "~0.10.1" -[[override]] - name = "github.com/tendermint/tmlibs" - version = "~0.9.0" - [[constraint]] name = "google.golang.org/grpc" version = "~1.7.3" From ff73e0aa55a8214131eeda95898b82f38d14e588 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:50:31 -0400 Subject: [PATCH 505/515] fix test folder mishap --- {libs/test => test}/README.md | 0 {libs/test => test}/app/clean.sh | 0 {libs/test => test}/app/counter_test.sh | 0 {libs/test => test}/app/grpc_client.go | 0 {libs/test => test}/app/kvstore_test.sh | 0 {libs/test => test}/app/test.sh | 0 {libs/test => test}/docker/Dockerfile | 0 {libs/test => test}/docker/build.sh | 0 {libs/test => test}/p2p/README.md | 0 {libs/test => test}/p2p/atomic_broadcast/test.sh | 0 {libs/test => test}/p2p/basic/test.sh | 0 {libs/test => test}/p2p/circleci.sh | 0 {libs/test => test}/p2p/client.sh | 0 {libs/test => test}/p2p/data/mach1/core/config/genesis.json | 0 {libs/test => test}/p2p/data/mach1/core/config/node_key.json | 0 .../test => 
test}/p2p/data/mach1/core/config/priv_validator.json | 0 {libs/test => test}/p2p/data/mach2/core/config/genesis.json | 0 {libs/test => test}/p2p/data/mach2/core/config/node_key.json | 0 .../test => test}/p2p/data/mach2/core/config/priv_validator.json | 0 {libs/test => test}/p2p/data/mach3/core/config/genesis.json | 0 {libs/test => test}/p2p/data/mach3/core/config/node_key.json | 0 .../test => test}/p2p/data/mach3/core/config/priv_validator.json | 0 {libs/test => test}/p2p/data/mach4/core/config/genesis.json | 0 {libs/test => test}/p2p/data/mach4/core/config/node_key.json | 0 .../test => test}/p2p/data/mach4/core/config/priv_validator.json | 0 {libs/test => test}/p2p/fast_sync/check_peer.sh | 0 {libs/test => test}/p2p/fast_sync/test.sh | 0 {libs/test => test}/p2p/fast_sync/test_peer.sh | 0 {libs/test => test}/p2p/ip.sh | 0 {libs/test => test}/p2p/ip_plus_id.sh | 0 {libs/test => test}/p2p/kill_all/check_peers.sh | 0 {libs/test => test}/p2p/kill_all/test.sh | 0 {libs/test => test}/p2p/local_testnet_start.sh | 0 {libs/test => test}/p2p/local_testnet_stop.sh | 0 {libs/test => test}/p2p/peer.sh | 0 {libs/test => test}/p2p/persistent_peers.sh | 0 {libs/test => test}/p2p/pex/check_peer.sh | 0 {libs/test => test}/p2p/pex/dial_peers.sh | 0 {libs/test => test}/p2p/pex/test.sh | 0 {libs/test => test}/p2p/pex/test_addrbook.sh | 0 {libs/test => test}/p2p/pex/test_dial_peers.sh | 0 {libs/test => test}/p2p/test.sh | 0 {libs/test => test}/persist/test_failure_indices.sh | 0 {libs/test => test}/persist/test_simple.sh | 0 {libs/test => test}/persist/txs.sh | 0 {libs/test => test}/test_cover.sh | 0 46 files changed, 0 insertions(+), 0 deletions(-) rename {libs/test => test}/README.md (100%) rename {libs/test => test}/app/clean.sh (100%) rename {libs/test => test}/app/counter_test.sh (100%) rename {libs/test => test}/app/grpc_client.go (100%) rename {libs/test => test}/app/kvstore_test.sh (100%) rename {libs/test => test}/app/test.sh (100%) rename {libs/test => test}/docker/Dockerfile 
(100%) rename {libs/test => test}/docker/build.sh (100%) rename {libs/test => test}/p2p/README.md (100%) rename {libs/test => test}/p2p/atomic_broadcast/test.sh (100%) rename {libs/test => test}/p2p/basic/test.sh (100%) rename {libs/test => test}/p2p/circleci.sh (100%) rename {libs/test => test}/p2p/client.sh (100%) rename {libs/test => test}/p2p/data/mach1/core/config/genesis.json (100%) rename {libs/test => test}/p2p/data/mach1/core/config/node_key.json (100%) rename {libs/test => test}/p2p/data/mach1/core/config/priv_validator.json (100%) rename {libs/test => test}/p2p/data/mach2/core/config/genesis.json (100%) rename {libs/test => test}/p2p/data/mach2/core/config/node_key.json (100%) rename {libs/test => test}/p2p/data/mach2/core/config/priv_validator.json (100%) rename {libs/test => test}/p2p/data/mach3/core/config/genesis.json (100%) rename {libs/test => test}/p2p/data/mach3/core/config/node_key.json (100%) rename {libs/test => test}/p2p/data/mach3/core/config/priv_validator.json (100%) rename {libs/test => test}/p2p/data/mach4/core/config/genesis.json (100%) rename {libs/test => test}/p2p/data/mach4/core/config/node_key.json (100%) rename {libs/test => test}/p2p/data/mach4/core/config/priv_validator.json (100%) rename {libs/test => test}/p2p/fast_sync/check_peer.sh (100%) rename {libs/test => test}/p2p/fast_sync/test.sh (100%) rename {libs/test => test}/p2p/fast_sync/test_peer.sh (100%) rename {libs/test => test}/p2p/ip.sh (100%) rename {libs/test => test}/p2p/ip_plus_id.sh (100%) rename {libs/test => test}/p2p/kill_all/check_peers.sh (100%) rename {libs/test => test}/p2p/kill_all/test.sh (100%) rename {libs/test => test}/p2p/local_testnet_start.sh (100%) rename {libs/test => test}/p2p/local_testnet_stop.sh (100%) rename {libs/test => test}/p2p/peer.sh (100%) rename {libs/test => test}/p2p/persistent_peers.sh (100%) rename {libs/test => test}/p2p/pex/check_peer.sh (100%) rename {libs/test => test}/p2p/pex/dial_peers.sh (100%) rename {libs/test => 
test}/p2p/pex/test.sh (100%) rename {libs/test => test}/p2p/pex/test_addrbook.sh (100%) rename {libs/test => test}/p2p/pex/test_dial_peers.sh (100%) rename {libs/test => test}/p2p/test.sh (100%) rename {libs/test => test}/persist/test_failure_indices.sh (100%) rename {libs/test => test}/persist/test_simple.sh (100%) rename {libs/test => test}/persist/txs.sh (100%) rename {libs/test => test}/test_cover.sh (100%) diff --git a/libs/test/README.md b/test/README.md similarity index 100% rename from libs/test/README.md rename to test/README.md diff --git a/libs/test/app/clean.sh b/test/app/clean.sh similarity index 100% rename from libs/test/app/clean.sh rename to test/app/clean.sh diff --git a/libs/test/app/counter_test.sh b/test/app/counter_test.sh similarity index 100% rename from libs/test/app/counter_test.sh rename to test/app/counter_test.sh diff --git a/libs/test/app/grpc_client.go b/test/app/grpc_client.go similarity index 100% rename from libs/test/app/grpc_client.go rename to test/app/grpc_client.go diff --git a/libs/test/app/kvstore_test.sh b/test/app/kvstore_test.sh similarity index 100% rename from libs/test/app/kvstore_test.sh rename to test/app/kvstore_test.sh diff --git a/libs/test/app/test.sh b/test/app/test.sh similarity index 100% rename from libs/test/app/test.sh rename to test/app/test.sh diff --git a/libs/test/docker/Dockerfile b/test/docker/Dockerfile similarity index 100% rename from libs/test/docker/Dockerfile rename to test/docker/Dockerfile diff --git a/libs/test/docker/build.sh b/test/docker/build.sh similarity index 100% rename from libs/test/docker/build.sh rename to test/docker/build.sh diff --git a/libs/test/p2p/README.md b/test/p2p/README.md similarity index 100% rename from libs/test/p2p/README.md rename to test/p2p/README.md diff --git a/libs/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh similarity index 100% rename from libs/test/p2p/atomic_broadcast/test.sh rename to test/p2p/atomic_broadcast/test.sh diff --git 
a/libs/test/p2p/basic/test.sh b/test/p2p/basic/test.sh similarity index 100% rename from libs/test/p2p/basic/test.sh rename to test/p2p/basic/test.sh diff --git a/libs/test/p2p/circleci.sh b/test/p2p/circleci.sh similarity index 100% rename from libs/test/p2p/circleci.sh rename to test/p2p/circleci.sh diff --git a/libs/test/p2p/client.sh b/test/p2p/client.sh similarity index 100% rename from libs/test/p2p/client.sh rename to test/p2p/client.sh diff --git a/libs/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json similarity index 100% rename from libs/test/p2p/data/mach1/core/config/genesis.json rename to test/p2p/data/mach1/core/config/genesis.json diff --git a/libs/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json similarity index 100% rename from libs/test/p2p/data/mach1/core/config/node_key.json rename to test/p2p/data/mach1/core/config/node_key.json diff --git a/libs/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json similarity index 100% rename from libs/test/p2p/data/mach1/core/config/priv_validator.json rename to test/p2p/data/mach1/core/config/priv_validator.json diff --git a/libs/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json similarity index 100% rename from libs/test/p2p/data/mach2/core/config/genesis.json rename to test/p2p/data/mach2/core/config/genesis.json diff --git a/libs/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json similarity index 100% rename from libs/test/p2p/data/mach2/core/config/node_key.json rename to test/p2p/data/mach2/core/config/node_key.json diff --git a/libs/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json similarity index 100% rename from libs/test/p2p/data/mach2/core/config/priv_validator.json rename to 
test/p2p/data/mach2/core/config/priv_validator.json diff --git a/libs/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json similarity index 100% rename from libs/test/p2p/data/mach3/core/config/genesis.json rename to test/p2p/data/mach3/core/config/genesis.json diff --git a/libs/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json similarity index 100% rename from libs/test/p2p/data/mach3/core/config/node_key.json rename to test/p2p/data/mach3/core/config/node_key.json diff --git a/libs/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json similarity index 100% rename from libs/test/p2p/data/mach3/core/config/priv_validator.json rename to test/p2p/data/mach3/core/config/priv_validator.json diff --git a/libs/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json similarity index 100% rename from libs/test/p2p/data/mach4/core/config/genesis.json rename to test/p2p/data/mach4/core/config/genesis.json diff --git a/libs/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json similarity index 100% rename from libs/test/p2p/data/mach4/core/config/node_key.json rename to test/p2p/data/mach4/core/config/node_key.json diff --git a/libs/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json similarity index 100% rename from libs/test/p2p/data/mach4/core/config/priv_validator.json rename to test/p2p/data/mach4/core/config/priv_validator.json diff --git a/libs/test/p2p/fast_sync/check_peer.sh b/test/p2p/fast_sync/check_peer.sh similarity index 100% rename from libs/test/p2p/fast_sync/check_peer.sh rename to test/p2p/fast_sync/check_peer.sh diff --git a/libs/test/p2p/fast_sync/test.sh b/test/p2p/fast_sync/test.sh similarity index 100% rename from libs/test/p2p/fast_sync/test.sh rename to test/p2p/fast_sync/test.sh diff --git 
a/libs/test/p2p/fast_sync/test_peer.sh b/test/p2p/fast_sync/test_peer.sh similarity index 100% rename from libs/test/p2p/fast_sync/test_peer.sh rename to test/p2p/fast_sync/test_peer.sh diff --git a/libs/test/p2p/ip.sh b/test/p2p/ip.sh similarity index 100% rename from libs/test/p2p/ip.sh rename to test/p2p/ip.sh diff --git a/libs/test/p2p/ip_plus_id.sh b/test/p2p/ip_plus_id.sh similarity index 100% rename from libs/test/p2p/ip_plus_id.sh rename to test/p2p/ip_plus_id.sh diff --git a/libs/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh similarity index 100% rename from libs/test/p2p/kill_all/check_peers.sh rename to test/p2p/kill_all/check_peers.sh diff --git a/libs/test/p2p/kill_all/test.sh b/test/p2p/kill_all/test.sh similarity index 100% rename from libs/test/p2p/kill_all/test.sh rename to test/p2p/kill_all/test.sh diff --git a/libs/test/p2p/local_testnet_start.sh b/test/p2p/local_testnet_start.sh similarity index 100% rename from libs/test/p2p/local_testnet_start.sh rename to test/p2p/local_testnet_start.sh diff --git a/libs/test/p2p/local_testnet_stop.sh b/test/p2p/local_testnet_stop.sh similarity index 100% rename from libs/test/p2p/local_testnet_stop.sh rename to test/p2p/local_testnet_stop.sh diff --git a/libs/test/p2p/peer.sh b/test/p2p/peer.sh similarity index 100% rename from libs/test/p2p/peer.sh rename to test/p2p/peer.sh diff --git a/libs/test/p2p/persistent_peers.sh b/test/p2p/persistent_peers.sh similarity index 100% rename from libs/test/p2p/persistent_peers.sh rename to test/p2p/persistent_peers.sh diff --git a/libs/test/p2p/pex/check_peer.sh b/test/p2p/pex/check_peer.sh similarity index 100% rename from libs/test/p2p/pex/check_peer.sh rename to test/p2p/pex/check_peer.sh diff --git a/libs/test/p2p/pex/dial_peers.sh b/test/p2p/pex/dial_peers.sh similarity index 100% rename from libs/test/p2p/pex/dial_peers.sh rename to test/p2p/pex/dial_peers.sh diff --git a/libs/test/p2p/pex/test.sh b/test/p2p/pex/test.sh similarity index 100% 
rename from libs/test/p2p/pex/test.sh rename to test/p2p/pex/test.sh diff --git a/libs/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh similarity index 100% rename from libs/test/p2p/pex/test_addrbook.sh rename to test/p2p/pex/test_addrbook.sh diff --git a/libs/test/p2p/pex/test_dial_peers.sh b/test/p2p/pex/test_dial_peers.sh similarity index 100% rename from libs/test/p2p/pex/test_dial_peers.sh rename to test/p2p/pex/test_dial_peers.sh diff --git a/libs/test/p2p/test.sh b/test/p2p/test.sh similarity index 100% rename from libs/test/p2p/test.sh rename to test/p2p/test.sh diff --git a/libs/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh similarity index 100% rename from libs/test/persist/test_failure_indices.sh rename to test/persist/test_failure_indices.sh diff --git a/libs/test/persist/test_simple.sh b/test/persist/test_simple.sh similarity index 100% rename from libs/test/persist/test_simple.sh rename to test/persist/test_simple.sh diff --git a/libs/test/persist/txs.sh b/test/persist/txs.sh similarity index 100% rename from libs/test/persist/txs.sh rename to test/persist/txs.sh diff --git a/libs/test/test_cover.sh b/test/test_cover.sh similarity index 100% rename from libs/test/test_cover.sh rename to test/test_cover.sh From 3f4847331f48641bd64c74064d91789225cd0ff6 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 23:14:04 -0400 Subject: [PATCH 506/515] update grpc version. 
fix remotedb tests --- Gopkg.lock | 21 +++++++++++++-------- Gopkg.toml | 14 +++++++------- libs/db/remotedb/remotedb_test.go | 4 ++-- libs/db/remotedb/test.crt | 25 +++++++++++++++++++++++++ libs/db/remotedb/test.key | 27 +++++++++++++++++++++++++++ 5 files changed, 74 insertions(+), 17 deletions(-) create mode 100644 libs/db/remotedb/test.crt create mode 100644 libs/db/remotedb/test.key diff --git a/Gopkg.lock b/Gopkg.lock index be930bdf9..6e1c41493 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -116,7 +116,6 @@ ".", "hcl/ast", "hcl/parser", - "hcl/printer", "hcl/scanner", "hcl/strconv", "hcl/token", @@ -240,8 +239,8 @@ [[projects]] name = "github.com/spf13/cobra" packages = ["."] - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" + revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" + version = "v0.0.1" [[projects]] branch = "master" @@ -258,8 +257,8 @@ [[projects]] name = "github.com/spf13/viper" packages = ["."] - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" + revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" + version = "v1.0.0" [[projects]] name = "github.com/stretchr/testify" @@ -381,9 +380,13 @@ packages = [ ".", "balancer", + "balancer/base", + "balancer/roundrobin", "codes", "connectivity", "credentials", + "encoding", + "encoding/proto", "grpclb/grpc_lb_v1/messages", "grpclog", "internal", @@ -392,13 +395,15 @@ "naming", "peer", "resolver", + "resolver/dns", + "resolver/passthrough", "stats", "status", "tap", "transport" ] - revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - version = "v1.7.5" + revision = "d11072e7ca9811b1100b80ca0269ac831f06d024" + version = "v1.11.3" [[projects]] name = "gopkg.in/yaml.v2" @@ -409,6 +414,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "520cd9a034e6be26dc6e0864dae3fb294b81850d8de41ca6c2c8cd6295425f28" + inputs-digest = "71753a9d4ece4252d23941f116f5ff66c0d5da730a099e5a9867491d223ed93b" solver-name = "gps-cdcl" solver-version = 
1 diff --git a/Gopkg.toml b/Gopkg.toml index a3715e120..28394b8b3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -35,15 +35,15 @@ [[constraint]] name = "github.com/go-kit/kit" - version = "~0.6.0" + version = "=0.6.0" [[constraint]] name = "github.com/gogo/protobuf" - version = "~1.0.0" + version = "=1.0.0" [[constraint]] name = "github.com/golang/protobuf" - version = "~1.0.0" + version = "=1.0.0" [[constraint]] name = "github.com/gorilla/websocket" @@ -51,7 +51,7 @@ [[constraint]] name = "github.com/pkg/errors" - version = "~0.8.0" + version = "=0.8.0" [[constraint]] name = "github.com/rcrowley/go-metrics" @@ -59,11 +59,11 @@ [[constraint]] name = "github.com/spf13/cobra" - version = "~0.0.1" + version = "=0.0.1" [[constraint]] name = "github.com/spf13/viper" - version = "~1.0.0" + version = "=1.0.0" [[constraint]] name = "github.com/stretchr/testify" @@ -75,7 +75,7 @@ [[constraint]] name = "google.golang.org/grpc" - version = "~1.7.3" + version = "~1.11.3" # this got updated and broke, so locked to an old working commit ... 
[[override]] diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go index bc980a238..0e7319971 100644 --- a/libs/db/remotedb/remotedb_test.go +++ b/libs/db/remotedb/remotedb_test.go @@ -12,8 +12,8 @@ import ( ) func TestRemoteDB(t *testing.T) { - cert := "::.crt" - key := "::.key" + cert := "test.crt" + key := "test.key" ln, err := net.Listen("tcp", "0.0.0.0:0") require.Nil(t, err, "expecting a port to have been assigned on which we can listen") srv, err := grpcdb.NewServer(cert, key) diff --git a/libs/db/remotedb/test.crt b/libs/db/remotedb/test.crt new file mode 100644 index 000000000..bdc8a0f29 --- /dev/null +++ b/libs/db/remotedb/test.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw +GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw +MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35 +dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4 +D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf +pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1 +0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ +Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E +BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc +XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw +KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA +A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O +uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v +lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO ++S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/ +OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf +PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ +381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL 
+xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK +f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8 ++eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ +T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ== +-----END CERTIFICATE----- diff --git a/libs/db/remotedb/test.key b/libs/db/remotedb/test.key new file mode 100644 index 000000000..14d285584 --- /dev/null +++ b/libs/db/remotedb/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB +ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se +qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B +vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu +x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De +qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2 +ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi +mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y +TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ +iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc +de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf +Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR +G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W +hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv +TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW +dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ +I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS +hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST +xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl +hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr +vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ +XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs 
+P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W +//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg +ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ +-----END RSA PRIVATE KEY----- From eccdce11c513c5d09adc101f3643a51859319e8a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 12:06:43 -0400 Subject: [PATCH 507/515] make linter happy --- libs/autofile/cmd/logjack.go | 1 - libs/bech32/bech32_test.go | 2 +- libs/common/async.go | 18 ++++++++---------- libs/common/errors_test.go | 2 -- libs/db/debug_db.go | 4 ---- 5 files changed, 9 insertions(+), 18 deletions(-) diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index aeb810252..17b482bed 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -13,7 +13,6 @@ import ( ) const Version = "0.0.1" -const sleepSeconds = 1 // Every second const readBufferSize = 1024 // 1KB at a time // Parse command-line options diff --git a/libs/bech32/bech32_test.go b/libs/bech32/bech32_test.go index a2c6c83fb..830942061 100644 --- a/libs/bech32/bech32_test.go +++ b/libs/bech32/bech32_test.go @@ -25,7 +25,7 @@ func TestEncodeAndDecode(t *testing.T) { if hrp != "shasum" { t.Error("Invalid hrp") } - if bytes.Compare(data, sum[:]) != 0 { + if !bytes.Equal(data, sum[:]) { t.Error("Invalid decode") } } diff --git a/libs/common/async.go b/libs/common/async.go index 7be09a3c1..e3293ab4c 100644 --- a/libs/common/async.go +++ b/libs/common/async.go @@ -76,17 +76,15 @@ func (trs *TaskResultSet) Reap() *TaskResultSet { func (trs *TaskResultSet) Wait() *TaskResultSet { for i := 0; i < len(trs.results); i++ { var trch = trs.chz[i] - select { - case result, ok := <-trch: - if ok { - // Write result. - trs.results[i] = taskResultOK{ - TaskResult: result, - OK: true, - } - } else { - // We already wrote it. + result, ok := <-trch + if ok { + // Write result. 
+ trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, } + } else { + // We already wrote it. } } return trs diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go index 16aede225..52c78a765 100644 --- a/libs/common/errors_test.go +++ b/libs/common/errors_test.go @@ -17,10 +17,8 @@ func TestErrorPanic(t *testing.T) { if r := recover(); r != nil { err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") } - return }() panic(pnk{"something"}) - return nil } var err = capturePanic() diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go index 4619a83dd..bb361a266 100644 --- a/libs/db/debug_db.go +++ b/libs/db/debug_db.go @@ -7,10 +7,6 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) -func _fmt(f string, az ...interface{}) string { - return fmt.Sprintf(f, az...) -} - //---------------------------------------- // debugDB From 1c018d3fd238bf7b9606d5a20344b0545168c75c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 22:21:29 -0400 Subject: [PATCH 508/515] p2p: external address * new config option for external address to advertise * if blank, defaults to best guess from listener * if laddr ip address is also blank, default to IPv4 --- CHANGELOG.md | 2 ++ config/config.go | 4 ++++ config/toml.go | 6 ++++++ node/node.go | 3 +-- p2p/listener.go | 33 ++++++++++++++++++++++++--------- p2p/listener_test.go | 6 +++++- p2p/pex/pex_reactor_test.go | 11 +++++++---- 7 files changed, 49 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d73c949a2..97acb5855 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ FEATURES [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html) guide. - [p2p] Add IPv6 support to peering. +- [p2p] Add `external_address` to config to allow specifying the address for + peers to dial IMPROVEMENT - [rpc/client] Supports https and wss now. 
diff --git a/config/config.go b/config/config.go index e01819305..22cecf989 100644 --- a/config/config.go +++ b/config/config.go @@ -276,6 +276,9 @@ type P2PConfig struct { // Address to listen for incoming connections ListenAddress string `mapstructure:"laddr"` + // Address to advertise to peers for them to dial + ExternalAddress string `mapstructure:"external_address"` + // Comma separated list of seed nodes to connect to // We only use these if we can’t connect to peers in the addrbook Seeds string `mapstructure:"seeds"` @@ -340,6 +343,7 @@ type P2PConfig struct { func DefaultP2PConfig() *P2PConfig { return &P2PConfig{ ListenAddress: "tcp://0.0.0.0:26656", + ExternalAddress: "", UPNP: false, AddrBook: defaultAddrBookPath, AddrBookStrict: true, diff --git a/config/toml.go b/config/toml.go index 37ff4d7c1..084325baa 100644 --- a/config/toml.go +++ b/config/toml.go @@ -142,6 +142,12 @@ max_open_connections = {{ .RPC.MaxOpenConnections }} # Address to listen for incoming connections laddr = "{{ .P2P.ListenAddress }}" +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. 
+external_address = "{{ .P2P.ExternalAddress }}" + # Comma separated list of seed nodes to connect to seeds = "{{ .P2P.Seeds }}" diff --git a/node/node.go b/node/node.go index fc05fc32c..fa667d1dc 100644 --- a/node/node.go +++ b/node/node.go @@ -426,8 +426,7 @@ func (n *Node) OnStart() error { } // Create & add listener - protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress) - l := p2p.NewDefaultListener(protocol, address, n.config.P2P.UPNP, n.Logger.With("module", "p2p")) + l := p2p.NewDefaultListener(n.config.P2P, n.Logger.With("module", "p2p")) n.sw.AddListener(l) // Generate node PrivKey diff --git a/p2p/listener.go b/p2p/listener.go index cd548866b..339e26dd4 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -7,9 +7,10 @@ import ( "strings" "time" - "github.com/tendermint/tendermint/p2p/upnp" + "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p/upnp" ) // Listener is a network listener for stream-oriented protocols, providing @@ -59,8 +60,10 @@ func splitHostPort(addr string) (host string, port int) { // NewDefaultListener creates a new DefaultListener on lAddr, optionally trying // to determine external address using UPnP. -func NewDefaultListener(protocol string, lAddr string, UPNP bool, logger log.Logger) Listener { - // Local listen IP & port +func NewDefaultListener(cfg *config.P2PConfig, logger log.Logger) Listener { + + // Split protocol, address, and port. + protocol, lAddr := cmn.ProtocolAndAddress(cfg.ListenAddress) lAddrIP, lAddrPort := splitHostPort(lAddr) // Create listener @@ -88,17 +91,29 @@ func NewDefaultListener(protocol string, lAddr string, UPNP bool, logger log.Log panic(err) } + inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0" + // Determine external address... 
var extAddr *NetAddress - if UPNP { + if cfg.UPNP { // If the lAddrIP is INADDR_ANY, try UPnP - if lAddrIP == "" || lAddrIP == "0.0.0.0" { + if inAddrAny { extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) } } + + if cfg.ExternalAddress != "" { + var err error + extAddr, err = NewNetAddressStringWithOptionalID(cfg.ExternalAddress) + if err != nil { + panic(fmt.Sprintf("Error in ExternalAddress: %v", err)) + } + } + // Otherwise just use the local address... if extAddr == nil { - extAddr = getNaiveExternalAddress(listenerPort, false, logger) + defaultToIPv4 := inAddrAny + extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger) } if extAddr == nil { panic("Could not determine external address!") @@ -237,7 +252,7 @@ func isIpv6(ip net.IP) bool { } // TODO: use syscalls: see issue #712 -func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress { +func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress { addrs, err := net.InterfaceAddrs() if err != nil { panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) @@ -248,7 +263,7 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) * if !ok { continue } - if !isIpv6(ipnet.IP) { + if defaultToIPv4 || !isIpv6(ipnet.IP) { v4 := ipnet.IP.To4() if v4 == nil || (!settleForLocal && v4[0] == 127) { // loopback @@ -263,5 +278,5 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) * // try again, but settle for local logger.Info("Node may not be connected to internet. 
Settling for local address") - return getNaiveExternalAddress(port, true, logger) + return getNaiveExternalAddress(defaultToIPv4, port, true, logger) } diff --git a/p2p/listener_test.go b/p2p/listener_test.go index 3d8e40731..3b61c98dc 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -4,12 +4,16 @@ import ( "bytes" "testing" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" ) func TestListener(t *testing.T) { // Create a listener - l := NewDefaultListener("tcp", ":8001", false, log.TestingLogger()) + cfg := &config.P2PConfig{ + ListenAddress: "tcp://:8001", + } + l := NewDefaultListener(cfg, log.TestingLogger()) // Dial the listener lAddr := l.ExternalAddress() diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index cdef5440a..dddad8f10 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -109,7 +109,10 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(2, 1) for i, sw := range switches { - sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, false, logger.With("pex", i))) + cfg := &config.P2PConfig{ + ListenAddress: fmt.Sprintf("tcp://%v", sw.NodeInfo().ListenAddr), + } + sw.AddListener(p2p.NewDefaultListener(cfg, logger.With("pex", i))) err := sw.Start() // start switch and reactors require.Nil(t, err) @@ -230,9 +233,9 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { ) seed.AddListener( p2p.NewDefaultListener( - "tcp", - seed.NodeInfo().ListenAddr, - false, + &config.P2PConfig{ + ListenAddress: fmt.Sprintf("tcp://%v", seed.NodeInfo().ListenAddr), + }, log.TestingLogger(), ), ) From 737c5c065dd4af5c50938278e3f947ff537bd4a8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 12:18:17 -0400 Subject: [PATCH 509/515] fixes from review --- node/node.go | 6 +++++- p2p/listener.go | 28 +++++++++++++++------------- p2p/listener_test.go | 6 +----- p2p/pex/pex_reactor_test.go | 12 ++---------- 4 files changed, 23 
insertions(+), 29 deletions(-) diff --git a/node/node.go b/node/node.go index fa667d1dc..0780891ef 100644 --- a/node/node.go +++ b/node/node.go @@ -426,7 +426,11 @@ func (n *Node) OnStart() error { } // Create & add listener - l := p2p.NewDefaultListener(n.config.P2P, n.Logger.With("module", "p2p")) + l := p2p.NewDefaultListener( + n.config.P2P.ListenAddress, + n.config.P2P.ExternalAddress, + n.config.P2P.UPNP, + n.Logger.With("module", "p2p")) n.sw.AddListener(l) // Generate node PrivKey diff --git a/p2p/listener.go b/p2p/listener.go index 339e26dd4..3509ec69c 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -7,7 +7,6 @@ import ( "strings" "time" - "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p/upnp" @@ -60,10 +59,14 @@ func splitHostPort(addr string) (host string, port int) { // NewDefaultListener creates a new DefaultListener on lAddr, optionally trying // to determine external address using UPnP. -func NewDefaultListener(cfg *config.P2PConfig, logger log.Logger) Listener { +func NewDefaultListener( + fullListenAddrString string, + externalAddrString string, + useUPnP bool, + logger log.Logger) Listener { // Split protocol, address, and port. - protocol, lAddr := cmn.ProtocolAndAddress(cfg.ListenAddress) + protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString) lAddrIP, lAddrPort := splitHostPort(lAddr) // Create listener @@ -93,24 +96,23 @@ func NewDefaultListener(cfg *config.P2PConfig, logger log.Logger) Listener { inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0" - // Determine external address... + // Determine external address. 
var extAddr *NetAddress - if cfg.UPNP { - // If the lAddrIP is INADDR_ANY, try UPnP - if inAddrAny { - extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) - } - } - if cfg.ExternalAddress != "" { + if externalAddrString != "" { var err error - extAddr, err = NewNetAddressStringWithOptionalID(cfg.ExternalAddress) + extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString) if err != nil { panic(fmt.Sprintf("Error in ExternalAddress: %v", err)) } } - // Otherwise just use the local address... + // If the lAddrIP is INADDR_ANY, try UPnP. + if extAddr == nil && useUPnP && inAddrAny { + extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) + } + + // Otherwise just use the local address. if extAddr == nil { defaultToIPv4 := inAddrAny extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger) diff --git a/p2p/listener_test.go b/p2p/listener_test.go index 3b61c98dc..c82ae3909 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -4,16 +4,12 @@ import ( "bytes" "testing" - "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" ) func TestListener(t *testing.T) { // Create a listener - cfg := &config.P2PConfig{ - ListenAddress: "tcp://:8001", - } - l := NewDefaultListener(cfg, log.TestingLogger()) + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) // Dial the listener lAddr := l.ExternalAddress() diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index dddad8f10..6d6e91c38 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -109,10 +109,7 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(2, 1) for i, sw := range switches { - cfg := &config.P2PConfig{ - ListenAddress: fmt.Sprintf("tcp://%v", sw.NodeInfo().ListenAddr), - } - sw.AddListener(p2p.NewDefaultListener(cfg, logger.With("pex", i))) + sw.AddListener(p2p.NewDefaultListener("tcp://"+sw.NodeInfo().ListenAddr, "", false, 
logger.With("pex", i))) err := sw.Start() // start switch and reactors require.Nil(t, err) @@ -232,12 +229,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { }, ) seed.AddListener( - p2p.NewDefaultListener( - &config.P2PConfig{ - ListenAddress: fmt.Sprintf("tcp://%v", seed.NodeInfo().ListenAddr), - }, - log.TestingLogger(), - ), + p2p.NewDefaultListener("tcp://"+seed.NodeInfo().ListenAddr, "", false, log.TestingLogger()), ) require.Nil(t, seed.Start()) defer seed.Stop() From c3504c111e409f9c7783a8ca6bddee218efa4ba1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 13:04:34 -0400 Subject: [PATCH 510/515] add test --- p2p/listener_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/p2p/listener_test.go b/p2p/listener_test.go index c82ae3909..f87b5d6f5 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -2,8 +2,11 @@ package p2p import ( "bytes" + "net" + "strings" "testing" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" ) @@ -45,3 +48,32 @@ func TestListener(t *testing.T) { // Close the server, no longer needed. l.Stop() } + +func TestExternalAddress(t *testing.T) { + { + // Create a listener with no external addr. Should default + // to local ipv4. + l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) + lAddr := l.ExternalAddress().String() + _, _, err := net.SplitHostPort(lAddr) + require.Nil(t, err) + spl := strings.Split(lAddr, ".") + require.Equal(t, len(spl), 4) + l.Stop() + } + + { + // Create a listener with set external ipv4 addr. 
+ setExAddr := "8.8.8.8:8080" + l := NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) + lAddr := l.ExternalAddress().String() + require.Equal(t, lAddr, setExAddr) + l.Stop() + } + + { + // Invalid external addr causes panic + setExAddr := "awrlsckjnal:8080" + require.Panics(t, func() { NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) }) + } +} From d47b4ef12dbb787298d72529828c17a21f79553e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 14:20:27 -0400 Subject: [PATCH 511/515] update some docs --- CHANGELOG.md | 2 +- README.md | 13 ++-- docs/spec/blockchain/encoding.md | 20 +++-- docs/spec/scripts/crypto.go | 129 ++----------------------------- networks/local/README.md | 2 +- 5 files changed, 22 insertions(+), 144 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 97acb5855..c0d7896d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ BREAKING CHANGES: * Integers are encoded as strings - [crypto] Update go-crypto to v0.10.0 and merge into `crypto` * privKey.Sign returns error. - * ed25519 address is the first 20-bytes of the SHA256 of the pubkey + * ed25519 address changed to the first 20-bytes of the SHA256 of the raw pubkey bytes * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 - [rpc] `syncing` is now called `catching_up`. 
diff --git a/README.md b/README.md index daba4f59a..2f7d13cdd 100644 --- a/README.md +++ b/README.md @@ -50,11 +50,11 @@ Go version | Go1.9 or higher ## Install -See the [install instructions](/docs/install.rst) +See the [install instructions](/docs/install.md) ## Quick Start -- [Single node](/docs/using-tendermint.rst) +- [Single node](/docs/using-tendermint.md) - [Local cluster using docker-compose](/networks/local) - [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md) - [Join the public testnet](https://cosmos.network/testnet) @@ -72,10 +72,7 @@ Additional information about some - and eventually all - of the sub-projects bel ### Sub-projects -* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface -* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library -* [Go-Crypto](http://github.com/tendermint/tendermint/crypto), an elliptic curve cryptography library -* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally +* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3 * [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation ### Tools @@ -119,8 +116,8 @@ CHANGELOG even if they don't lead to MINOR version bumps: - node Exported objects in these packages that are not covered by the versioning scheme -are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time. -Functions, types, and values in any other package may also change at any time. +are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any +time without notice. Functions, types, and values in any other package may also change at any time. 
### Upgrades diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index fd8e64a42..df056ab2b 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -54,17 +54,15 @@ familiar with amino encoding. You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes ( while || stands for byte concatenation here). -| Type | Name | Prefix | Length | -| ---- | ---- | ------ | ----- | -| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE62 | 0x20 | -| PubKeyLedgerEd25519 | tendermint/PubKeyLedgerEd25519 | 0x5C3453B2 | 0x20 | -| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE982 | 0x21 | -| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288912 | 0x40 | -| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79A | 0x20 | -| PrivKeyLedgerSecp256k1 | tendermint/PrivKeyLedgerSecp256k1 | 0x10CAB393 | variable | -| PrivKeyLedgerEd25519 | tendermint/PrivKeyLedgerEd25519 | 0x0CFEEF9B | variable | -| SignatureEd25519 | tendermint/SignatureKeyEd25519 | 0x3DA1DB2A | 0x40 | -| SignatureSecp256k1 | tendermint/SignatureKeySecp256k1 | 0x16E1FEEA | variable | +| Type | Name | Prefix | Length | Notes | +| ---- | ---- | ------ | ----- | ------ | +| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | +| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | +| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | +| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | +| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | | +| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | +| ### Examples diff --git a/docs/spec/scripts/crypto.go b/docs/spec/scripts/crypto.go index aeca07eeb..9ae800f8f 100644 --- a/docs/spec/scripts/crypto.go +++ b/docs/spec/scripts/crypto.go @@ -2,132 +2,15 @@ package main import ( "fmt" + "os" - "github.com/tendermint/tendermint/crypto" + amino 
"github.com/tendermint/go-amino" + crypto "github.com/tendermint/tendermint/crypto" ) -// SECRET -var SECRET = []byte("some secret") - -func printEd() { - priv := crypto.GenPrivKeyEd25519FromSecret(SECRET) - pub := priv.PubKey().(crypto.PubKeyEd25519) - sigV, err := priv.Sign([]byte("hello")) - if err != nil { - fmt.Println("Unexpected error:", err) - } - sig := sigV.(crypto.SignatureEd25519) - - name := "tendermint/PubKeyEd25519" - length := len(pub[:]) - - fmt.Println("### PubKeyEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: raw 32-byte Ed25519 pubkey") - fmt.Println("type PubKeyEd25519 [32]byte") - fmt.Println("") - fmt.Println(`func (pubkey PubKeyEd25519) Address() []byte { - // NOTE: hash of the Amino encoded bytes! - return RIPEMD160(AminoEncode(pubkey)) -}`) - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 32-byte Ed25519 pubkey `%X` would be encoded as `%X`.\n\n", pub[:], pub.Bytes()) - fmt.Printf("The address would then be `RIPEMD160(0x%X)` or `%X`\n", pub.Bytes(), pub.Address()) - fmt.Println("") - - name = "tendermint/SignatureKeyEd25519" - length = len(sig[:]) - - fmt.Println("### SignatureEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: raw 64-byte Ed25519 signature") - fmt.Println("type SignatureEd25519 [64]byte") - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 64-byte Ed25519 signature `%X` would be encoded as `%X`\n", sig[:], sig.Bytes()) - fmt.Println("") - - name = "tendermint/PrivKeyEd25519" - - fmt.Println("### PrivKeyEd25519") - fmt.Println("") - fmt.Println("```") - fmt.Println("// Name:", name) - fmt.Println("// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key") - 
fmt.Println("type PrivKeyEd25519 [64]byte") - fmt.Println("```") -} - -func printSecp() { - priv := crypto.GenPrivKeySecp256k1FromSecret(SECRET) - pub := priv.PubKey().(crypto.PubKeySecp256k1) - sigV, err := priv.Sign([]byte("hello")) - if err != nil { - fmt.Println("Unexpected error:", err) - } - sig := sigV.(crypto.SignatureSecp256k1) - - name := "tendermint/PubKeySecp256k1" - length := len(pub[:]) - - fmt.Println("### PubKeySecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4]) - fmt.Printf("// Length: 0x%X \n", length) - fmt.Println("// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03") - fmt.Println("type PubKeySecp256k1 [33]byte") - fmt.Println("") - fmt.Println(`func (pubkey PubKeySecp256k1) Address() []byte { - // NOTE: hash of the raw pubkey bytes (not Amino encoded!). - // Compatible with Bitcoin addresses. - return RIPEMD160(SHA256(pubkey[:])) -}`) - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the 33-byte Secp256k1 pubkey `%X` would be encoded as `%X`\n\n", pub[:], pub.Bytes()) - fmt.Printf("The address would then be `RIPEMD160(SHA256(0x%X))` or `%X`\n", pub[:], pub.Address()) - fmt.Println("") - - name = "tendermint/SignatureKeySecp256k1" - - fmt.Println("### SignatureSecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Printf("// Name: %s\n", name) - fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4]) - fmt.Printf("// Length: Variable\n") - fmt.Printf("// Encoding prefix: Variable\n") - fmt.Println("// Notes: raw bytes of the Secp256k1 signature") - fmt.Println("type SignatureSecp256k1 []byte") - fmt.Println("```") - fmt.Println("") - fmt.Printf("For example, the Secp256k1 signature `%X` would be encoded as `%X`\n", []byte(sig[:]), sig.Bytes()) - fmt.Println("") - - name = "tendermint/PrivKeySecp256k1" - - fmt.Println("### PrivKeySecp256k1") - fmt.Println("") - fmt.Println("```") - fmt.Println("// Name:", name) - 
fmt.Println("// Notes: raw 32-byte priv key") - fmt.Println("type PrivKeySecp256k1 [32]byte") - fmt.Println("```") -} - func main() { - printEd() + cdc := amino.NewCodec() + crypto.RegisterAmino(cdc) + cdc.PrintTypes(os.Stdout) fmt.Println("") - printSecp() } diff --git a/networks/local/README.md b/networks/local/README.md index 554abdf4b..09a0b12cb 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -2,7 +2,7 @@ ## Requirements -- [Install tendermint](/docs/install.rst) +- [Install tendermint](/docs/install.md) - [Install docker](https://docs.docker.com/engine/installation/) - [Install docker-compose](https://docs.docker.com/compose/install/) From ec710395b79cf0d96a60a5801f11c9204c701f48 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 1 Jul 2018 01:40:03 -0400 Subject: [PATCH 512/515] RIPEMD160 -> SHA256 --- Gopkg.lock | 2 +- docs/spec/blockchain/encoding.md | 16 ++++++++++++---- types/block.go | 6 +++--- types/evidence.go | 5 +++-- types/params.go | 2 +- types/part_set.go | 7 +++---- types/results.go | 4 +++- types/tx.go | 12 ++++++------ types/validator_set.go | 2 +- 9 files changed, 33 insertions(+), 23 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 6e1c41493..17b74d74b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -286,7 +286,7 @@ "leveldb/table", "leveldb/util" ] - revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697" + revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" [[projects]] branch = "master" diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index df056ab2b..16902d099 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -151,7 +151,15 @@ func MakeParts(obj interface{}, partSize int) []Part Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure. -RIPEMD160 is always used as the hashing function. 
+Tendermint always uses the `TMHASH` hash function, which is the first 20-bytes +of the SHA256: + +``` +func TMHASH(bz []byte) []byte { + shasum := SHA256(bz) + return shasum[:20] +} +``` ### Simple Merkle Root @@ -174,7 +182,7 @@ func SimpleMerkleRoot(hashes [][]byte) []byte{ func SimpleConcatHash(left, right []byte) []byte{ left = encodeByteSlice(left) right = encodeByteSlice(right) - return RIPEMD160 (append(left, right)) + return TMHASH(append(left, right)) } ``` @@ -182,8 +190,8 @@ Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint length prefix) before being concatenated together and hashed. Note: we will abuse notion and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`. -For `struct` arguments, we compute a `[][]byte` by sorting elements of the `struct` according to -field name and then hashing them. +For `struct` arguments, we compute a `[][]byte` containing the hash of each +field in the struct sorted by the hash of the field name. For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements. ### Simple Merkle Proof diff --git a/types/block.go b/types/block.go index c3a399f67..bc018ee89 100644 --- a/types/block.go +++ b/types/block.go @@ -8,9 +8,9 @@ import ( "sync" "time" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" - "golang.org/x/crypto/ripemd160" ) // Block defines the atomic unit of a Tendermint blockchain. 
@@ -552,7 +552,7 @@ type hasher struct { } func (h hasher) Hash() []byte { - hasher := ripemd160.New() + hasher := tmhash.New() if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { bz, err := cdc.MarshalBinaryBare(h.item) if err != nil { diff --git a/types/evidence.go b/types/evidence.go index a83e2cd00..266375ec3 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -5,8 +5,9 @@ import ( "fmt" "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/libs/merkle" + "github.com/tendermint/tendermint/crypto/merkle" ) // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. @@ -180,7 +181,7 @@ type EvidenceList []Evidence // Hash returns the simple merkle root hash of the EvidenceList. func (evl EvidenceList) Hash() []byte { // Recursive impl. - // Copied from tmlibs/merkle to avoid allocations + // Copied from crypto/merkle to avoid allocations switch len(evl) { case 0: return nil diff --git a/types/params.go b/types/params.go index e2117ed4c..3056c82a0 100644 --- a/types/params.go +++ b/types/params.go @@ -2,8 +2,8 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" ) const ( diff --git a/types/part_set.go b/types/part_set.go index 7116176d3..f6d7f6b6e 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -7,10 +7,9 @@ import ( "io" "sync" - "golang.org/x/crypto/ripemd160" - + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" ) var ( @@ -31,7 +30,7 @@ func (part *Part) Hash() []byte { if part.hash != nil { return part.hash } - hasher := ripemd160.New() + hasher := tmhash.New() hasher.Write(part.Bytes) // nolint: errcheck, gas 
part.hash = hasher.Sum(nil) return part.hash diff --git a/types/results.go b/types/results.go index d9381420a..7f8e6093a 100644 --- a/types/results.go +++ b/types/results.go @@ -2,8 +2,8 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" ) //----------------------------------------------------------------------------- @@ -51,6 +51,8 @@ func (a ABCIResults) Bytes() []byte { // Hash returns a merkle hash of all results func (a ABCIResults) Hash() []byte { + // NOTE: we copy the impl of the merkle tree for txs - + // we should be consistent and either do it for both or not. return merkle.SimpleHashFromHashers(a.toHashers()) } diff --git a/types/tx.go b/types/tx.go index d02dc7a17..489f0b232 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,19 +6,19 @@ import ( "fmt" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" ) // Tx is an arbitrary byte array. // NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed. -// Alternatively, it may make sense to add types here and let -// []byte be type 0x1 so we can have versioned txs if need be in the future. +// Might we want types here ? type Tx []byte -// Hash computes the RIPEMD160 hash of the wire encoded transaction. +// Hash computes the TMHASH hash of the wire encoded transaction. func (tx Tx) Hash() []byte { - return aminoHasher(tx).Hash() + return tmhash.Sum(tx) } // String returns the hex-encoded transaction as a string. @@ -32,7 +32,7 @@ type Txs []Tx // Hash returns the simple Merkle root hash of the transactions. func (txs Txs) Hash() []byte { // Recursive impl. 
- // Copied from tmlibs/merkle to avoid allocations + // Copied from tendermint/crypto/merkle to avoid allocations switch len(txs) { case 0: return nil diff --git a/types/validator_set.go b/types/validator_set.go index 6c39f5be2..191f8b428 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -7,8 +7,8 @@ import ( "sort" "strings" + "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle" ) // ValidatorSet represent a set of *Validator at a given height. From ca3e337ef9f978e308f4885281b81d17d6e4db29 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 14:32:27 -0400 Subject: [PATCH 513/515] remove libs/merkle --- libs/merkle/README.md | 4 - libs/merkle/simple_map.go | 84 ------------------- libs/merkle/simple_map_test.go | 53 ------------ libs/merkle/simple_proof.go | 144 -------------------------------- libs/merkle/simple_tree.go | 91 -------------------- libs/merkle/simple_tree_test.go | 87 ------------------- libs/merkle/tmhash/hash.go | 41 --------- libs/merkle/tmhash/hash_test.go | 23 ----- libs/merkle/types.go | 47 ----------- 9 files changed, 574 deletions(-) delete mode 100644 libs/merkle/README.md delete mode 100644 libs/merkle/simple_map.go delete mode 100644 libs/merkle/simple_map_test.go delete mode 100644 libs/merkle/simple_proof.go delete mode 100644 libs/merkle/simple_tree.go delete mode 100644 libs/merkle/simple_tree_test.go delete mode 100644 libs/merkle/tmhash/hash.go delete mode 100644 libs/merkle/tmhash/hash_test.go delete mode 100644 libs/merkle/types.go diff --git a/libs/merkle/README.md b/libs/merkle/README.md deleted file mode 100644 index c44978368..000000000 --- a/libs/merkle/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## Simple Merkle Tree - -For smaller static data structures that don't require immutable snapshots or mutability; -for instance the transactions and validation signatures of a block can be hashed using this 
simple merkle tree logic. diff --git a/libs/merkle/simple_map.go b/libs/merkle/simple_map.go deleted file mode 100644 index 65653e3c8..000000000 --- a/libs/merkle/simple_map.go +++ /dev/null @@ -1,84 +0,0 @@ -package merkle - -import ( - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/merkle/tmhash" -) - -type SimpleMap struct { - kvs cmn.KVPairs - sorted bool -} - -func NewSimpleMap() *SimpleMap { - return &SimpleMap{ - kvs: nil, - sorted: false, - } -} - -func (sm *SimpleMap) Set(key string, value Hasher) { - sm.sorted = false - - // Hash the key to blind it... why not? - khash := SimpleHashFromBytes([]byte(key)) - - // And the value is hashed too, so you can - // check for equality with a cached value (say) - // and make a determination to fetch or not. - vhash := value.Hash() - - sm.kvs = append(sm.kvs, cmn.KVPair{ - Key: khash, - Value: vhash, - }) -} - -// Merkle root hash of items sorted by key -// (UNSTABLE: and by value too if duplicate key). -func (sm *SimpleMap) Hash() []byte { - sm.Sort() - return hashKVPairs(sm.kvs) -} - -func (sm *SimpleMap) Sort() { - if sm.sorted { - return - } - sm.kvs.Sort() - sm.sorted = true -} - -// Returns a copy of sorted KVPairs. -func (sm *SimpleMap) KVPairs() cmn.KVPairs { - sm.Sort() - kvs := make(cmn.KVPairs, len(sm.kvs)) - copy(kvs, sm.kvs) - return kvs -} - -//---------------------------------------- - -// A local extension to KVPair that can be hashed. 
-type KVPair cmn.KVPair - -func (kv KVPair) Hash() []byte { - hasher := tmhash.New() - err := encodeByteSlice(hasher, kv.Key) - if err != nil { - panic(err) - } - err = encodeByteSlice(hasher, kv.Value) - if err != nil { - panic(err) - } - return hasher.Sum(nil) -} - -func hashKVPairs(kvs cmn.KVPairs) []byte { - kvsH := make([]Hasher, 0, len(kvs)) - for _, kvp := range kvs { - kvsH = append(kvsH, KVPair(kvp)) - } - return SimpleHashFromHashers(kvsH) -} diff --git a/libs/merkle/simple_map_test.go b/libs/merkle/simple_map_test.go deleted file mode 100644 index 6e1004db2..000000000 --- a/libs/merkle/simple_map_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package merkle - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -type strHasher string - -func (str strHasher) Hash() []byte { - return SimpleHashFromBytes([]byte(str)) -} - -func TestSimpleMap(t *testing.T) { - { - db := NewSimpleMap() - db.Set("key1", strHasher("value1")) - assert.Equal(t, "3dafc06a52039d029be57c75c9d16356a4256ef4", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := NewSimpleMap() - db.Set("key1", strHasher("value2")) - assert.Equal(t, "03eb5cfdff646bc4e80fec844e72fd248a1c6b2c", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := NewSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := NewSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := NewSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := 
NewSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } -} diff --git a/libs/merkle/simple_proof.go b/libs/merkle/simple_proof.go deleted file mode 100644 index ca6ccf372..000000000 --- a/libs/merkle/simple_proof.go +++ /dev/null @@ -1,144 +0,0 @@ -package merkle - -import ( - "bytes" - "fmt" -) - -type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. -} - -// proofs[0] is the proof for items[0]. -func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashers(items) - rootHash = rootSPN.Hash - proofs = make([]*SimpleProof, len(items)) - for i, trail := range trails { - proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), - } - } - return -} - -func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*SimpleProof) { - sm := NewSimpleMap() - for k, v := range m { - sm.Set(k, v) - } - sm.Sort() - kvs := sm.kvs - kvsH := make([]Hasher, 0, len(kvs)) - for _, kvp := range kvs { - kvsH = append(kvsH, KVPair(kvp)) - } - return SimpleProofsFromHashers(kvsH) -} - -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. -func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - return computedHash != nil && bytes.Equal(computedHash, rootHash) -} - -func (sp *SimpleProof) String() string { - return sp.StringIndented("") -} - -func (sp *SimpleProof) StringIndented(indent string) string { - return fmt.Sprintf(`SimpleProof{ -%s Aunts: %X -%s}`, - indent, sp.Aunts, - indent) -} - -// Use the leafHash and innerHashes to get the root merkle hash. 
-// If the length of the innerHashes slice isn't exactly correct, the result is nil. -// Recursive impl. -func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - if index >= total || index < 0 || total <= 0 { - return nil - } - switch total { - case 0: - panic("Cannot call computeHashFromAunts() with 0 total") - case 1: - if len(innerHashes) != 0 { - return nil - } - return leafHash - default: - if len(innerHashes) == 0 { - return nil - } - numLeft := (total + 1) / 2 - if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil - } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) - } -} - -// Helper structure to construct merkle proof. -// The node and the tree is thrown away afterwards. -// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. -// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. -type SimpleProofNode struct { - Hash []byte - Parent *SimpleProofNode - Left *SimpleProofNode // Left sibling (only one of Left,Right is set) - Right *SimpleProofNode // Right sibling (only one of Left,Right is set) -} - -// Starting from a leaf SimpleProofNode, FlattenAunts() will return -// the inner hashes for the item corresponding to the leaf. -func (spn *SimpleProofNode) FlattenAunts() [][]byte { - // Nonrecursive impl. 
- innerHashes := [][]byte{} - for spn != nil { - if spn.Left != nil { - innerHashes = append(innerHashes, spn.Left.Hash) - } else if spn.Right != nil { - innerHashes = append(innerHashes, spn.Right.Hash) - } else { - break - } - spn = spn.Parent - } - return innerHashes -} - -// trails[0].Hash is the leaf hash for items[0]. -// trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { - // Recursive impl. - switch len(items) { - case 0: - return nil, nil - case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} - return []*SimpleProofNode{trail}, trail - default: - lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) - root := &SimpleProofNode{rootHash, nil, nil, nil} - leftRoot.Parent = root - leftRoot.Right = rightRoot - rightRoot.Parent = root - rightRoot.Left = leftRoot - return append(lefts, rights...), root - } -} diff --git a/libs/merkle/simple_tree.go b/libs/merkle/simple_tree.go deleted file mode 100644 index c7bc17db7..000000000 --- a/libs/merkle/simple_tree.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Computes a deterministic minimal height merkle tree hash. -If the number of items is not a power of two, some leaves -will be at different levels. Tries to keep both sides of -the tree the same size, but the left may be one greater. - -Use this for short deterministic trees, such as the validator list. -For larger datasets, use IAVLTree. 
- - * - / \ - / \ - / \ - / \ - * * - / \ / \ - / \ / \ - / \ / \ - * * * h6 - / \ / \ / \ - h0 h1 h2 h3 h4 h5 - -*/ - -package merkle - -import ( - "github.com/tendermint/tendermint/libs/merkle/tmhash" -) - -func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { - var hasher = tmhash.New() - err := encodeByteSlice(hasher, left) - if err != nil { - panic(err) - } - err = encodeByteSlice(hasher, right) - if err != nil { - panic(err) - } - return hasher.Sum(nil) -} - -func SimpleHashFromHashes(hashes [][]byte) []byte { - // Recursive impl. - switch len(hashes) { - case 0: - return nil - case 1: - return hashes[0] - default: - left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2]) - right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:]) - return SimpleHashFromTwoHashes(left, right) - } -} - -// NOTE: Do not implement this, use SimpleHashFromByteslices instead. -// type Byteser interface { Bytes() []byte } -// func SimpleHashFromBytesers(items []Byteser) []byte { ... } - -func SimpleHashFromByteslices(bzs [][]byte) []byte { - hashes := make([][]byte, len(bzs)) - for i, bz := range bzs { - hashes[i] = SimpleHashFromBytes(bz) - } - return SimpleHashFromHashes(hashes) -} - -func SimpleHashFromBytes(bz []byte) []byte { - hasher := tmhash.New() - hasher.Write(bz) - return hasher.Sum(nil) -} - -func SimpleHashFromHashers(items []Hasher) []byte { - hashes := make([][]byte, len(items)) - for i, item := range items { - hash := item.Hash() - hashes[i] = hash - } - return SimpleHashFromHashes(hashes) -} - -func SimpleHashFromMap(m map[string]Hasher) []byte { - sm := NewSimpleMap() - for k, v := range m { - sm.Set(k, v) - } - return sm.Hash() -} diff --git a/libs/merkle/simple_tree_test.go b/libs/merkle/simple_tree_test.go deleted file mode 100644 index f5c04af70..000000000 --- a/libs/merkle/simple_tree_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package merkle - -import ( - "bytes" - - cmn "github.com/tendermint/tendermint/libs/common" - . 
"github.com/tendermint/tendermint/libs/test" - - "testing" -) - -type testItem []byte - -func (tI testItem) Hash() []byte { - return []byte(tI) -} - -func TestSimpleProof(t *testing.T) { - - total := 100 - - items := make([]Hasher, total) - for i := 0; i < total; i++ { - items[i] = testItem(cmn.RandBytes(32)) - } - - rootHash := SimpleHashFromHashers(items) - - rootHash2, proofs := SimpleProofsFromHashers(items) - - if !bytes.Equal(rootHash, rootHash2) { - t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) - } - - // For each item, check the trail. - for i, item := range items { - itemHash := item.Hash() - proof := proofs[i] - - // Verify success - ok := proof.Verify(i, total, itemHash, rootHash) - if !ok { - t.Errorf("Verification failed for index %v.", i) - } - - // Wrong item index should make it fail - { - ok = proof.Verify((i+1)%total, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong index %v.", i) - } - } - - // Trail too long should make it fail - origAunts := proof.Aunts - proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } - proof.Aunts = origAunts - - // Trail too short should make it fail - proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } - proof.Aunts = origAunts - - // Mutating the itemHash should make it fail. - ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) - if ok { - t.Errorf("Expected verification to fail for mutated leaf hash") - } - - // Mutating the rootHash should make it fail. 
- ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) - if ok { - t.Errorf("Expected verification to fail for mutated root hash") - } - } -} diff --git a/libs/merkle/tmhash/hash.go b/libs/merkle/tmhash/hash.go deleted file mode 100644 index de69c406f..000000000 --- a/libs/merkle/tmhash/hash.go +++ /dev/null @@ -1,41 +0,0 @@ -package tmhash - -import ( - "crypto/sha256" - "hash" -) - -var ( - Size = 20 - BlockSize = sha256.BlockSize -) - -type sha256trunc struct { - sha256 hash.Hash -} - -func (h sha256trunc) Write(p []byte) (n int, err error) { - return h.sha256.Write(p) -} -func (h sha256trunc) Sum(b []byte) []byte { - shasum := h.sha256.Sum(b) - return shasum[:Size] -} - -func (h sha256trunc) Reset() { - h.sha256.Reset() -} - -func (h sha256trunc) Size() int { - return Size -} - -func (h sha256trunc) BlockSize() int { - return h.sha256.BlockSize() -} - -func New() hash.Hash { - return sha256trunc{ - sha256: sha256.New(), - } -} diff --git a/libs/merkle/tmhash/hash_test.go b/libs/merkle/tmhash/hash_test.go deleted file mode 100644 index 9744a8930..000000000 --- a/libs/merkle/tmhash/hash_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package tmhash_test - -import ( - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/libs/merkle/tmhash" -) - -func TestHash(t *testing.T) { - testVector := []byte("abc") - hasher := tmhash.New() - hasher.Write(testVector) - bz := hasher.Sum(nil) - - hasher = sha256.New() - hasher.Write(testVector) - bz2 := hasher.Sum(nil) - bz2 = bz2[:20] - - assert.Equal(t, bz, bz2) -} diff --git a/libs/merkle/types.go b/libs/merkle/types.go deleted file mode 100644 index a0c491a7e..000000000 --- a/libs/merkle/types.go +++ /dev/null @@ -1,47 +0,0 @@ -package merkle - -import ( - "encoding/binary" - "io" -) - -type Tree interface { - Size() (size int) - Height() (height int8) - Has(key []byte) (has bool) - Proof(key []byte) (value []byte, proof []byte, exists bool) // TODO make it return an 
index - Get(key []byte) (index int, value []byte, exists bool) - GetByIndex(index int) (key []byte, value []byte) - Set(key []byte, value []byte) (updated bool) - Remove(key []byte) (value []byte, removed bool) - HashWithCount() (hash []byte, count int) - Hash() (hash []byte) - Save() (hash []byte) - Load(hash []byte) - Copy() Tree - Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) - IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) -} - -type Hasher interface { - Hash() []byte -} - -//----------------------------------------------------------------------- -// NOTE: these are duplicated from go-amino so we dont need go-amino as a dep - -func encodeByteSlice(w io.Writer, bz []byte) (err error) { - err = encodeUvarint(w, uint64(len(bz))) - if err != nil { - return - } - _, err = w.Write(bz) - return -} - -func encodeUvarint(w io.Writer, i uint64) (err error) { - var buf [10]byte - n := binary.PutUvarint(buf[:], i) - _, err = w.Write(buf[0:n]) - return -} From 5923b6288fe8ce9581936ee97c2bf9cf9c02c2f4 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 14:40:59 -0400 Subject: [PATCH 514/515] update changelog --- CHANGELOG.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0d7896d0..9c9100157 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## 0.22.0 -*July 1st, 2018* +*July 2nd, 2018* BREAKING CHANGES: - [config] Rename `skip_upnp` to `upnp`, and turn it off by default. @@ -14,6 +14,8 @@ BREAKING CHANGES: * privKey.Sign returns error. * ed25519 address changed to the first 20-bytes of the SHA256 of the raw pubkey bytes * `tmlibs/merkle` -> `crypto/merkle`. Uses SHA256 instead of RIPEMD160 +- [tmlibs] Update to v0.9.0 and merge into `libs` + * remove `merkle` package (moved to `crypto/merkle`) - [rpc] `syncing` is now called `catching_up`. 
FEATURES @@ -29,8 +31,9 @@ IMPROVEMENT - [rpc/client] Supports https and wss now. - [crypto] Make public key size into public constants - [mempool] Log tx hash, not entire tx -- [abci] Merged in github.com/tendermint/abci and - github.com/tendermint/go-crypto +- [abci] Merged in github.com/tendermint/abci +- [crypto] Merged in github.com/tendermint/go-crypto +- [libs] Merged in github.com/tendermint/tmlibs - [docs] Move from .rst to .md BUG FIXES: From 931b465a05efe05bd9a6f8579f731fb94ff02a22 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 14:50:57 -0400 Subject: [PATCH 515/515] dev version bump --- CHANGELOG.md | 4 ++++ version/version.go | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c9100157..3e910e365 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 0.23.0 + +*TBD* + ## 0.22.0 *July 2nd, 2018* diff --git a/version/version.go b/version/version.go index 9be4c9d82..aac5dd587 100644 --- a/version/version.go +++ b/version/version.go @@ -3,14 +3,14 @@ package version // Version components const ( Maj = "0" - Min = "22" + Min = "23" Fix = "0" ) var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.22.0" + Version = "0.23.0-dev" // GitCommit is the current HEAD set using ldflags. GitCommit string