From 348c494c996b306056dcb11cf20043ca79a1e501 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 16 Mar 2022 14:08:43 +0000
Subject: [PATCH 1/6] build(deps): Bump github.com/stretchr/testify from 1.7.0
to 1.7.1 (#8131)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.0 to 1.7.1.
Commits:
- 083ff1c Fixed didPanic to now detect panic(nil).
- 1e36bfe Use cross Go version compatible build tag syntax
- e798dc2 Add docs on 1.17 build tags
- 83198c2 assert: guard CanConvert call in backward compatible wrapper
- 087b655 assert: allow comparing time.Time
- 7bcf74e fix msgAndArgs forwarding
- c29de71 add tests for correct msgAndArgs forwarding
- f87e2b2 Update builds
- ab6dc32 fix linting errors in /assert package
- edff5a0 fix funtion name
- Additional commits viewable in compare view
---
go.mod | 2 +-
go.sum | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/go.mod b/go.mod
index 80db11417..a4a14cb3d 100644
--- a/go.mod
+++ b/go.mod
@@ -28,7 +28,7 @@ require (
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.10.0
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
diff --git a/go.sum b/go.sum
index 5c1ebecc9..cd5b6efc6 100644
--- a/go.sum
+++ b/go.sum
@@ -971,8 +971,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
From bedb68078c86914a3df4b9182c69fe1756c42178 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 16 Mar 2022 11:57:41 -0400
Subject: [PATCH 2/6] libs/clist: remove unused surface area (#8134)
---
internal/libs/clist/bench_test.go | 2 +-
internal/libs/clist/clist.go | 78 +++++--------------------------
internal/libs/clist/clist_test.go | 26 +----------
3 files changed, 13 insertions(+), 93 deletions(-)
diff --git a/internal/libs/clist/bench_test.go b/internal/libs/clist/bench_test.go
index 95973cc76..ee5d836a7 100644
--- a/internal/libs/clist/bench_test.go
+++ b/internal/libs/clist/bench_test.go
@@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
start.removed = true
- start.DetachNext()
+ start.detachNext()
start.DetachPrev()
tmp := nxt
nxt = nxt.Next()
diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go
index 99e5f05bf..3969c94cc 100644
--- a/internal/libs/clist/clist.go
+++ b/internal/libs/clist/clist.go
@@ -44,7 +44,6 @@ waiting on NextWait() (since it's just a read operation).
type CElement struct {
mtx sync.RWMutex
prev *CElement
- prevWaitCh chan struct{}
next *CElement
nextWaitCh chan struct{}
removed bool
@@ -72,33 +71,6 @@ func (e *CElement) NextWait() *CElement {
}
}
-// Blocking implementation of Prev().
-// May return nil iff CElement was head and got removed.
-func (e *CElement) PrevWait() *CElement {
- for {
- e.mtx.RLock()
- prev := e.prev
- removed := e.removed
- signal := e.prevWaitCh
- e.mtx.RUnlock()
-
- if prev != nil || removed {
- return prev
- }
-
- <-signal
- }
-}
-
-// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does,
-// channel will be closed.
-func (e *CElement) PrevWaitChan() <-chan struct{} {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
-
- return e.prevWaitCh
-}
-
// NextWaitChan can be used to wait until Next becomes not nil. Once it does,
// channel will be closed.
func (e *CElement) NextWaitChan() <-chan struct{} {
@@ -131,7 +103,7 @@ func (e *CElement) Removed() bool {
return isRemoved
}
-func (e *CElement) DetachNext() {
+func (e *CElement) detachNext() {
e.mtx.Lock()
if !e.removed {
e.mtx.Unlock()
@@ -153,7 +125,7 @@ func (e *CElement) DetachPrev() {
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on nextWg.
-func (e *CElement) SetNext(newNext *CElement) {
+func (e *CElement) setNext(newNext *CElement) {
e.mtx.Lock()
oldNext := e.next
@@ -174,30 +146,20 @@ func (e *CElement) SetNext(newNext *CElement) {
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on prevWg
-func (e *CElement) SetPrev(newPrev *CElement) {
+func (e *CElement) setPrev(newPrev *CElement) {
e.mtx.Lock()
defer e.mtx.Unlock()
- oldPrev := e.prev
e.prev = newPrev
- if oldPrev != nil && newPrev == nil {
- e.prevWaitCh = make(chan struct{})
- }
- if oldPrev == nil && newPrev != nil {
- close(e.prevWaitCh)
- }
}
-func (e *CElement) SetRemoved() {
+func (e *CElement) setRemoved() {
e.mtx.Lock()
defer e.mtx.Unlock()
e.removed = true
- // This wakes up anyone waiting in either direction.
- if e.prev == nil {
- close(e.prevWaitCh)
- }
+ // This wakes up anyone waiting.
if e.next == nil {
close(e.nextWaitCh)
}
@@ -211,7 +173,6 @@ func (e *CElement) SetRemoved() {
// Panics if length grows beyond the max.
type CList struct {
mtx sync.RWMutex
- wg *sync.WaitGroup
waitCh chan struct{}
head *CElement // first element
tail *CElement // last element
@@ -250,7 +211,7 @@ func (l *CList) Front() *CElement {
return head
}
-func (l *CList) FrontWait() *CElement {
+func (l *CList) frontWait() *CElement {
// Loop until the head is non-nil else wait and try again
for {
l.mtx.RLock()
@@ -273,22 +234,6 @@ func (l *CList) Back() *CElement {
return back
}
-func (l *CList) BackWait() *CElement {
- for {
- l.mtx.RLock()
- tail := l.tail
- wg := l.wg
- l.mtx.RUnlock()
-
- if tail != nil {
- return tail
- }
- wg.Wait()
- // l.tail doesn't necessarily exist here.
- // That's why we need to continue a for-loop.
- }
-}
-
// WaitChan can be used to wait until Front or Back becomes not nil. Once it
// does, channel will be closed.
func (l *CList) WaitChan() <-chan struct{} {
@@ -305,7 +250,6 @@ func (l *CList) PushBack(v interface{}) *CElement {
// Construct a new element
e := &CElement{
prev: nil,
- prevWaitCh: make(chan struct{}),
next: nil,
nextWaitCh: make(chan struct{}),
removed: false,
@@ -326,8 +270,8 @@ func (l *CList) PushBack(v interface{}) *CElement {
l.head = e
l.tail = e
} else {
- e.SetPrev(l.tail) // We must init e first.
- l.tail.SetNext(e) // This will make e accessible.
+ e.setPrev(l.tail) // We must init e first.
+ l.tail.setNext(e) // This will make e accessible.
l.tail = e // Update the list.
}
l.mtx.Unlock()
@@ -365,16 +309,16 @@ func (l *CList) Remove(e *CElement) interface{} {
if prev == nil {
l.head = next
} else {
- prev.SetNext(next)
+ prev.setNext(next)
}
if next == nil {
l.tail = prev
} else {
- next.SetPrev(prev)
+ next.setPrev(prev)
}
// Set .Done() on e, otherwise waiters will wait forever.
- e.SetRemoved()
+ e.setRemoved()
return e.Value
}
diff --git a/internal/libs/clist/clist_test.go b/internal/libs/clist/clist_test.go
index a0482fc40..a0449a985 100644
--- a/internal/libs/clist/clist_test.go
+++ b/internal/libs/clist/clist_test.go
@@ -218,7 +218,7 @@ func TestScanRightDeleteRandom(t *testing.T) {
default:
}
if el == nil {
- el = l.FrontWait()
+ el = l.frontWait()
restartCounter++
}
el = el.Next()
@@ -314,30 +314,6 @@ FOR_LOOP:
t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
}
- // 4) test iterating backwards (PrevWaitChan and Prev)
- prev := next
- seen = 0
-FOR_LOOP2:
- for {
- select {
- case <-prev.PrevWaitChan():
- prev = prev.Prev()
- seen++
- if prev == nil {
- t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem")
- }
- if pushed == seen {
- break FOR_LOOP2
- }
-
- case <-time.After(250 * time.Millisecond):
- break FOR_LOOP2
- }
- }
-
- if pushed != seen {
- t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
- }
}
func TestRemoved(t *testing.T) {
From 7a0b05f22d51a51fdffedc37ea53d5efe4f975db Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Thu, 17 Mar 2022 09:02:02 -0400
Subject: [PATCH 3/6] libs/events: remove unnecessary unsubscription code
(#8135)
The event switch code is largely vestigial and is responsible for
wiring between the consensus state machine and the consensus
reactor. While there might historically have been a need to manage
these subscriptions at runtime, that ability is no longer used:
subscriptions are registered during startup, and the switch simply
shuts down at the end.
Eventually the EventSwitch should be replaced by a much smaller
event loop implementation in the consensus state machine, but
cutting down on the scope of the event switch will help clarify the
requirements from the consensus side.
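For illustration only (not part of this patch): a minimal sketch of how the trimmed-down switch is meant to be used after this change. Listeners are registered once at startup, and the switch is torn down with the rest of the service, so no runtime unsubscription API is needed. The listener ID and event name below are placeholders rather than the constants the consensus reactor actually uses.

```go
package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/libs/events"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	evsw := events.NewEventSwitch(log.NewNopLogger())
	if err := evsw.Start(ctx); err != nil {
		panic(err)
	}

	// Register listeners once, during startup; there is no runtime
	// unsubscription anymore.
	if err := evsw.AddListenerForEvent("my-listener", "my-event",
		func(ctx context.Context, data events.EventData) error {
			fmt.Println("got event:", data)
			return nil
		}); err != nil {
		panic(err)
	}

	evsw.FireEvent(ctx, "my-event", "some payload")

	// Shutting the switch down via context cancellation replaces the old
	// per-listener RemoveListener calls.
	cancel()
	evsw.Wait()
}
```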
---
internal/consensus/reactor.go | 6 --
libs/events/events.go | 55 +----------
libs/events/events_test.go | 180 +---------------------------------
3 files changed, 4 insertions(+), 237 deletions(-)
diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go
index efb3f2d04..822e8c627 100644
--- a/internal/consensus/reactor.go
+++ b/internal/consensus/reactor.go
@@ -219,8 +219,6 @@ func (r *Reactor) OnStart(ctx context.Context) error {
// blocking until they all exit, as well as unsubscribing from events and stopping
// state.
func (r *Reactor) OnStop() {
- r.unsubscribeFromBroadcastEvents()
-
r.state.Stop()
if !r.WaitSync() {
@@ -394,10 +392,6 @@ func (r *Reactor) subscribeToBroadcastEvents() {
}
}
-func (r *Reactor) unsubscribeFromBroadcastEvents() {
- r.state.evsw.RemoveListener(listenerIDConsensus)
-}
-
func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
return &tmcons.NewRoundStep{
Height: rs.Height,
diff --git a/libs/events/events.go b/libs/events/events.go
index 636aa102d..5ab5961f6 100644
--- a/libs/events/events.go
+++ b/libs/events/events.go
@@ -50,8 +50,6 @@ type EventSwitch interface {
Stop()
AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error
- RemoveListenerForEvent(event string, listenerID string)
- RemoveListener(listenerID string)
}
type eventSwitch struct {
@@ -71,11 +69,8 @@ func NewEventSwitch(logger log.Logger) EventSwitch {
return evsw
}
-func (evsw *eventSwitch) OnStart(ctx context.Context) error {
- return nil
-}
-
-func (evsw *eventSwitch) OnStop() {}
+func (evsw *eventSwitch) OnStart(ctx context.Context) error { return nil }
+func (evsw *eventSwitch) OnStop() {}
func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error {
// Get/Create eventCell and listener.
@@ -103,52 +98,6 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb E
return nil
}
-func (evsw *eventSwitch) RemoveListener(listenerID string) {
- // Get and remove listener.
- evsw.mtx.RLock()
- listener := evsw.listeners[listenerID]
- evsw.mtx.RUnlock()
- if listener == nil {
- return
- }
-
- evsw.mtx.Lock()
- delete(evsw.listeners, listenerID)
- evsw.mtx.Unlock()
-
- // Remove callback for each event.
- listener.SetRemoved()
- for _, event := range listener.GetEvents() {
- evsw.RemoveListenerForEvent(event, listenerID)
- }
-}
-
-func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) {
- // Get eventCell
- evsw.mtx.Lock()
- eventCell := evsw.eventCells[event]
- evsw.mtx.Unlock()
-
- if eventCell == nil {
- return
- }
-
- // Remove listenerID from eventCell
- numListeners := eventCell.RemoveListener(listenerID)
-
- // Maybe garbage collect eventCell.
- if numListeners == 0 {
- // Lock again and double check.
- evsw.mtx.Lock() // OUTER LOCK
- eventCell.mtx.Lock() // INNER LOCK
- if len(eventCell.listeners) == 0 {
- delete(evsw.eventCells, event)
- }
- eventCell.mtx.Unlock() // INNER LOCK
- evsw.mtx.Unlock() // OUTER LOCK
- }
-}
-
func (evsw *eventSwitch) FireEvent(ctx context.Context, event string, data EventData) {
// Get the eventCell
evsw.mtx.RLock()
diff --git a/libs/events/events_test.go b/libs/events/events_test.go
index 6d0c8b4e7..15f208e06 100644
--- a/libs/events/events_test.go
+++ b/libs/events/events_test.go
@@ -7,7 +7,6 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
@@ -28,8 +27,6 @@ func TestAddListenerForEventFireOnce(t *testing.T) {
messages := make(chan EventData)
require.NoError(t, evsw.AddListenerForEvent("listener", "event",
func(ctx context.Context, data EventData) error {
- // test there's no deadlock if we remove the listener inside a callback
- evsw.RemoveListener("listener")
select {
case messages <- data:
return nil
@@ -234,171 +231,7 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) {
}
}
-func TestAddAndRemoveListenerConcurrency(t *testing.T) {
- var (
- stopInputEvent = false
- roundCount = 2000
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- logger := log.NewTestingLogger(t)
-
- evsw := NewEventSwitch(logger)
- require.NoError(t, evsw.Start(ctx))
- t.Cleanup(evsw.Wait)
-
- done1 := make(chan struct{})
- done2 := make(chan struct{})
-
- // Must be executed concurrently to uncover the data race.
- // 1. RemoveListener
- go func() {
- defer close(done1)
- for i := 0; i < roundCount; i++ {
- evsw.RemoveListener("listener")
- }
- }()
-
- // 2. AddListenerForEvent
- go func() {
- defer close(done2)
- for i := 0; i < roundCount; i++ {
- index := i
- // we explicitly ignore errors here, since the listener will sometimes be removed
- // (that's what we're testing)
- _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index),
- func(ctx context.Context, data EventData) error {
- t.Errorf("should not run callback for %d.\n", index)
- stopInputEvent = true
- return nil
- })
- }
- }()
-
- <-done1
- <-done2
-
- evsw.RemoveListener("listener") // remove the last listener
-
- for i := 0; i < roundCount && !stopInputEvent; i++ {
- evsw.FireEvent(ctx, fmt.Sprintf("event%d", i), uint64(1001))
- }
-}
-
-// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to
-// two events, fires a thousand integers for the first event, then unsubscribes
-// the listener and fires a thousand integers for the second event.
-func TestAddAndRemoveListener(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- logger := log.NewTestingLogger(t)
- evsw := NewEventSwitch(logger)
- require.NoError(t, evsw.Start(ctx))
- t.Cleanup(evsw.Wait)
-
- doneSum1 := make(chan uint64)
- doneSum2 := make(chan uint64)
- doneSending1 := make(chan uint64)
- doneSending2 := make(chan uint64)
- numbers1 := make(chan uint64, 4)
- numbers2 := make(chan uint64, 4)
- // subscribe two listener to three events
- require.NoError(t, evsw.AddListenerForEvent("listener", "event1",
- func(ctx context.Context, data EventData) error {
- select {
- case numbers1 <- data.(uint64):
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
- }))
- require.NoError(t, evsw.AddListenerForEvent("listener", "event2",
- func(ctx context.Context, data EventData) error {
- select {
- case numbers2 <- data.(uint64):
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
- }))
- // collect received events for event1
- go sumReceivedNumbers(numbers1, doneSum1)
- // collect received events for event2
- go sumReceivedNumbers(numbers2, doneSum2)
- // go fire events
- go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1))
- checkSumEvent1 := <-doneSending1
- // after sending all event1, unsubscribe for all events
- evsw.RemoveListener("listener")
- go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001))
- checkSumEvent2 := <-doneSending2
- close(numbers1)
- close(numbers2)
- eventSum1 := <-doneSum1
- eventSum2 := <-doneSum2
- if checkSumEvent1 != eventSum1 ||
- // correct value asserted by preceding tests, suffices to be non-zero
- checkSumEvent2 == uint64(0) ||
- eventSum2 != uint64(0) {
- t.Errorf("not all messages sent were received or unsubscription did not register.\n")
- }
-}
-
-// TestRemoveListener does basic tests on adding and removing
-func TestRemoveListener(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- logger := log.NewTestingLogger(t)
-
- evsw := NewEventSwitch(logger)
- require.NoError(t, evsw.Start(ctx))
- t.Cleanup(evsw.Wait)
-
- count := 10
- sum1, sum2 := 0, 0
- // add some listeners and make sure they work
- require.NoError(t, evsw.AddListenerForEvent("listener", "event1",
- func(ctx context.Context, data EventData) error {
- sum1++
- return nil
- }))
- require.NoError(t, evsw.AddListenerForEvent("listener", "event2",
- func(ctx context.Context, data EventData) error {
- sum2++
- return nil
- }))
-
- for i := 0; i < count; i++ {
- evsw.FireEvent(ctx, "event1", true)
- evsw.FireEvent(ctx, "event2", true)
- }
- assert.Equal(t, count, sum1)
- assert.Equal(t, count, sum2)
-
- // remove one by event and make sure it is gone
- evsw.RemoveListenerForEvent("event2", "listener")
- for i := 0; i < count; i++ {
- evsw.FireEvent(ctx, "event1", true)
- evsw.FireEvent(ctx, "event2", true)
- }
- assert.Equal(t, count*2, sum1)
- assert.Equal(t, count, sum2)
-
- // remove the listener entirely and make sure both gone
- evsw.RemoveListener("listener")
- for i := 0; i < count; i++ {
- evsw.FireEvent(ctx, "event1", true)
- evsw.FireEvent(ctx, "event2", true)
- }
- assert.Equal(t, count*2, sum1)
- assert.Equal(t, count, sum2)
-}
-
-// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two
+// TestManageListenersAsync sets up an EventSwitch, subscribes two
// listeners to three events, and fires a thousand integers for each event.
// These two listeners serve as the baseline validation while other listeners
// are randomly subscribed and unsubscribed.
@@ -408,7 +241,7 @@ func TestRemoveListener(t *testing.T) {
// at that point subscribed to.
// NOTE: it is important to run this test with race conditions tracking on,
// `go test -race`, to examine for possible race conditions.
-func TestRemoveListenersAsync(t *testing.T) {
+func TestManageListenersAsync(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
@@ -494,18 +327,9 @@ func TestRemoveListenersAsync(t *testing.T) {
func(context.Context, EventData) error { return nil })
}
}
- removeListenersStress := func() {
- r2 := rand.New(rand.NewSource(time.Now().Unix()))
- r2.Seed(time.Now().UnixNano())
- for k := uint16(0); k < 80; k++ {
- listenerNumber := r2.Intn(100) + 3
- go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber))
- }
- }
addListenersStress()
// go fire events
go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1))
- removeListenersStress()
go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001))
go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001))
checkSumEvent1 := <-doneSending1
From 07b46d5a054365b576b8dbab9744c944a4d6e4f4 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Thu, 17 Mar 2022 13:30:13 -0400
Subject: [PATCH 4/6] blocksync: drop redundant shutdown mechanisms (#8136)
---
internal/blocksync/pool.go | 16 ----------------
internal/blocksync/reactor.go | 2 --
2 files changed, 18 deletions(-)
diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go
index 4c905c660..7f133a7a1 100644
--- a/internal/blocksync/pool.go
+++ b/internal/blocksync/pool.go
@@ -86,7 +86,6 @@ type BlockPool struct {
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
- exitedCh chan struct{}
startHeight int64
lastHundredBlockTimeStamp time.Time
@@ -109,7 +108,6 @@ func NewBlockPool(
height: start,
startHeight: start,
numPending: 0,
- exitedCh: make(chan struct{}),
requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
@@ -125,11 +123,6 @@ func (pool *BlockPool) OnStart(ctx context.Context) error {
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine(ctx)
- go func() {
- defer close(pool.exitedCh)
- pool.Wait()
- }()
-
return nil
}
@@ -637,12 +630,6 @@ func (bpr *bpRequester) redo(peerID types.NodeID) {
// Responsible for making more requests as necessary
// Returns only when a block is found (e.g. AddBlock() is called)
func (bpr *bpRequester) requestRoutine(ctx context.Context) {
- bprPoolDone := make(chan struct{})
- go func() {
- defer close(bprPoolDone)
- bpr.pool.Wait()
- }()
-
OUTER_LOOP:
for {
// Pick a peer to send request to.
@@ -670,9 +657,6 @@ OUTER_LOOP:
select {
case <-ctx.Done():
return
- case <-bpr.pool.exitedCh:
- bpr.Stop()
- return
case peerID := <-bpr.redoCh:
if peerID == bpr.peerID {
bpr.reset()
diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go
index cf1a10623..89c8c642b 100644
--- a/internal/blocksync/reactor.go
+++ b/internal/blocksync/reactor.go
@@ -445,8 +445,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) {
select {
case <-ctx.Done():
return
- case <-r.pool.exitedCh:
- return
case <-switchToConsensusTicker.C:
var (
height, numPending, lenRequesters = r.pool.GetStatus()
From 1dd8807cc32e2ca5d59ec032fb708809119a6129 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Thu, 17 Mar 2022 13:45:27 -0400
Subject: [PATCH 5/6] mempool: test harness should expose application (#8143)
This is minor, but I was trying to write a test and realized that the
application reference in the harness isn't actually used, which is
quite confusing.
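For illustration only (not part of this patch): a hypothetical test sketch of the new calling convention, assuming it sits alongside the existing tests in the internal/mempool package (so the unexported `setup` helper is in scope). The test name is made up; the point is that the caller now constructs and starts the ABCI client, keeping the application visible to the test.

```go
package mempool

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/libs/log"
)

func TestHarnessCallerOwnsApplication(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The test owns both the application and the ABCI client.
	app := kvstore.NewApplication()
	client := abciclient.NewLocalClient(log.NewNopLogger(), app)
	require.NoError(t, client.Start(ctx))
	t.Cleanup(client.Wait)

	// setup now receives the started client instead of building one
	// internally; app stays reachable for assertions against its state.
	txmp := setup(ctx, t, client, 0)
	require.Zero(t, txmp.Size())
}
```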
---
internal/mempool/mempool_bench_test.go | 10 ++-
internal/mempool/mempool_test.go | 105 +++++++++++++++++++------
internal/mempool/reactor_test.go | 34 +++++---
3 files changed, 113 insertions(+), 36 deletions(-)
diff --git a/internal/mempool/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go
index 088af174a..843e42e87 100644
--- a/internal/mempool/mempool_bench_test.go
+++ b/internal/mempool/mempool_bench_test.go
@@ -8,15 +8,23 @@ import (
"time"
"github.com/stretchr/testify/require"
+ abciclient "github.com/tendermint/tendermint/abci/client"
+ "github.com/tendermint/tendermint/abci/example/kvstore"
+ "github.com/tendermint/tendermint/libs/log"
)
func BenchmarkTxMempool_CheckTx(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
+ client := abciclient.NewLocalClient(log.NewNopLogger(), kvstore.NewApplication())
+ if err := client.Start(ctx); err != nil {
+ b.Fatal(err)
+ }
+
// setup the cache and the mempool number for hitting GetEvictableTxs during the
// benchmark. 5000 is the current default mempool size in the TM config.
- txmp := setup(ctx, b, 10000)
+ txmp := setup(ctx, b, client, 10000)
txmp.config.Size = 5000
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go
index e4f604cb1..165e4bd20 100644
--- a/internal/mempool/mempool_test.go
+++ b/internal/mempool/mempool_test.go
@@ -72,30 +72,18 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
}
}
-func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
+func setup(ctx context.Context, t testing.TB, app abciclient.Client, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
-
logger := log.TestingLogger()
- conn := abciclient.NewLocalClient(logger, &application{
- kvstore.NewApplication(),
- })
-
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
cfg.Mempool.CacheSize = cacheSize
- require.NoError(t, conn.Start(ctx))
- t.Cleanup(func() {
- os.RemoveAll(cfg.RootDir)
- cancel()
- conn.Wait()
- })
+ t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
- return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, conn, options...)
+ return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...)
}
func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@@ -137,7 +125,13 @@ func TestTxMempool_TxsAvailable(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 0)
txmp.EnableTxsAvailable()
ensureNoTxFire := func() {
@@ -194,7 +188,13 @@ func TestTxMempool_Size(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 0)
txs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -221,7 +221,13 @@ func TestTxMempool_Flush(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 0)
txs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -249,7 +255,13 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 0)
tTxs := checkTxs(ctx, t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -302,7 +314,13 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 0)
tTxs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -354,7 +372,12 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+ txmp := setup(ctx, t, client, 0)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes+1)
@@ -374,7 +397,13 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 100)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -392,7 +421,13 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 100)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -417,7 +452,13 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 100)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 100)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
checkTxDone := make(chan struct{})
@@ -484,7 +525,13 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- txmp := setup(ctx, t, 500)
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
+ txmp := setup(ctx, t, client, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10
@@ -556,10 +603,16 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
+ client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+ if err := client.Start(ctx); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(client.Wait)
+
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
- txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn))
+ txmp := setup(ctx, t, client, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go
index 04e51ca8d..dab63af73 100644
--- a/internal/mempool/reactor_test.go
+++ b/internal/mempool/reactor_test.go
@@ -13,6 +13,7 @@ import (
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/require"
+ abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
@@ -39,7 +40,7 @@ type reactorTestSuite struct {
nodes []types.NodeID
}
-func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
+func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNodes int, chBuf uint) *reactorTestSuite {
t.Helper()
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
@@ -63,7 +64,11 @@ func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint)
for nodeID := range rts.network.Nodes {
rts.kvstores[nodeID] = kvstore.NewApplication()
- mempool := setup(ctx, t, 0)
+ client := abciclient.NewLocalClient(logger, rts.kvstores[nodeID])
+ require.NoError(t, client.Start(ctx))
+ t.Cleanup(client.Wait)
+
+ mempool := setup(ctx, t, client, 0)
rts.mempools[nodeID] = mempool
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
@@ -151,7 +156,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
defer cancel()
const numNodes = 2
- rts := setupReactors(ctx, t, numNodes, 0)
+
+ logger := log.NewNopLogger()
+ rts := setupReactors(ctx, t, logger, numNodes, 0)
observePanic := func(r interface{}) {
t.Fatal("panic detected in reactor")
@@ -194,7 +201,9 @@ func TestReactorBroadcastTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, numNodes, uint(numTxs))
+ logger := log.NewNopLogger()
+
+ rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs))
primary := rts.nodes[0]
secondaries := rts.nodes[1:]
@@ -218,7 +227,8 @@ func TestReactorConcurrency(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, numNodes, 0)
+ logger := log.NewNopLogger()
+ rts := setupReactors(ctx, t, logger, numNodes, 0)
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -276,7 +286,8 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, numNodes, uint(numTxs))
+ logger := log.NewNopLogger()
+ rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs))
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -300,7 +311,9 @@ func TestReactor_MaxTxBytes(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, numNodes, 0)
+ logger := log.NewNopLogger()
+
+ rts := setupReactors(ctx, t, logger, numNodes, 0)
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -336,7 +349,8 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, 1, MaxActiveIDs+1)
+ logger := log.NewNopLogger()
+ rts := setupReactors(ctx, t, logger, 1, MaxActiveIDs+1)
nodeID := rts.nodes[0]
@@ -388,7 +402,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rts := setupReactors(ctx, t, 2, 2)
+ logger := log.NewNopLogger()
+
+ rts := setupReactors(ctx, t, logger, 2, 2)
primary := rts.nodes[0]
secondary := rts.nodes[1]
From 02c7199eece1dc9295a44e80c229f660c61df13b Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Thu, 17 Mar 2022 14:49:37 -0400
Subject: [PATCH 6/6] types: update synchrony params to match checked in proto
(#8142)
The `.proto` files do not have the `nullable = false` annotation on the `SynchronyParams` durations. This pull request updates `SynchronyParams` to match the checked-in proto files. Note that this does not make the code buildable against the latest protos: it was achieved by checking out all files _not relevant_ to `SynchronyParams` and removing the new `TimeoutParams` from the `params.proto` file. Future updates will add these back.
This pull request also adds a `nil` check to the `pbParams.Synchrony` field in `ConsensusParamsFromProto`. Old versions of Tendermint will not have the `Synchrony` parameters filled in, so this code would otherwise panic on startup.
We will fill in the empty fields with defaults, but per https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-009-consensus-parameter-upgrades.md#only-update-hashedparams-on-hash-breaking-releases we will keep them out of the hash during this release.
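For illustration only (not part of this patch): a standalone sketch of the nil-guard pattern introduced here, using stand-in struct types rather than the generated proto code. Unset `*time.Duration` fields from older peers fall back to zero values instead of causing a panic.

```go
package main

import (
	"fmt"
	"time"
)

// Stand-ins for the generated proto type and the domain type; the real
// definitions live in proto/tendermint/types and types/params.go.
type protoSynchronyParams struct {
	MessageDelay *time.Duration
	Precision    *time.Duration
}

type synchronyParams struct {
	MessageDelay time.Duration
	Precision    time.Duration
}

// fromProto mirrors the nil-guarding added to ConsensusParamsFromProto:
// fields left unset by older nodes keep their zero/default values rather
// than dereferencing a nil pointer.
func fromProto(pb *protoSynchronyParams) synchronyParams {
	var sp synchronyParams
	if pb == nil {
		return sp
	}
	if pb.MessageDelay != nil {
		sp.MessageDelay = *pb.MessageDelay
	}
	if pb.Precision != nil {
		sp.Precision = *pb.Precision
	}
	return sp
}

func main() {
	// An old peer that never set Synchrony: no panic, defaults remain.
	fmt.Println(fromProto(nil))

	// A new peer with both fields populated.
	d := 550 * time.Millisecond
	p := 90 * time.Millisecond
	fmt.Println(fromProto(&protoSynchronyParams{MessageDelay: &d, Precision: &p}))
}
```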
---
proto/tendermint/statesync/message_test.go | 9 +-
proto/tendermint/types/params.pb.go | 164 +++++++++++++--------
types/params.go | 27 ++--
types/params_test.go | 8 +-
4 files changed, 130 insertions(+), 78 deletions(-)
diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go
index a0b241615..1db421aca 100644
--- a/proto/tendermint/statesync/message_test.go
+++ b/proto/tendermint/statesync/message_test.go
@@ -3,6 +3,7 @@ package statesync_test
import (
"encoding/hex"
"testing"
+ "time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
@@ -204,8 +205,8 @@ func TestStateSyncVectors(t *testing.T) {
AppVersion: 11,
},
Synchrony: &tmproto.SynchronyParams{
- MessageDelay: 550,
- Precision: 90,
+ MessageDelay: durationPtr(550),
+ Precision: durationPtr(90),
},
},
},
@@ -224,3 +225,7 @@ func TestStateSyncVectors(t *testing.T) {
require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
}
}
+
+func durationPtr(t time.Duration) *time.Duration {
+ return &t
+}
diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go
index 32689932c..ff55379df 100644
--- a/proto/tendermint/types/params.pb.go
+++ b/proto/tendermint/types/params.pb.go
@@ -381,9 +381,17 @@ func (m *HashedParams) GetBlockMaxGas() int64 {
return 0
}
+// SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid.
+// These parameters are part of the proposer-based timestamps algorithm. For more information,
+// see the specification of proposer-based timestamps:
+// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp
type SynchronyParams struct {
- MessageDelay time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay"`
- Precision time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision"`
+ // message_delay bounds how long a proposal message may take to reach all validators on a network
+ // and still be considered valid.
+ MessageDelay *time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay,omitempty"`
+ // precision bounds how skewed a proposer's clock may be from any validator
+ // on the network while still producing valid proposals.
+ Precision *time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision,omitempty"`
}
func (m *SynchronyParams) Reset() { *m = SynchronyParams{} }
@@ -419,18 +427,18 @@ func (m *SynchronyParams) XXX_DiscardUnknown() {
var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo
-func (m *SynchronyParams) GetMessageDelay() time.Duration {
+func (m *SynchronyParams) GetMessageDelay() *time.Duration {
if m != nil {
return m.MessageDelay
}
- return 0
+ return nil
}
-func (m *SynchronyParams) GetPrecision() time.Duration {
+func (m *SynchronyParams) GetPrecision() *time.Duration {
if m != nil {
return m.Precision
}
- return 0
+ return nil
}
func init() {
@@ -446,43 +454,43 @@ func init() {
func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) }
var fileDescriptor_e12598271a686f57 = []byte{
- // 561 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xd4, 0x40,
- 0x18, 0xc7, 0x37, 0xdd, 0xbe, 0xec, 0x3e, 0xdb, 0xed, 0x96, 0x41, 0x30, 0x56, 0x9a, 0x5d, 0x73,
- 0x90, 0x82, 0x90, 0x88, 0x45, 0x44, 0x10, 0xa4, 0xdb, 0x8a, 0x05, 0xa9, 0x48, 0x7c, 0x39, 0xf4,
- 0x12, 0x26, 0xbb, 0x63, 0x36, 0x74, 0x93, 0x19, 0x32, 0xc9, 0xb2, 0xf9, 0x16, 0x1e, 0x3d, 0x79,
- 0xd6, 0x8f, 0xe1, 0xad, 0xc7, 0x1e, 0x3d, 0xa9, 0xec, 0x7e, 0x11, 0x99, 0xc9, 0x4c, 0xd3, 0xdd,
- 0x2a, 0xd8, 0x5b, 0x32, 0xcf, 0xef, 0x97, 0x87, 0xf9, 0x3f, 0x93, 0x81, 0xdd, 0x8c, 0x24, 0x43,
- 0x92, 0xc6, 0x51, 0x92, 0xb9, 0x59, 0xc1, 0x08, 0x77, 0x19, 0x4e, 0x71, 0xcc, 0x1d, 0x96, 0xd2,
- 0x8c, 0xa2, 0xed, 0xaa, 0xec, 0xc8, 0xf2, 0xce, 0xad, 0x90, 0x86, 0x54, 0x16, 0x5d, 0xf1, 0x54,
- 0x72, 0x3b, 0x56, 0x48, 0x69, 0x38, 0x26, 0xae, 0x7c, 0x0b, 0xf2, 0x8f, 0xee, 0x30, 0x4f, 0x71,
- 0x16, 0xd1, 0xa4, 0xac, 0xdb, 0xdf, 0x57, 0xa0, 0x73, 0x48, 0x13, 0x4e, 0x12, 0x9e, 0xf3, 0x37,
- 0xb2, 0x03, 0xda, 0x87, 0xb5, 0x60, 0x4c, 0x07, 0x67, 0xa6, 0xd1, 0x33, 0xf6, 0x5a, 0x8f, 0x76,
- 0x9d, 0xe5, 0x5e, 0x4e, 0x5f, 0x94, 0x4b, 0xda, 0x2b, 0x59, 0xf4, 0x0c, 0x1a, 0x64, 0x12, 0x0d,
- 0x49, 0x32, 0x20, 0xe6, 0x8a, 0xf4, 0x7a, 0xd7, 0xbd, 0x17, 0x8a, 0x50, 0xea, 0xa5, 0x81, 0x9e,
- 0x43, 0x73, 0x82, 0xc7, 0xd1, 0x10, 0x67, 0x34, 0x35, 0xeb, 0x52, 0xbf, 0x77, 0x5d, 0xff, 0xa0,
- 0x11, 0xe5, 0x57, 0x0e, 0x7a, 0x0a, 0x1b, 0x13, 0x92, 0xf2, 0x88, 0x26, 0xe6, 0xaa, 0xd4, 0xbb,
- 0x7f, 0xd1, 0x4b, 0x40, 0xc9, 0x9a, 0x17, 0xbd, 0x79, 0x91, 0x0c, 0x46, 0x29, 0x4d, 0x0a, 0x73,
- 0xed, 0x5f, 0xbd, 0xdf, 0x6a, 0x44, 0xf7, 0xbe, 0x74, 0xec, 0x43, 0x68, 0x5d, 0x09, 0x04, 0xdd,
- 0x85, 0x66, 0x8c, 0xa7, 0x7e, 0x50, 0x64, 0x84, 0xcb, 0x08, 0xeb, 0x5e, 0x23, 0xc6, 0xd3, 0xbe,
- 0x78, 0x47, 0xb7, 0x61, 0x43, 0x14, 0x43, 0xcc, 0x65, 0x4a, 0x75, 0x6f, 0x3d, 0xc6, 0xd3, 0x97,
- 0x98, 0xdb, 0xdf, 0x0c, 0xd8, 0x5a, 0x8c, 0x07, 0x3d, 0x00, 0x24, 0x58, 0x1c, 0x12, 0x3f, 0xc9,
- 0x63, 0x5f, 0xe6, 0xac, 0xbf, 0xd8, 0x89, 0xf1, 0xf4, 0x20, 0x24, 0xaf, 0xf3, 0x58, 0xb6, 0xe6,
- 0xe8, 0x04, 0xb6, 0x35, 0xac, 0x47, 0xac, 0xe6, 0x70, 0xc7, 0x29, 0xcf, 0x80, 0xa3, 0xcf, 0x80,
- 0x73, 0xa4, 0x80, 0x7e, 0xe3, 0xfc, 0x67, 0xb7, 0xf6, 0xf9, 0x57, 0xd7, 0xf0, 0xb6, 0xca, 0xef,
- 0xe9, 0xca, 0xe2, 0x26, 0xea, 0x8b, 0x9b, 0xb0, 0x1f, 0x43, 0x67, 0x69, 0x14, 0xc8, 0x86, 0x36,
- 0xcb, 0x03, 0xff, 0x8c, 0x14, 0xbe, 0xcc, 0xcb, 0x34, 0x7a, 0xf5, 0xbd, 0xa6, 0xd7, 0x62, 0x79,
- 0xf0, 0x8a, 0x14, 0xef, 0xc4, 0x92, 0xfd, 0x10, 0xda, 0x0b, 0x23, 0x40, 0x5d, 0x68, 0x61, 0xc6,
- 0x7c, 0x3d, 0x38, 0xb1, 0xb3, 0x55, 0x0f, 0x30, 0x63, 0x0a, 0xb3, 0x4f, 0x61, 0xf3, 0x18, 0xf3,
- 0x11, 0x19, 0x2a, 0xe1, 0x3e, 0x74, 0x64, 0x0a, 0xfe, 0x72, 0xc0, 0x6d, 0xb9, 0x7c, 0xa2, 0x53,
- 0xb6, 0xa1, 0x5d, 0x71, 0x55, 0xd6, 0x2d, 0x4d, 0x89, 0xc0, 0xbf, 0x18, 0xd0, 0x59, 0x1a, 0x2a,
- 0x3a, 0x86, 0x76, 0x4c, 0x38, 0x97, 0x21, 0x92, 0x31, 0x2e, 0xd4, 0x1f, 0xf0, 0x5f, 0x09, 0x6e,
- 0x2a, 0xf3, 0x48, 0x88, 0xe8, 0x00, 0x9a, 0x2c, 0x25, 0x83, 0x88, 0xdf, 0x70, 0x0e, 0x95, 0xd5,
- 0x7f, 0xff, 0x75, 0x66, 0x19, 0xe7, 0x33, 0xcb, 0xb8, 0x98, 0x59, 0xc6, 0xef, 0x99, 0x65, 0x7c,
- 0x9a, 0x5b, 0xb5, 0x8b, 0xb9, 0x55, 0xfb, 0x31, 0xb7, 0x6a, 0xa7, 0x4f, 0xc2, 0x28, 0x1b, 0xe5,
- 0x81, 0x33, 0xa0, 0xb1, 0x7b, 0xf5, 0xaa, 0xa8, 0x1e, 0xcb, 0xbb, 0x60, 0xf9, 0x1a, 0x09, 0xd6,
- 0xe5, 0xfa, 0xfe, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x26, 0x8a, 0x0b, 0x61, 0x04, 0x00,
- 0x00,
+ // 565 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4d, 0x8b, 0xd3, 0x40,
+ 0x18, 0xc7, 0x9b, 0xed, 0xbe, 0xb4, 0x4f, 0xb7, 0xdb, 0x65, 0x10, 0x8c, 0x2b, 0x9b, 0xd6, 0x1c,
+ 0x64, 0x41, 0x48, 0xc4, 0x45, 0x44, 0x50, 0xc4, 0x6e, 0x45, 0x41, 0x56, 0x24, 0xbe, 0x1c, 0xf6,
+ 0x12, 0x26, 0xed, 0x98, 0x86, 0x6d, 0x32, 0x43, 0x26, 0x29, 0xcd, 0xb7, 0xf0, 0x24, 0x7e, 0x04,
+ 0xfd, 0x18, 0xde, 0xf6, 0xb8, 0x47, 0x4f, 0x2a, 0xed, 0x17, 0x91, 0x99, 0xcc, 0x6c, 0xb6, 0x5d,
+ 0x15, 0x6f, 0xc9, 0x3c, 0xff, 0xdf, 0x3c, 0xcc, 0xef, 0x49, 0x06, 0xf6, 0x33, 0x92, 0x8c, 0x48,
+ 0x1a, 0x47, 0x49, 0xe6, 0x66, 0x05, 0x23, 0xdc, 0x65, 0x38, 0xc5, 0x31, 0x77, 0x58, 0x4a, 0x33,
+ 0x8a, 0x76, 0xab, 0xb2, 0x23, 0xcb, 0x7b, 0xd7, 0x42, 0x1a, 0x52, 0x59, 0x74, 0xc5, 0x53, 0x99,
+ 0xdb, 0xb3, 0x42, 0x4a, 0xc3, 0x09, 0x71, 0xe5, 0x5b, 0x90, 0x7f, 0x70, 0x47, 0x79, 0x8a, 0xb3,
+ 0x88, 0x26, 0x65, 0xdd, 0xfe, 0xb6, 0x06, 0x9d, 0x23, 0x9a, 0x70, 0x92, 0xf0, 0x9c, 0xbf, 0x96,
+ 0x1d, 0xd0, 0x21, 0x6c, 0x04, 0x13, 0x3a, 0x3c, 0x35, 0x8d, 0x9e, 0x71, 0xd0, 0xba, 0xb7, 0xef,
+ 0xac, 0xf6, 0x72, 0xfa, 0xa2, 0x5c, 0xa6, 0xbd, 0x32, 0x8b, 0x1e, 0x41, 0x83, 0x4c, 0xa3, 0x11,
+ 0x49, 0x86, 0xc4, 0x5c, 0x93, 0x5c, 0xef, 0x2a, 0xf7, 0x4c, 0x25, 0x14, 0x7a, 0x41, 0xa0, 0x27,
+ 0xd0, 0x9c, 0xe2, 0x49, 0x34, 0xc2, 0x19, 0x4d, 0xcd, 0xba, 0xc4, 0x6f, 0x5d, 0xc5, 0xdf, 0xeb,
+ 0x88, 0xe2, 0x2b, 0x06, 0x3d, 0x84, 0xad, 0x29, 0x49, 0x79, 0x44, 0x13, 0x73, 0x5d, 0xe2, 0xdd,
+ 0x3f, 0xe0, 0x65, 0x40, 0xc1, 0x3a, 0x2f, 0x7a, 0xf3, 0x22, 0x19, 0x8e, 0x53, 0x9a, 0x14, 0xe6,
+ 0xc6, 0xdf, 0x7a, 0xbf, 0xd1, 0x11, 0xdd, 0xfb, 0x82, 0xb1, 0x8f, 0xa0, 0x75, 0x49, 0x08, 0xba,
+ 0x09, 0xcd, 0x18, 0xcf, 0xfc, 0xa0, 0xc8, 0x08, 0x97, 0x0a, 0xeb, 0x5e, 0x23, 0xc6, 0xb3, 0xbe,
+ 0x78, 0x47, 0xd7, 0x61, 0x4b, 0x14, 0x43, 0xcc, 0xa5, 0xa5, 0xba, 0xb7, 0x19, 0xe3, 0xd9, 0x73,
+ 0xcc, 0xed, 0xaf, 0x06, 0xec, 0x2c, 0xeb, 0x41, 0x77, 0x00, 0x89, 0x2c, 0x0e, 0x89, 0x9f, 0xe4,
+ 0xb1, 0x2f, 0x3d, 0xeb, 0x1d, 0x3b, 0x31, 0x9e, 0x3d, 0x0d, 0xc9, 0xab, 0x3c, 0x96, 0xad, 0x39,
+ 0x3a, 0x86, 0x5d, 0x1d, 0xd6, 0x23, 0x56, 0x73, 0xb8, 0xe1, 0x94, 0xdf, 0x80, 0xa3, 0xbf, 0x01,
+ 0x67, 0xa0, 0x02, 0xfd, 0xc6, 0xd9, 0x8f, 0x6e, 0xed, 0xf3, 0xcf, 0xae, 0xe1, 0xed, 0x94, 0xfb,
+ 0xe9, 0xca, 0xf2, 0x21, 0xea, 0xcb, 0x87, 0xb0, 0xef, 0x43, 0x67, 0x65, 0x14, 0xc8, 0x86, 0x36,
+ 0xcb, 0x03, 0xff, 0x94, 0x14, 0xbe, 0xf4, 0x65, 0x1a, 0xbd, 0xfa, 0x41, 0xd3, 0x6b, 0xb1, 0x3c,
+ 0x78, 0x49, 0x8a, 0xb7, 0x62, 0xc9, 0xbe, 0x0b, 0xed, 0xa5, 0x11, 0xa0, 0x2e, 0xb4, 0x30, 0x63,
+ 0xbe, 0x1e, 0x9c, 0x38, 0xd9, 0xba, 0x07, 0x98, 0x31, 0x15, 0xb3, 0x4f, 0x60, 0xfb, 0x05, 0xe6,
+ 0x63, 0x32, 0x52, 0xc0, 0x6d, 0xe8, 0x48, 0x0b, 0xfe, 0xaa, 0xe0, 0xb6, 0x5c, 0x3e, 0xd6, 0x96,
+ 0x6d, 0x68, 0x57, 0xb9, 0xca, 0x75, 0x4b, 0xa7, 0x84, 0xf0, 0x4f, 0x06, 0x74, 0x56, 0x86, 0x8a,
+ 0x06, 0xd0, 0x8e, 0x09, 0xe7, 0x52, 0x22, 0x99, 0xe0, 0x42, 0xfd, 0x01, 0xff, 0x30, 0xb8, 0x2e,
+ 0xed, 0x6d, 0x2b, 0x6a, 0x20, 0x20, 0xf4, 0x18, 0x9a, 0x2c, 0x25, 0xc3, 0x88, 0xff, 0xd7, 0x0c,
+ 0xca, 0x1d, 0x2a, 0xa2, 0xff, 0xee, 0xcb, 0xdc, 0x32, 0xce, 0xe6, 0x96, 0x71, 0x3e, 0xb7, 0x8c,
+ 0x5f, 0x73, 0xcb, 0xf8, 0xb8, 0xb0, 0x6a, 0xe7, 0x0b, 0xab, 0xf6, 0x7d, 0x61, 0xd5, 0x4e, 0x1e,
+ 0x84, 0x51, 0x36, 0xce, 0x03, 0x67, 0x48, 0x63, 0xf7, 0xf2, 0x15, 0x51, 0x3d, 0x96, 0x77, 0xc0,
+ 0xea, 0xf5, 0x11, 0x6c, 0xca, 0xf5, 0xc3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x57, 0x89, 0x7c,
+ 0xd9, 0x59, 0x04, 0x00, 0x00,
}
func (this *ConsensusParams) Equal(that interface{}) bool {
@@ -677,10 +685,22 @@ func (this *SynchronyParams) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.MessageDelay != that1.MessageDelay {
+ if this.MessageDelay != nil && that1.MessageDelay != nil {
+ if *this.MessageDelay != *that1.MessageDelay {
+ return false
+ }
+ } else if this.MessageDelay != nil {
+ return false
+ } else if that1.MessageDelay != nil {
return false
}
- if this.Precision != that1.Precision {
+ if this.Precision != nil && that1.Precision != nil {
+ if *this.Precision != *that1.Precision {
+ return false
+ }
+ } else if this.Precision != nil {
+ return false
+ } else if that1.Precision != nil {
return false
}
return true
@@ -955,22 +975,26 @@ func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision):])
- if err7 != nil {
- return 0, err7
+ if m.Precision != nil {
+ n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):])
+ if err7 != nil {
+ return 0, err7
+ }
+ i -= n7
+ i = encodeVarintParams(dAtA, i, uint64(n7))
+ i--
+ dAtA[i] = 0x12
}
- i -= n7
- i = encodeVarintParams(dAtA, i, uint64(n7))
- i--
- dAtA[i] = 0x12
- n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay):])
- if err8 != nil {
- return 0, err8
+ if m.MessageDelay != nil {
+ n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):])
+ if err8 != nil {
+ return 0, err8
+ }
+ i -= n8
+ i = encodeVarintParams(dAtA, i, uint64(n8))
+ i--
+ dAtA[i] = 0xa
}
- i -= n8
- i = encodeVarintParams(dAtA, i, uint64(n8))
- i--
- dAtA[i] = 0xa
return len(dAtA) - i, nil
}
@@ -1094,10 +1118,14 @@ func (m *SynchronyParams) Size() (n int) {
}
var l int
_ = l
- l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay)
- n += 1 + l + sovParams(uint64(l))
- l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision)
- n += 1 + l + sovParams(uint64(l))
+ if m.MessageDelay != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay)
+ n += 1 + l + sovParams(uint64(l))
+ }
+ if m.Precision != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision)
+ n += 1 + l + sovParams(uint64(l))
+ }
return n
}
@@ -1843,7 +1871,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil {
+ if m.MessageDelay == nil {
+ m.MessageDelay = new(time.Duration)
+ }
+ if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -1876,7 +1907,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Precision, dAtA[iNdEx:postIndex]); err != nil {
+ if m.Precision == nil {
+ m.Precision = new(time.Duration)
+ }
+ if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Precision, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
diff --git a/types/params.go b/types/params.go
index fc9c4aaad..a74ae3762 100644
--- a/types/params.go
+++ b/types/params.go
@@ -275,8 +275,12 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa
res.Version.AppVersion = params2.Version.AppVersion
}
if params2.Synchrony != nil {
- res.Synchrony.Precision = params2.Synchrony.Precision
- res.Synchrony.MessageDelay = params2.Synchrony.MessageDelay
+ if params2.Synchrony.MessageDelay != nil {
+ res.Synchrony.MessageDelay = *params2.Synchrony.GetMessageDelay()
+ }
+ if params2.Synchrony.Precision != nil {
+ res.Synchrony.Precision = *params2.Synchrony.GetPrecision()
+ }
}
return res
}
@@ -299,14 +303,14 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams {
AppVersion: params.Version.AppVersion,
},
Synchrony: &tmproto.SynchronyParams{
- MessageDelay: params.Synchrony.MessageDelay,
- Precision: params.Synchrony.Precision,
+ MessageDelay: &params.Synchrony.MessageDelay,
+ Precision: &params.Synchrony.Precision,
},
}
}
func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams {
- return ConsensusParams{
+ c := ConsensusParams{
Block: BlockParams{
MaxBytes: pbParams.Block.MaxBytes,
MaxGas: pbParams.Block.MaxGas,
@@ -322,9 +326,14 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams
Version: VersionParams{
AppVersion: pbParams.Version.AppVersion,
},
- Synchrony: SynchronyParams{
- MessageDelay: pbParams.Synchrony.MessageDelay,
- Precision: pbParams.Synchrony.Precision,
- },
}
+ if pbParams.Synchrony != nil {
+ if pbParams.Synchrony.MessageDelay != nil {
+ c.Synchrony.MessageDelay = *pbParams.Synchrony.GetMessageDelay()
+ }
+ if pbParams.Synchrony.Precision != nil {
+ c.Synchrony.Precision = *pbParams.Synchrony.GetPrecision()
+ }
+ }
+ return c
}
diff --git a/types/params_test.go b/types/params_test.go
index 0aaf1e2b9..41b5afc04 100644
--- a/types/params_test.go
+++ b/types/params_test.go
@@ -246,8 +246,8 @@ func TestConsensusParamsUpdate(t *testing.T) {
intialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}),
updates: &tmproto.ConsensusParams{
Synchrony: &tmproto.SynchronyParams{
- Precision: time.Second * 2,
- MessageDelay: time.Second * 4,
+ Precision: durationPtr(time.Second * 2),
+ MessageDelay: durationPtr(time.Second * 4),
},
},
updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}),
@@ -339,3 +339,7 @@ func TestProto(t *testing.T) {
}
}
+
+func durationPtr(t time.Duration) *time.Duration {
+ return &t
+}