
Merge branch 'master' into wb/pbts-runbook

pull/8129/head
William Banfield, 3 years ago, committed by GitHub
commit 82d0b99b29
17 changed files with 263 additions and 464 deletions
  1. go.mod (+1 -1)
  2. go.sum (+2 -1)
  3. internal/blocksync/pool.go (+0 -16)
  4. internal/blocksync/reactor.go (+0 -2)
  5. internal/consensus/reactor.go (+0 -6)
  6. internal/libs/clist/bench_test.go (+1 -1)
  7. internal/libs/clist/clist.go (+11 -67)
  8. internal/libs/clist/clist_test.go (+1 -25)
  9. internal/mempool/mempool_bench_test.go (+9 -1)
  10. internal/mempool/mempool_test.go (+79 -26)
  11. internal/mempool/reactor_test.go (+25 -9)
  12. libs/events/events.go (+2 -53)
  13. libs/events/events_test.go (+2 -178)
  14. proto/tendermint/statesync/message_test.go (+7 -2)
  15. proto/tendermint/types/params.pb.go (+99 -65)
  16. types/params.go (+18 -9)
  17. types/params_test.go (+6 -2)

go.mod (+1 -1)

@@ -28,7 +28,7 @@ require (
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.10.0
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce


go.sum (+2 -1)

@@ -971,8 +971,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=


internal/blocksync/pool.go (+0 -16)

@@ -86,7 +86,6 @@ type BlockPool struct {
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
exitedCh chan struct{}
startHeight int64
lastHundredBlockTimeStamp time.Time
@@ -109,7 +108,6 @@ func NewBlockPool(
height: start,
startHeight: start,
numPending: 0,
exitedCh: make(chan struct{}),
requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
@@ -125,11 +123,6 @@ func (pool *BlockPool) OnStart(ctx context.Context) error {
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine(ctx)
go func() {
defer close(pool.exitedCh)
pool.Wait()
}()
return nil
}
@@ -637,12 +630,6 @@ func (bpr *bpRequester) redo(peerID types.NodeID) {
// Responsible for making more requests as necessary
// Returns only when a block is found (e.g. AddBlock() is called)
func (bpr *bpRequester) requestRoutine(ctx context.Context) {
bprPoolDone := make(chan struct{})
go func() {
defer close(bprPoolDone)
bpr.pool.Wait()
}()
OUTER_LOOP:
for {
// Pick a peer to send request to.
@@ -670,9 +657,6 @@
select {
case <-ctx.Done():
return
case <-bpr.pool.exitedCh:
bpr.Stop()
return
case peerID := <-bpr.redoCh:
if peerID == bpr.peerID {
bpr.reset()
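
The hunks above delete the pool's exitedCh and the goroutines that closed it after pool.Wait(); a requester now stops only when the shared context is canceled. A minimal standalone sketch of the surviving pattern, with illustrative names rather than the pool's actual fields:

package main

import (
	"context"
	"fmt"
)

// requestRoutine sketches the shutdown path left after this change:
// no pool-owned exit channel, just the caller's context.
func requestRoutine(ctx context.Context, redoCh <-chan int, done chan<- struct{}) {
	defer close(done)
	for {
		select {
		case <-ctx.Done(): // the sole shutdown signal
			return
		case peerID := <-redoCh:
			fmt.Println("re-requesting from peer", peerID)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	redoCh := make(chan int)
	done := make(chan struct{})
	go requestRoutine(ctx, redoCh, done)
	redoCh <- 7
	cancel() // replaces close(exitedCh) followed by pool.Wait()
	<-done
}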


internal/blocksync/reactor.go (+0 -2)

@@ -445,8 +445,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) {
select {
case <-ctx.Done():
return
case <-r.pool.exitedCh:
return
case <-switchToConsensusTicker.C:
var (
height, numPending, lenRequesters = r.pool.GetStatus()


internal/consensus/reactor.go (+0 -6)

@@ -219,8 +219,6 @@ func (r *Reactor) OnStart(ctx context.Context) error {
// blocking until they all exit, as well as unsubscribing from events and stopping
// state.
func (r *Reactor) OnStop() {
r.unsubscribeFromBroadcastEvents()
r.state.Stop()
if !r.WaitSync() {
@@ -394,10 +392,6 @@ func (r *Reactor) subscribeToBroadcastEvents() {
}
}
func (r *Reactor) unsubscribeFromBroadcastEvents() {
r.state.evsw.RemoveListener(listenerIDConsensus)
}
func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
return &tmcons.NewRoundStep{
Height: rs.Height,


internal/libs/clist/bench_test.go (+1 -1)

@@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
start.removed = true
start.DetachNext()
start.detachNext()
start.DetachPrev()
tmp := nxt
nxt = nxt.Next()


internal/libs/clist/clist.go (+11 -67)

@@ -44,7 +44,6 @@ waiting on NextWait() (since it's just a read operation).
type CElement struct {
mtx sync.RWMutex
prev *CElement
prevWaitCh chan struct{}
next *CElement
nextWaitCh chan struct{}
removed bool
@@ -72,33 +71,6 @@ func (e *CElement) NextWait() *CElement {
}
}
// Blocking implementation of Prev().
// May return nil iff CElement was head and got removed.
func (e *CElement) PrevWait() *CElement {
for {
e.mtx.RLock()
prev := e.prev
removed := e.removed
signal := e.prevWaitCh
e.mtx.RUnlock()
if prev != nil || removed {
return prev
}
<-signal
}
}
// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does,
// channel will be closed.
func (e *CElement) PrevWaitChan() <-chan struct{} {
e.mtx.RLock()
defer e.mtx.RUnlock()
return e.prevWaitCh
}
// NextWaitChan can be used to wait until Next becomes not nil. Once it does,
// channel will be closed.
func (e *CElement) NextWaitChan() <-chan struct{} {
@@ -131,7 +103,7 @@ func (e *CElement) Removed() bool {
return isRemoved
}
func (e *CElement) DetachNext() {
func (e *CElement) detachNext() {
e.mtx.Lock()
if !e.removed {
e.mtx.Unlock()
@@ -153,7 +125,7 @@ func (e *CElement) DetachPrev() {
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on nextWg.
func (e *CElement) SetNext(newNext *CElement) {
func (e *CElement) setNext(newNext *CElement) {
e.mtx.Lock()
oldNext := e.next
@@ -174,30 +146,20 @@ func (e *CElement) SetNext(newNext *CElement) {
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on prevWg
func (e *CElement) SetPrev(newPrev *CElement) {
func (e *CElement) setPrev(newPrev *CElement) {
e.mtx.Lock()
defer e.mtx.Unlock()
oldPrev := e.prev
e.prev = newPrev
if oldPrev != nil && newPrev == nil {
e.prevWaitCh = make(chan struct{})
}
if oldPrev == nil && newPrev != nil {
close(e.prevWaitCh)
}
}
func (e *CElement) SetRemoved() {
func (e *CElement) setRemoved() {
e.mtx.Lock()
defer e.mtx.Unlock()
e.removed = true
// This wakes up anyone waiting in either direction.
if e.prev == nil {
close(e.prevWaitCh)
}
// This wakes up anyone waiting.
if e.next == nil {
close(e.nextWaitCh)
}
@@ -211,7 +173,6 @@ func (e *CElement) SetRemoved() {
// Panics if length grows beyond the max.
type CList struct {
mtx sync.RWMutex
wg *sync.WaitGroup
waitCh chan struct{}
head *CElement // first element
tail *CElement // last element
@@ -250,7 +211,7 @@ func (l *CList) Front() *CElement {
return head
}
func (l *CList) FrontWait() *CElement {
func (l *CList) frontWait() *CElement {
// Loop until the head is non-nil else wait and try again
for {
l.mtx.RLock()
@@ -273,22 +234,6 @@ func (l *CList) Back() *CElement {
return back
}
func (l *CList) BackWait() *CElement {
for {
l.mtx.RLock()
tail := l.tail
wg := l.wg
l.mtx.RUnlock()
if tail != nil {
return tail
}
wg.Wait()
// l.tail doesn't necessarily exist here.
// That's why we need to continue a for-loop.
}
}
// WaitChan can be used to wait until Front or Back becomes not nil. Once it
// does, channel will be closed.
func (l *CList) WaitChan() <-chan struct{} {
@@ -305,7 +250,6 @@ func (l *CList) PushBack(v interface{}) *CElement {
// Construct a new element
e := &CElement{
prev: nil,
prevWaitCh: make(chan struct{}),
next: nil,
nextWaitCh: make(chan struct{}),
removed: false,
@@ -326,8 +270,8 @@
l.head = e
l.tail = e
} else {
e.SetPrev(l.tail) // We must init e first.
l.tail.SetNext(e) // This will make e accessible.
e.setPrev(l.tail) // We must init e first.
l.tail.setNext(e) // This will make e accessible.
l.tail = e // Update the list.
}
l.mtx.Unlock()
@@ -365,16 +309,16 @@ func (l *CList) Remove(e *CElement) interface{} {
if prev == nil {
l.head = next
} else {
prev.SetNext(next)
prev.setNext(next)
}
if next == nil {
l.tail = prev
} else {
next.SetPrev(prev)
next.setPrev(prev)
}
// Set .Done() on e, otherwise waiters will wait forever.
e.SetRemoved()
e.setRemoved()
return e.Value
}
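
With PrevWait, PrevWaitChan, and BackWait removed and the mutators unexported, callers can now only block waiting in the forward direction. The wake-up mechanism itself is unchanged: a channel closed exactly once when next becomes non-nil. A standalone analogue of that idiom, not the package's actual code:

package main

import (
	"fmt"
	"sync"
)

// cell mimics the surviving one-directional wait mechanism: a channel
// that is closed when next becomes non-nil, so any number of readers
// can park on it.
type cell struct {
	mtx        sync.RWMutex
	next       *cell
	nextWaitCh chan struct{}
	value      int
}

func newCell(v int) *cell { return &cell{nextWaitCh: make(chan struct{}), value: v} }

// setNext mirrors the now-unexported setNext: closing nextWaitCh wakes
// every goroutine blocked in nextWaitChan.
func (c *cell) setNext(n *cell) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if c.next == nil && n != nil {
		close(c.nextWaitCh)
	}
	c.next = n
}

func (c *cell) nextWaitChan() <-chan struct{} {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return c.nextWaitCh
}

func main() {
	head := newCell(1)
	done := make(chan struct{})
	go func() {
		<-head.nextWaitChan() // blocks until a successor is linked
		head.mtx.RLock()
		fmt.Println("woke up, next value:", head.next.value)
		head.mtx.RUnlock()
		close(done)
	}()
	head.setNext(newCell(2))
	<-done
}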

internal/libs/clist/clist_test.go (+1 -25)

@@ -218,7 +218,7 @@ func TestScanRightDeleteRandom(t *testing.T) {
default:
}
if el == nil {
el = l.FrontWait()
el = l.frontWait()
restartCounter++
}
el = el.Next()
@@ -314,30 +314,6 @@
t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
}
// 4) test iterating backwards (PrevWaitChan and Prev)
prev := next
seen = 0
FOR_LOOP2:
for {
select {
case <-prev.PrevWaitChan():
prev = prev.Prev()
seen++
if prev == nil {
t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem")
}
if pushed == seen {
break FOR_LOOP2
}
case <-time.After(250 * time.Millisecond):
break FOR_LOOP2
}
}
if pushed != seen {
t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
}
}
func TestRemoved(t *testing.T) {


internal/mempool/mempool_bench_test.go (+9 -1)

@@ -8,15 +8,23 @@ import (
"time"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/libs/log"
)
func BenchmarkTxMempool_CheckTx(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client := abciclient.NewLocalClient(log.NewNopLogger(), kvstore.NewApplication())
if err := client.Start(ctx); err != nil {
b.Fatal(err)
}
// setup the cache and the mempool number for hitting GetEvictableTxs during the
// benchmark. 5000 is the current default mempool size in the TM config.
txmp := setup(ctx, b, 10000)
txmp := setup(ctx, b, client, 10000)
txmp.config.Size = 5000
rng := rand.New(rand.NewSource(time.Now().UnixNano()))


internal/mempool/mempool_test.go (+79 -26)

@@ -72,30 +72,18 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
}
}
func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
func setup(ctx context.Context, t testing.TB, app abciclient.Client, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
logger := log.TestingLogger()
conn := abciclient.NewLocalClient(logger, &application{
kvstore.NewApplication(),
})
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
require.NoError(t, err)
cfg.Mempool.CacheSize = cacheSize
require.NoError(t, conn.Start(ctx))
t.Cleanup(func() {
os.RemoveAll(cfg.RootDir)
cancel()
conn.Wait()
})
t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, conn, options...)
return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...)
}
func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@@ -137,7 +125,13 @@ func TestTxMempool_TxsAvailable(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
txmp.EnableTxsAvailable()
ensureNoTxFire := func() {
@@ -194,7 +188,13 @@ func TestTxMempool_Size(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
txs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -221,7 +221,13 @@ func TestTxMempool_Flush(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
txs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -249,7 +255,13 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
tTxs := checkTxs(ctx, t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -302,7 +314,13 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
tTxs := checkTxs(ctx, t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
@@ -354,7 +372,12 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 0)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 0)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes+1)
@@ -374,7 +397,13 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 100)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -392,7 +421,13 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 100)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -417,7 +452,13 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 100)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 100)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
checkTxDone := make(chan struct{})
@@ -484,7 +525,13 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
txmp := setup(ctx, t, 500)
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
txmp := setup(ctx, t, client, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10
@@ -556,10 +603,16 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
if err := client.Start(ctx); err != nil {
t.Fatal(err)
}
t.Cleanup(client.Wait)
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn))
txmp := setup(ctx, t, client, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
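
Every test above now repeats the same four lines to build, start, and reap a local ABCI client before calling setup. If the repetition mattered, it could be folded into a helper along these lines; this is a hypothetical refactor, not part of this diff, and it assumes the surrounding test package with the imports shown above:

// newStartedClient is a hypothetical helper wrapping the boilerplate
// repeated in the tests above: construct a local client over the
// kvstore-backed application, start it, and register cleanup.
func newStartedClient(ctx context.Context, t *testing.T) abciclient.Client {
	t.Helper()
	client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
	if err := client.Start(ctx); err != nil {
		t.Fatal(err)
	}
	t.Cleanup(client.Wait)
	return client
}

A test body would then shrink to txmp := setup(ctx, t, newStartedClient(ctx, t), 100).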


internal/mempool/reactor_test.go (+25 -9)

@@ -13,6 +13,7 @@ import (
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
@@ -39,7 +40,7 @@ type reactorTestSuite struct {
nodes []types.NodeID
}
func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNodes int, chBuf uint) *reactorTestSuite {
t.Helper()
cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
@@ -63,7 +64,11 @@ func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint)
for nodeID := range rts.network.Nodes {
rts.kvstores[nodeID] = kvstore.NewApplication()
mempool := setup(ctx, t, 0)
client := abciclient.NewLocalClient(logger, rts.kvstores[nodeID])
require.NoError(t, client.Start(ctx))
t.Cleanup(client.Wait)
mempool := setup(ctx, t, client, 0)
rts.mempools[nodeID] = mempool
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
@@ -151,7 +156,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
defer cancel()
const numNodes = 2
rts := setupReactors(ctx, t, numNodes, 0)
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, numNodes, 0)
observePanic := func(r interface{}) {
t.Fatal("panic detected in reactor")
@@ -194,7 +201,9 @@ func TestReactorBroadcastTxs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, numNodes, uint(numTxs))
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs))
primary := rts.nodes[0]
secondaries := rts.nodes[1:]
@@ -218,7 +227,8 @@ func TestReactorConcurrency(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, numNodes, 0)
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, numNodes, 0)
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -276,7 +286,8 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, numNodes, uint(numTxs))
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs))
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -300,7 +311,9 @@ func TestReactor_MaxTxBytes(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, numNodes, 0)
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, numNodes, 0)
primary := rts.nodes[0]
secondary := rts.nodes[1]
@@ -336,7 +349,8 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, 1, MaxActiveIDs+1)
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, 1, MaxActiveIDs+1)
nodeID := rts.nodes[0]
@@ -388,7 +402,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rts := setupReactors(ctx, t, 2, 2)
logger := log.NewNopLogger()
rts := setupReactors(ctx, t, logger, 2, 2)
primary := rts.nodes[0]
secondary := rts.nodes[1]


libs/events/events.go (+2 -53)

@@ -50,8 +50,6 @@
Stop()
AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error
RemoveListenerForEvent(event string, listenerID string)
RemoveListener(listenerID string)
}
type eventSwitch struct {
@@ -71,11 +69,8 @@ func NewEventSwitch(logger log.Logger) EventSwitch {
return evsw
}
func (evsw *eventSwitch) OnStart(ctx context.Context) error {
return nil
}
func (evsw *eventSwitch) OnStop() {}
func (evsw *eventSwitch) OnStart(ctx context.Context) error { return nil }
func (evsw *eventSwitch) OnStop() {}
func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error {
// Get/Create eventCell and listener.
@@ -103,52 +98,6 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb E
return nil
}
func (evsw *eventSwitch) RemoveListener(listenerID string) {
// Get and remove listener.
evsw.mtx.RLock()
listener := evsw.listeners[listenerID]
evsw.mtx.RUnlock()
if listener == nil {
return
}
evsw.mtx.Lock()
delete(evsw.listeners, listenerID)
evsw.mtx.Unlock()
// Remove callback for each event.
listener.SetRemoved()
for _, event := range listener.GetEvents() {
evsw.RemoveListenerForEvent(event, listenerID)
}
}
func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) {
// Get eventCell
evsw.mtx.Lock()
eventCell := evsw.eventCells[event]
evsw.mtx.Unlock()
if eventCell == nil {
return
}
// Remove listenerID from eventCell
numListeners := eventCell.RemoveListener(listenerID)
// Maybe garbage collect eventCell.
if numListeners == 0 {
// Lock again and double check.
evsw.mtx.Lock() // OUTER LOCK
eventCell.mtx.Lock() // INNER LOCK
if len(eventCell.listeners) == 0 {
delete(evsw.eventCells, event)
}
eventCell.mtx.Unlock() // INNER LOCK
evsw.mtx.Unlock() // OUTER LOCK
}
}
func (evsw *eventSwitch) FireEvent(ctx context.Context, event string, data EventData) {
// Get the eventCell
evsw.mtx.RLock()
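
The diff strips out the listener-removal machinery shown above; what remains of the switch's surface is adding listeners and firing events. A stripped-down, standalone illustration of that surface, using illustrative types rather than the package's implementation:

package main

import (
	"context"
	"fmt"
	"sync"
)

// EventData and EventCallback mirror the kept libs/events signatures.
type EventData interface{}
type EventCallback func(ctx context.Context, data EventData) error

// miniSwitch is an illustrative event switch reduced to the surface
// that survives this diff: add a listener, fire an event.
type miniSwitch struct {
	mtx       sync.RWMutex
	listeners map[string][]EventCallback // event -> callbacks
}

func (s *miniSwitch) AddListenerForEvent(event string, cb EventCallback) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.listeners == nil {
		s.listeners = map[string][]EventCallback{}
	}
	s.listeners[event] = append(s.listeners[event], cb)
}

func (s *miniSwitch) FireEvent(ctx context.Context, event string, data EventData) {
	s.mtx.RLock()
	cbs := s.listeners[event]
	s.mtx.RUnlock()
	for _, cb := range cbs {
		_ = cb(ctx, data)
	}
}

func main() {
	var sw miniSwitch
	sw.AddListenerForEvent("block", func(_ context.Context, d EventData) error {
		fmt.Println("saw", d)
		return nil
	})
	sw.FireEvent(context.Background(), "block", uint64(1))
}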


libs/events/events_test.go (+2 -178)

@@ -7,7 +7,6 @@
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
@@ -28,8 +27,6 @@ func TestAddListenerForEventFireOnce(t *testing.T) {
messages := make(chan EventData)
require.NoError(t, evsw.AddListenerForEvent("listener", "event",
func(ctx context.Context, data EventData) error {
// test there's no deadlock if we remove the listener inside a callback
evsw.RemoveListener("listener")
select {
case messages <- data:
return nil
@@ -234,171 +231,7 @@
}
}
func TestAddAndRemoveListenerConcurrency(t *testing.T) {
var (
stopInputEvent = false
roundCount = 2000
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
evsw := NewEventSwitch(logger)
require.NoError(t, evsw.Start(ctx))
t.Cleanup(evsw.Wait)
done1 := make(chan struct{})
done2 := make(chan struct{})
// Must be executed concurrently to uncover the data race.
// 1. RemoveListener
go func() {
defer close(done1)
for i := 0; i < roundCount; i++ {
evsw.RemoveListener("listener")
}
}()
// 2. AddListenerForEvent
go func() {
defer close(done2)
for i := 0; i < roundCount; i++ {
index := i
// we explicitly ignore errors here, since the listener will sometimes be removed
// (that's what we're testing)
_ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index),
func(ctx context.Context, data EventData) error {
t.Errorf("should not run callback for %d.\n", index)
stopInputEvent = true
return nil
})
}
}()
<-done1
<-done2
evsw.RemoveListener("listener") // remove the last listener
for i := 0; i < roundCount && !stopInputEvent; i++ {
evsw.FireEvent(ctx, fmt.Sprintf("event%d", i), uint64(1001))
}
}
// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to
// two events, fires a thousand integers for the first event, then unsubscribes
// the listener and fires a thousand integers for the second event.
func TestAddAndRemoveListener(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
evsw := NewEventSwitch(logger)
require.NoError(t, evsw.Start(ctx))
t.Cleanup(evsw.Wait)
doneSum1 := make(chan uint64)
doneSum2 := make(chan uint64)
doneSending1 := make(chan uint64)
doneSending2 := make(chan uint64)
numbers1 := make(chan uint64, 4)
numbers2 := make(chan uint64, 4)
// subscribe two listener to three events
require.NoError(t, evsw.AddListenerForEvent("listener", "event1",
func(ctx context.Context, data EventData) error {
select {
case numbers1 <- data.(uint64):
return nil
case <-ctx.Done():
return ctx.Err()
}
}))
require.NoError(t, evsw.AddListenerForEvent("listener", "event2",
func(ctx context.Context, data EventData) error {
select {
case numbers2 <- data.(uint64):
return nil
case <-ctx.Done():
return ctx.Err()
}
}))
// collect received events for event1
go sumReceivedNumbers(numbers1, doneSum1)
// collect received events for event2
go sumReceivedNumbers(numbers2, doneSum2)
// go fire events
go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1))
checkSumEvent1 := <-doneSending1
// after sending all event1, unsubscribe for all events
evsw.RemoveListener("listener")
go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001))
checkSumEvent2 := <-doneSending2
close(numbers1)
close(numbers2)
eventSum1 := <-doneSum1
eventSum2 := <-doneSum2
if checkSumEvent1 != eventSum1 ||
// correct value asserted by preceding tests, suffices to be non-zero
checkSumEvent2 == uint64(0) ||
eventSum2 != uint64(0) {
t.Errorf("not all messages sent were received or unsubscription did not register.\n")
}
}
// TestRemoveListener does basic tests on adding and removing
func TestRemoveListener(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
evsw := NewEventSwitch(logger)
require.NoError(t, evsw.Start(ctx))
t.Cleanup(evsw.Wait)
count := 10
sum1, sum2 := 0, 0
// add some listeners and make sure they work
require.NoError(t, evsw.AddListenerForEvent("listener", "event1",
func(ctx context.Context, data EventData) error {
sum1++
return nil
}))
require.NoError(t, evsw.AddListenerForEvent("listener", "event2",
func(ctx context.Context, data EventData) error {
sum2++
return nil
}))
for i := 0; i < count; i++ {
evsw.FireEvent(ctx, "event1", true)
evsw.FireEvent(ctx, "event2", true)
}
assert.Equal(t, count, sum1)
assert.Equal(t, count, sum2)
// remove one by event and make sure it is gone
evsw.RemoveListenerForEvent("event2", "listener")
for i := 0; i < count; i++ {
evsw.FireEvent(ctx, "event1", true)
evsw.FireEvent(ctx, "event2", true)
}
assert.Equal(t, count*2, sum1)
assert.Equal(t, count, sum2)
// remove the listener entirely and make sure both gone
evsw.RemoveListener("listener")
for i := 0; i < count; i++ {
evsw.FireEvent(ctx, "event1", true)
evsw.FireEvent(ctx, "event2", true)
}
assert.Equal(t, count*2, sum1)
assert.Equal(t, count, sum2)
}
// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two
// TestManageListenersAsync sets up an EventSwitch, subscribes two
// listeners to three events, and fires a thousand integers for each event.
// These two listeners serve as the baseline validation while other listeners
// are randomly subscribed and unsubscribed.
@@ -408,7 +241,7 @@
// at that point subscribed to.
// NOTE: it is important to run this test with race conditions tracking on,
// `go test -race`, to examine for possible race conditions.
func TestRemoveListenersAsync(t *testing.T) {
func TestManageListenersAsync(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
@@ -494,18 +327,9 @@
func(context.Context, EventData) error { return nil })
}
}
removeListenersStress := func() {
r2 := rand.New(rand.NewSource(time.Now().Unix()))
r2.Seed(time.Now().UnixNano())
for k := uint16(0); k < 80; k++ {
listenerNumber := r2.Intn(100) + 3
go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber))
}
}
addListenersStress()
// go fire events
go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1))
removeListenersStress()
go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001))
go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001))
checkSumEvent1 := <-doneSending1


proto/tendermint/statesync/message_test.go (+7 -2)

@@ -3,6 +3,7 @@ package statesync_test
import (
"encoding/hex"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
@@ -204,8 +205,8 @@ func TestStateSyncVectors(t *testing.T) {
AppVersion: 11,
},
Synchrony: &tmproto.SynchronyParams{
MessageDelay: 550,
Precision: 90,
MessageDelay: durationPtr(550),
Precision: durationPtr(90),
},
},
},
@@ -224,3 +225,7 @@
require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
}
}
func durationPtr(t time.Duration) *time.Duration {
return &t
}

proto/tendermint/types/params.pb.go (+99 -65)

@@ -381,9 +381,17 @@ func (m *HashedParams) GetBlockMaxGas() int64 {
return 0
}
// SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid.
// These parameters are part of the proposer-based timestamps algorithm. For more information,
// see the specification of proposer-based timestamps:
// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp
type SynchronyParams struct {
MessageDelay time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay"`
Precision time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision"`
// message_delay bounds how long a proposal message may take to reach all validators on a network
// and still be considered valid.
MessageDelay *time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay,omitempty"`
// precision bounds how skewed a proposer's clock may be from any validator
// on the network while still producing valid proposals.
Precision *time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision,omitempty"`
}
func (m *SynchronyParams) Reset() { *m = SynchronyParams{} }
@@ -419,18 +427,18 @@ func (m *SynchronyParams) XXX_DiscardUnknown() {
var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo
func (m *SynchronyParams) GetMessageDelay() time.Duration {
func (m *SynchronyParams) GetMessageDelay() *time.Duration {
if m != nil {
return m.MessageDelay
}
return 0
return nil
}
func (m *SynchronyParams) GetPrecision() time.Duration {
func (m *SynchronyParams) GetPrecision() *time.Duration {
if m != nil {
return m.Precision
}
return 0
return nil
}
func init() {
@@ -446,43 +454,43 @@
func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) }
var fileDescriptor_e12598271a686f57 = []byte{
// 561 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xd4, 0x40,
0x18, 0xc7, 0x37, 0xdd, 0xbe, 0xec, 0x3e, 0xdb, 0xed, 0x96, 0x41, 0x30, 0x56, 0x9a, 0x5d, 0x73,
0x90, 0x82, 0x90, 0x88, 0x45, 0x44, 0x10, 0xa4, 0xdb, 0x8a, 0x05, 0xa9, 0x48, 0x7c, 0x39, 0xf4,
0x12, 0x26, 0xbb, 0x63, 0x36, 0x74, 0x93, 0x19, 0x32, 0xc9, 0xb2, 0xf9, 0x16, 0x1e, 0x3d, 0x79,
0xd6, 0x8f, 0xe1, 0xad, 0xc7, 0x1e, 0x3d, 0xa9, 0xec, 0x7e, 0x11, 0x99, 0xc9, 0x4c, 0xd3, 0xdd,
0x2a, 0xd8, 0x5b, 0x32, 0xcf, 0xef, 0x97, 0x87, 0xf9, 0x3f, 0x93, 0x81, 0xdd, 0x8c, 0x24, 0x43,
0x92, 0xc6, 0x51, 0x92, 0xb9, 0x59, 0xc1, 0x08, 0x77, 0x19, 0x4e, 0x71, 0xcc, 0x1d, 0x96, 0xd2,
0x8c, 0xa2, 0xed, 0xaa, 0xec, 0xc8, 0xf2, 0xce, 0xad, 0x90, 0x86, 0x54, 0x16, 0x5d, 0xf1, 0x54,
0x72, 0x3b, 0x56, 0x48, 0x69, 0x38, 0x26, 0xae, 0x7c, 0x0b, 0xf2, 0x8f, 0xee, 0x30, 0x4f, 0x71,
0x16, 0xd1, 0xa4, 0xac, 0xdb, 0xdf, 0x57, 0xa0, 0x73, 0x48, 0x13, 0x4e, 0x12, 0x9e, 0xf3, 0x37,
0xb2, 0x03, 0xda, 0x87, 0xb5, 0x60, 0x4c, 0x07, 0x67, 0xa6, 0xd1, 0x33, 0xf6, 0x5a, 0x8f, 0x76,
0x9d, 0xe5, 0x5e, 0x4e, 0x5f, 0x94, 0x4b, 0xda, 0x2b, 0x59, 0xf4, 0x0c, 0x1a, 0x64, 0x12, 0x0d,
0x49, 0x32, 0x20, 0xe6, 0x8a, 0xf4, 0x7a, 0xd7, 0xbd, 0x17, 0x8a, 0x50, 0xea, 0xa5, 0x81, 0x9e,
0x43, 0x73, 0x82, 0xc7, 0xd1, 0x10, 0x67, 0x34, 0x35, 0xeb, 0x52, 0xbf, 0x77, 0x5d, 0xff, 0xa0,
0x11, 0xe5, 0x57, 0x0e, 0x7a, 0x0a, 0x1b, 0x13, 0x92, 0xf2, 0x88, 0x26, 0xe6, 0xaa, 0xd4, 0xbb,
0x7f, 0xd1, 0x4b, 0x40, 0xc9, 0x9a, 0x17, 0xbd, 0x79, 0x91, 0x0c, 0x46, 0x29, 0x4d, 0x0a, 0x73,
0xed, 0x5f, 0xbd, 0xdf, 0x6a, 0x44, 0xf7, 0xbe, 0x74, 0xec, 0x43, 0x68, 0x5d, 0x09, 0x04, 0xdd,
0x85, 0x66, 0x8c, 0xa7, 0x7e, 0x50, 0x64, 0x84, 0xcb, 0x08, 0xeb, 0x5e, 0x23, 0xc6, 0xd3, 0xbe,
0x78, 0x47, 0xb7, 0x61, 0x43, 0x14, 0x43, 0xcc, 0x65, 0x4a, 0x75, 0x6f, 0x3d, 0xc6, 0xd3, 0x97,
0x98, 0xdb, 0xdf, 0x0c, 0xd8, 0x5a, 0x8c, 0x07, 0x3d, 0x00, 0x24, 0x58, 0x1c, 0x12, 0x3f, 0xc9,
0x63, 0x5f, 0xe6, 0xac, 0xbf, 0xd8, 0x89, 0xf1, 0xf4, 0x20, 0x24, 0xaf, 0xf3, 0x58, 0xb6, 0xe6,
0xe8, 0x04, 0xb6, 0x35, 0xac, 0x47, 0xac, 0xe6, 0x70, 0xc7, 0x29, 0xcf, 0x80, 0xa3, 0xcf, 0x80,
0x73, 0xa4, 0x80, 0x7e, 0xe3, 0xfc, 0x67, 0xb7, 0xf6, 0xf9, 0x57, 0xd7, 0xf0, 0xb6, 0xca, 0xef,
0xe9, 0xca, 0xe2, 0x26, 0xea, 0x8b, 0x9b, 0xb0, 0x1f, 0x43, 0x67, 0x69, 0x14, 0xc8, 0x86, 0x36,
0xcb, 0x03, 0xff, 0x8c, 0x14, 0xbe, 0xcc, 0xcb, 0x34, 0x7a, 0xf5, 0xbd, 0xa6, 0xd7, 0x62, 0x79,
0xf0, 0x8a, 0x14, 0xef, 0xc4, 0x92, 0xfd, 0x10, 0xda, 0x0b, 0x23, 0x40, 0x5d, 0x68, 0x61, 0xc6,
0x7c, 0x3d, 0x38, 0xb1, 0xb3, 0x55, 0x0f, 0x30, 0x63, 0x0a, 0xb3, 0x4f, 0x61, 0xf3, 0x18, 0xf3,
0x11, 0x19, 0x2a, 0xe1, 0x3e, 0x74, 0x64, 0x0a, 0xfe, 0x72, 0xc0, 0x6d, 0xb9, 0x7c, 0xa2, 0x53,
0xb6, 0xa1, 0x5d, 0x71, 0x55, 0xd6, 0x2d, 0x4d, 0x89, 0xc0, 0xbf, 0x18, 0xd0, 0x59, 0x1a, 0x2a,
0x3a, 0x86, 0x76, 0x4c, 0x38, 0x97, 0x21, 0x92, 0x31, 0x2e, 0xd4, 0x1f, 0xf0, 0x5f, 0x09, 0x6e,
0x2a, 0xf3, 0x48, 0x88, 0xe8, 0x00, 0x9a, 0x2c, 0x25, 0x83, 0x88, 0xdf, 0x70, 0x0e, 0x95, 0xd5,
0x7f, 0xff, 0x75, 0x66, 0x19, 0xe7, 0x33, 0xcb, 0xb8, 0x98, 0x59, 0xc6, 0xef, 0x99, 0x65, 0x7c,
0x9a, 0x5b, 0xb5, 0x8b, 0xb9, 0x55, 0xfb, 0x31, 0xb7, 0x6a, 0xa7, 0x4f, 0xc2, 0x28, 0x1b, 0xe5,
0x81, 0x33, 0xa0, 0xb1, 0x7b, 0xf5, 0xaa, 0xa8, 0x1e, 0xcb, 0xbb, 0x60, 0xf9, 0x1a, 0x09, 0xd6,
0xe5, 0xfa, 0xfe, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x26, 0x8a, 0x0b, 0x61, 0x04, 0x00,
0x00,
// 565 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4d, 0x8b, 0xd3, 0x40,
0x18, 0xc7, 0x9b, 0xed, 0xbe, 0xb4, 0x4f, 0xb7, 0xdb, 0x65, 0x10, 0x8c, 0x2b, 0x9b, 0xd6, 0x1c,
0x64, 0x41, 0x48, 0xc4, 0x45, 0x44, 0x50, 0xc4, 0x6e, 0x45, 0x41, 0x56, 0x24, 0xbe, 0x1c, 0xf6,
0x12, 0x26, 0xed, 0x98, 0x86, 0x6d, 0x32, 0x43, 0x26, 0x29, 0xcd, 0xb7, 0xf0, 0x24, 0x7e, 0x04,
0xfd, 0x18, 0xde, 0xf6, 0xb8, 0x47, 0x4f, 0x2a, 0xed, 0x17, 0x91, 0x99, 0xcc, 0x6c, 0xb6, 0x5d,
0x15, 0x6f, 0xc9, 0x3c, 0xff, 0xdf, 0x3c, 0xcc, 0xef, 0x49, 0x06, 0xf6, 0x33, 0x92, 0x8c, 0x48,
0x1a, 0x47, 0x49, 0xe6, 0x66, 0x05, 0x23, 0xdc, 0x65, 0x38, 0xc5, 0x31, 0x77, 0x58, 0x4a, 0x33,
0x8a, 0x76, 0xab, 0xb2, 0x23, 0xcb, 0x7b, 0xd7, 0x42, 0x1a, 0x52, 0x59, 0x74, 0xc5, 0x53, 0x99,
0xdb, 0xb3, 0x42, 0x4a, 0xc3, 0x09, 0x71, 0xe5, 0x5b, 0x90, 0x7f, 0x70, 0x47, 0x79, 0x8a, 0xb3,
0x88, 0x26, 0x65, 0xdd, 0xfe, 0xb6, 0x06, 0x9d, 0x23, 0x9a, 0x70, 0x92, 0xf0, 0x9c, 0xbf, 0x96,
0x1d, 0xd0, 0x21, 0x6c, 0x04, 0x13, 0x3a, 0x3c, 0x35, 0x8d, 0x9e, 0x71, 0xd0, 0xba, 0xb7, 0xef,
0xac, 0xf6, 0x72, 0xfa, 0xa2, 0x5c, 0xa6, 0xbd, 0x32, 0x8b, 0x1e, 0x41, 0x83, 0x4c, 0xa3, 0x11,
0x49, 0x86, 0xc4, 0x5c, 0x93, 0x5c, 0xef, 0x2a, 0xf7, 0x4c, 0x25, 0x14, 0x7a, 0x41, 0xa0, 0x27,
0xd0, 0x9c, 0xe2, 0x49, 0x34, 0xc2, 0x19, 0x4d, 0xcd, 0xba, 0xc4, 0x6f, 0x5d, 0xc5, 0xdf, 0xeb,
0x88, 0xe2, 0x2b, 0x06, 0x3d, 0x84, 0xad, 0x29, 0x49, 0x79, 0x44, 0x13, 0x73, 0x5d, 0xe2, 0xdd,
0x3f, 0xe0, 0x65, 0x40, 0xc1, 0x3a, 0x2f, 0x7a, 0xf3, 0x22, 0x19, 0x8e, 0x53, 0x9a, 0x14, 0xe6,
0xc6, 0xdf, 0x7a, 0xbf, 0xd1, 0x11, 0xdd, 0xfb, 0x82, 0xb1, 0x8f, 0xa0, 0x75, 0x49, 0x08, 0xba,
0x09, 0xcd, 0x18, 0xcf, 0xfc, 0xa0, 0xc8, 0x08, 0x97, 0x0a, 0xeb, 0x5e, 0x23, 0xc6, 0xb3, 0xbe,
0x78, 0x47, 0xd7, 0x61, 0x4b, 0x14, 0x43, 0xcc, 0xa5, 0xa5, 0xba, 0xb7, 0x19, 0xe3, 0xd9, 0x73,
0xcc, 0xed, 0xaf, 0x06, 0xec, 0x2c, 0xeb, 0x41, 0x77, 0x00, 0x89, 0x2c, 0x0e, 0x89, 0x9f, 0xe4,
0xb1, 0x2f, 0x3d, 0xeb, 0x1d, 0x3b, 0x31, 0x9e, 0x3d, 0x0d, 0xc9, 0xab, 0x3c, 0x96, 0xad, 0x39,
0x3a, 0x86, 0x5d, 0x1d, 0xd6, 0x23, 0x56, 0x73, 0xb8, 0xe1, 0x94, 0xdf, 0x80, 0xa3, 0xbf, 0x01,
0x67, 0xa0, 0x02, 0xfd, 0xc6, 0xd9, 0x8f, 0x6e, 0xed, 0xf3, 0xcf, 0xae, 0xe1, 0xed, 0x94, 0xfb,
0xe9, 0xca, 0xf2, 0x21, 0xea, 0xcb, 0x87, 0xb0, 0xef, 0x43, 0x67, 0x65, 0x14, 0xc8, 0x86, 0x36,
0xcb, 0x03, 0xff, 0x94, 0x14, 0xbe, 0xf4, 0x65, 0x1a, 0xbd, 0xfa, 0x41, 0xd3, 0x6b, 0xb1, 0x3c,
0x78, 0x49, 0x8a, 0xb7, 0x62, 0xc9, 0xbe, 0x0b, 0xed, 0xa5, 0x11, 0xa0, 0x2e, 0xb4, 0x30, 0x63,
0xbe, 0x1e, 0x9c, 0x38, 0xd9, 0xba, 0x07, 0x98, 0x31, 0x15, 0xb3, 0x4f, 0x60, 0xfb, 0x05, 0xe6,
0x63, 0x32, 0x52, 0xc0, 0x6d, 0xe8, 0x48, 0x0b, 0xfe, 0xaa, 0xe0, 0xb6, 0x5c, 0x3e, 0xd6, 0x96,
0x6d, 0x68, 0x57, 0xb9, 0xca, 0x75, 0x4b, 0xa7, 0x84, 0xf0, 0x4f, 0x06, 0x74, 0x56, 0x86, 0x8a,
0x06, 0xd0, 0x8e, 0x09, 0xe7, 0x52, 0x22, 0x99, 0xe0, 0x42, 0xfd, 0x01, 0xff, 0x30, 0xb8, 0x2e,
0xed, 0x6d, 0x2b, 0x6a, 0x20, 0x20, 0xf4, 0x18, 0x9a, 0x2c, 0x25, 0xc3, 0x88, 0xff, 0xd7, 0x0c,
0xca, 0x1d, 0x2a, 0xa2, 0xff, 0xee, 0xcb, 0xdc, 0x32, 0xce, 0xe6, 0x96, 0x71, 0x3e, 0xb7, 0x8c,
0x5f, 0x73, 0xcb, 0xf8, 0xb8, 0xb0, 0x6a, 0xe7, 0x0b, 0xab, 0xf6, 0x7d, 0x61, 0xd5, 0x4e, 0x1e,
0x84, 0x51, 0x36, 0xce, 0x03, 0x67, 0x48, 0x63, 0xf7, 0xf2, 0x15, 0x51, 0x3d, 0x96, 0x77, 0xc0,
0xea, 0xf5, 0x11, 0x6c, 0xca, 0xf5, 0xc3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x57, 0x89, 0x7c,
0xd9, 0x59, 0x04, 0x00, 0x00,
}
func (this *ConsensusParams) Equal(that interface{}) bool {
@@ -677,10 +685,22 @@
} else if this == nil {
return false
}
if this.MessageDelay != that1.MessageDelay {
if this.MessageDelay != nil && that1.MessageDelay != nil {
if *this.MessageDelay != *that1.MessageDelay {
return false
}
} else if this.MessageDelay != nil {
return false
} else if that1.MessageDelay != nil {
return false
}
if this.Precision != that1.Precision {
if this.Precision != nil && that1.Precision != nil {
if *this.Precision != *that1.Precision {
return false
}
} else if this.Precision != nil {
return false
} else if that1.Precision != nil {
return false
}
return true
@@ -955,22 +975,26 @@ func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision):])
if err7 != nil {
return 0, err7
if m.Precision != nil {
n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):])
if err7 != nil {
return 0, err7
}
i -= n7
i = encodeVarintParams(dAtA, i, uint64(n7))
i--
dAtA[i] = 0x12
}
i -= n7
i = encodeVarintParams(dAtA, i, uint64(n7))
i--
dAtA[i] = 0x12
n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay):])
if err8 != nil {
return 0, err8
if m.MessageDelay != nil {
n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):])
if err8 != nil {
return 0, err8
}
i -= n8
i = encodeVarintParams(dAtA, i, uint64(n8))
i--
dAtA[i] = 0xa
}
i -= n8
i = encodeVarintParams(dAtA, i, uint64(n8))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
@@ -1094,10 +1118,14 @@ func (m *SynchronyParams) Size() (n int) {
}
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay)
n += 1 + l + sovParams(uint64(l))
l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision)
n += 1 + l + sovParams(uint64(l))
if m.MessageDelay != nil {
l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay)
n += 1 + l + sovParams(uint64(l))
}
if m.Precision != nil {
l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision)
n += 1 + l + sovParams(uint64(l))
}
return n
}
@@ -1843,7 +1871,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil {
if m.MessageDelay == nil {
m.MessageDelay = new(time.Duration)
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -1876,7 +1907,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Precision, dAtA[iNdEx:postIndex]); err != nil {
if m.Precision == nil {
m.Precision = new(time.Duration)
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Precision, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex


types/params.go (+18 -9)

@@ -275,8 +275,12 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa
res.Version.AppVersion = params2.Version.AppVersion
}
if params2.Synchrony != nil {
res.Synchrony.Precision = params2.Synchrony.Precision
res.Synchrony.MessageDelay = params2.Synchrony.MessageDelay
if params2.Synchrony.MessageDelay != nil {
res.Synchrony.MessageDelay = *params2.Synchrony.GetMessageDelay()
}
if params2.Synchrony.Precision != nil {
res.Synchrony.Precision = *params2.Synchrony.GetPrecision()
}
}
return res
}
@@ -299,14 +303,14 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams {
AppVersion: params.Version.AppVersion,
},
Synchrony: &tmproto.SynchronyParams{
MessageDelay: params.Synchrony.MessageDelay,
Precision: params.Synchrony.Precision,
MessageDelay: &params.Synchrony.MessageDelay,
Precision: &params.Synchrony.Precision,
},
}
}
func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams {
return ConsensusParams{
c := ConsensusParams{
Block: BlockParams{
MaxBytes: pbParams.Block.MaxBytes,
MaxGas: pbParams.Block.MaxGas,
@@ -322,9 +326,14 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams
Version: VersionParams{
AppVersion: pbParams.Version.AppVersion,
},
Synchrony: SynchronyParams{
MessageDelay: pbParams.Synchrony.MessageDelay,
Precision: pbParams.Synchrony.Precision,
},
}
if pbParams.Synchrony != nil {
if pbParams.Synchrony.MessageDelay != nil {
c.Synchrony.MessageDelay = *pbParams.Synchrony.GetMessageDelay()
}
if pbParams.Synchrony.Precision != nil {
c.Synchrony.Precision = *pbParams.Synchrony.GetPrecision()
}
}
return c
}
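
Because the proto Synchrony fields are now pointers, the conversion layer treats nil as "no value supplied": UpdateConsensusParams keeps the existing duration when a pointer is nil, and ConsensusParamsFromProto tolerates a missing Synchrony block entirely. A compact standalone sketch of that semantic, using stand-in types rather than the real tmproto ones:

package main

import (
	"fmt"
	"time"
)

// protoSynchrony stands in for tmproto.SynchronyParams after this
// change: optional fields modeled as pointers.
type protoSynchrony struct {
	MessageDelay *time.Duration
	Precision    *time.Duration
}

// synchronyParams stands in for the domain type, which keeps plain values.
type synchronyParams struct {
	MessageDelay time.Duration
	Precision    time.Duration
}

// update mirrors the UpdateConsensusParams logic: a nil pointer means
// "no update", so the existing value is preserved.
func (s synchronyParams) update(p *protoSynchrony) synchronyParams {
	if p == nil {
		return s
	}
	if p.MessageDelay != nil {
		s.MessageDelay = *p.MessageDelay
	}
	if p.Precision != nil {
		s.Precision = *p.Precision
	}
	return s
}

func main() {
	cur := synchronyParams{MessageDelay: 3 * time.Second, Precision: time.Second}
	d := 4 * time.Second
	// Only MessageDelay is set; Precision stays at its old value.
	next := cur.update(&protoSynchrony{MessageDelay: &d})
	fmt.Println(next.MessageDelay, next.Precision) // prints: 4s 1s
}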

types/params_test.go (+6 -2)

@@ -246,8 +246,8 @@ func TestConsensusParamsUpdate(t *testing.T) {
intialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}),
updates: &tmproto.ConsensusParams{
Synchrony: &tmproto.SynchronyParams{
Precision: time.Second * 2,
MessageDelay: time.Second * 4,
Precision: durationPtr(time.Second * 2),
MessageDelay: durationPtr(time.Second * 4),
},
},
updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}),
@@ -339,3 +339,7 @@ func TestProto(t *testing.T) {
}
}
func durationPtr(t time.Duration) *time.Duration {
return &t
}
