From 69c7aa77bcc84cb92aadaa91023a32ec3951184b Mon Sep 17 00:00:00 2001
From: Dev Ojha
Date: Sun, 30 Sep 2018 10:26:14 -0700
Subject: [PATCH] clist: speedup Next by removing defers (#2511)

This change doubles the speed of the mempool's reaping.

Before: BenchmarkReap-8   5000   365390 ns/op   122887 B/op
After:  BenchmarkReap-8  10000   158274 ns/op   122882 B/op
---
 libs/clist/clist.go     |  6 ++---
 mempool/bench_test.go   | 55 +++++++++++++++++++++++++++++++++++++++++
 mempool/mempool_test.go | 29 ----------------------
 3 files changed, 58 insertions(+), 32 deletions(-)
 create mode 100644 mempool/bench_test.go

diff --git a/libs/clist/clist.go b/libs/clist/clist.go
index c69d3d5f3..393bdf73f 100644
--- a/libs/clist/clist.go
+++ b/libs/clist/clist.go
@@ -113,9 +113,9 @@ func (e *CElement) NextWaitChan() <-chan struct{} {
 // Nonblocking, may return nil if at the end.
 func (e *CElement) Next() *CElement {
 	e.mtx.RLock()
-	defer e.mtx.RUnlock()
-
-	return e.next
+	val := e.next
+	e.mtx.RUnlock()
+	return val
 }
 
 // Nonblocking, may return nil if at the end.
diff --git a/mempool/bench_test.go b/mempool/bench_test.go
new file mode 100644
index 000000000..68b033caa
--- /dev/null
+++ b/mempool/bench_test.go
@@ -0,0 +1,55 @@
+package mempool
+
+import (
+	"encoding/binary"
+	"testing"
+
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/proxy"
+)
+
+func BenchmarkReap(b *testing.B) {
+	app := kvstore.NewKVStoreApplication()
+	cc := proxy.NewLocalClientCreator(app)
+	mempool := newMempoolWithApp(cc)
+
+	size := 10000
+	for i := 0; i < size; i++ {
+		tx := make([]byte, 8)
+		binary.BigEndian.PutUint64(tx, uint64(i))
+		mempool.CheckTx(tx, nil)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		mempool.ReapMaxBytesMaxGas(100000000, 10000000)
+	}
+}
+
+func BenchmarkCacheInsertTime(b *testing.B) {
+	cache := newMapTxCache(b.N)
+	txs := make([][]byte, b.N)
+	for i := 0; i < b.N; i++ {
+		txs[i] = make([]byte, 8)
+		binary.BigEndian.PutUint64(txs[i], uint64(i))
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cache.Push(txs[i])
+	}
+}
+
+// This benchmark is probably skewed, since we actually will be removing
+// txs in parallel, which may cause some overhead due to mutex locking.
+func BenchmarkCacheRemoveTime(b *testing.B) {
+	cache := newMapTxCache(b.N)
+	txs := make([][]byte, b.N)
+	for i := 0; i < b.N; i++ {
+		txs[i] = make([]byte, 8)
+		binary.BigEndian.PutUint64(txs[i], uint64(i))
+		cache.Push(txs[i])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cache.Remove(txs[i])
+	}
+}
diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go
index 4f66da36c..5aabd00ee 100644
--- a/mempool/mempool_test.go
+++ b/mempool/mempool_test.go
@@ -399,35 +399,6 @@ func TestMempoolCloseWAL(t *testing.T) {
 	require.Equal(t, 1, len(m3), "expecting the wal match in")
 }
 
-func BenchmarkCacheInsertTime(b *testing.B) {
-	cache := newMapTxCache(b.N)
-	txs := make([][]byte, b.N)
-	for i := 0; i < b.N; i++ {
-		txs[i] = make([]byte, 8)
-		binary.BigEndian.PutUint64(txs[i], uint64(i))
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		cache.Push(txs[i])
-	}
-}
-
-// This benchmark is probably skewed, since we actually will be removing
-// txs in parallel, which may cause some overhead due to mutex locking.
-func BenchmarkCacheRemoveTime(b *testing.B) {
-	cache := newMapTxCache(b.N)
-	txs := make([][]byte, b.N)
-	for i := 0; i < b.N; i++ {
-		txs[i] = make([]byte, 8)
-		binary.BigEndian.PutUint64(txs[i], uint64(i))
-		cache.Push(txs[i])
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		cache.Remove(txs[i])
-	}
-}
-
 func checksumIt(data []byte) string {
 	h := md5.New()
 	h.Write(data)