
abci: Flush socket requests and responses immediately. (#6997)

The main effect of this change is to flush the socket client and server message
encoding buffers immediately once the message is fully and correctly encoded.
This allows us to remove the timer and some other special cases, without
changing the observed behaviour of the system.

-- Background

The socket protocol client and server each use a buffered writer to encode
request and response messages onto the underlying connection. This reduces the
possibility of a single message being split across multiple writes, but has the
side-effect that a request may remain buffered for some time.
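
As a rough standalone illustration (a sketch, not the tendermint code itself),
a message encoded into a bufio.Writer stays in the local buffer until the
buffer fills up or Flush is called, so a small request can sit unsent
indefinitely:

  package main

  import (
    "bufio"
    "fmt"
    "net"
  )

  func main() {
    client, server := net.Pipe() // stand-in for the socket connection
    defer client.Close()
    defer server.Close()

    bw := bufio.NewWriter(client) // default 4096-byte buffer

    // The "request" is fully written, but only into the local buffer;
    // nothing has reached the server side yet.
    fmt.Fprint(bw, "small request")
    fmt.Println("buffered, unsent bytes:", bw.Buffered())

    // Only a Flush (or a full buffer) pushes the bytes onto the connection.
    go func() { buf := make([]byte, 64); server.Read(buf) }()
    if err := bw.Flush(); err != nil {
      panic(err)
    }
  }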

The implementation worked around this by keeping a ticker that occasionally
triggers a flush, and by flushing the writer in response to an explicit request
baked into the client/server protocol (see also #6994).
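
In simplified form (a sketch of the old pattern rather than the removed code
verbatim, using a plain time.Ticker in place of the throttle timer), the write
loop looked roughly like this: messages are encoded into the buffered writer,
and a separate timer case forces a periodic flush:

  package sketch

  import (
    "bufio"
    "time"
  )

  // writeLoop encodes each request into bw; a ticker periodically forces a
  // flush so that buffered requests do not linger (cf. flushThrottleMS).
  func writeLoop(bw *bufio.Writer, reqs <-chan []byte, quit <-chan struct{}) error {
    ticker := time.NewTicker(20 * time.Millisecond)
    defer ticker.Stop()
    for {
      select {
      case req := <-reqs:
        if _, err := bw.Write(req); err != nil {
          return err
        }
      case <-ticker.C:
        if err := bw.Flush(); err != nil {
          return err
        }
      case <-quit:
        return bw.Flush()
      }
    }
  }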

These workarounds are both unnecessary: Once a message has been dequeued for
sending and fully encoded in wire format, there is no real use keeping all or
part of it buffered locally.  Moreover, using an asynchronous process to flush
the buffer makes the round-trip performance of the request unpredictable.
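
Under that view, the write loop reduces to the following shape (again a
sketch, not the exact code in the diff below): flush as soon as each message
is fully encoded, and drop the timer case entirely:

  package sketch

  import "bufio"

  // sendLoop flushes immediately after every fully encoded message, so no
  // timer and no explicit flush requests are needed.
  func sendLoop(bw *bufio.Writer, reqs <-chan []byte, quit <-chan struct{}) error {
    for {
      select {
      case req := <-reqs:
        if _, err := bw.Write(req); err != nil {
          return err
        }
        if err := bw.Flush(); err != nil {
          return err
        }
      case <-quit:
        return nil
      }
    }
  }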

-- Benchmarks

Code: https://play.golang.org/p/0ChUOxJOiHt

I found no pre-existing performance benchmarks to justify the flush pattern,
but a natural question is whether this will significantly harm client/server
performance.  To test this, I implemented a simple benchmark that transfers
randomly-sized byte buffers from a no-op "client" to a no-op "server" over a
Unix-domain socket, using a buffered writer, both with and without explicit
flushes after each write.
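
The real benchmark is at the link above; reconstructed from the description
(names, sizes, and details here are illustrative), its core loop looks roughly
like this:

  package main

  import (
    "bufio"
    "fmt"
    "io"
    "math/rand"
    "net"
    "os"
    "path/filepath"
    "time"
  )

  // run writes randomly sized blocks (up to maxBlock bytes) through a
  // buffered writer over a Unix-domain socket for roughly duration d,
  // flushing after every block when flushEach is true. It reports how many
  // blocks and bytes were sent.
  func run(flushEach bool, maxBlock int, d time.Duration) (blocks, total int) {
    sock := filepath.Join(os.TempDir(), fmt.Sprintf("flushbench-%d.sock", os.Getpid()))
    defer os.Remove(sock)

    ln, err := net.Listen("unix", sock)
    if err != nil {
      panic(err)
    }
    defer ln.Close()

    // No-op "server": drain everything it receives.
    go func() {
      conn, err := ln.Accept()
      if err != nil {
        return
      }
      io.Copy(io.Discard, conn)
    }()

    conn, err := net.Dial("unix", sock)
    if err != nil {
      panic(err)
    }
    defer conn.Close()

    bw := bufio.NewWriter(conn) // default 4096-byte buffer
    block := make([]byte, maxBlock)
    deadline := time.Now().Add(d)
    for time.Now().Before(deadline) {
      n := 1 + rand.Intn(maxBlock)
      if _, err := bw.Write(block[:n]); err != nil {
        panic(err)
      }
      if flushEach {
        if err := bw.Flush(); err != nil {
          panic(err)
        }
      }
      blocks++
      total += n
    }
    bw.Flush()
    return blocks, total
  }

  func main() {
    for _, flush := range []bool{false, true} {
      blocks, total := run(flush, 512, 2*time.Second)
      fmt.Printf("FLUSH=%v blocks=%d bytes=%d\n", flush, blocks, total)
    }
  }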

As the following data show, flushing every time (FLUSH=true) does reduce raw
throughput, but not by a significant amount except for very small request
sizes, where the transfer time is already trivial (1.9μs).  Given that the
client is calibrated for 1MiB transactions, the overhead is not meaningful.

The percentage in each section is the speedup for flushing only when the buffer
is full, relative to flushing every block.  The benchmark uses the default
buffer size (4096 bytes), which is the same value used by the socket client and
server implementation:

  FLUSH  NBLOCKS  MAX      AVG     TOTAL       ELAPSED       TIME/BLOCK
  false  3957471  512      255     1011165416  2.00018873s   505ns
  true   1068568  512      255     273064368   2.000217051s  1.871µs
                                                             (73%)

  false  536096   4096     2048    1098066401  2.000229108s  3.731µs
  true   477911   4096     2047    978746731   2.000177825s  4.185µs
                                                             (10.8%)

  false  124595   16384    8181    1019340160  2.000235086s  16.053µs
  true   120995   16384    8179    989703064   2.000329349s  16.532µs
                                                             (2.9%)

  false  2114     1048576  525693  1111316541  2.000479928s  946.3µs
  true   2083     1048576  526379  1096449173  2.001817137s  961.025µs
                                                             (1.5%)
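
For example, in the 512-byte case the per-block time drops from 1.871µs with
FLUSH=true to 505ns with FLUSH=false, a reduction of (1.871 - 0.505) / 1.871,
or roughly 73%.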

Note also that the FLUSH=false baseline is actually faster than the production
code, which flushes more often than is required by the buffer filling up.

Moreover, the timer slows down the overall transaction rate of the client and
server, indepenedent of how fast the socket transfer is, so the loss on a real
workload is probably much less.
Branch: pull/6999/head
Author: M. J. Fromberger, 3 years ago (committed by GitHub)
Commit: 118bfe2087
2 changed files with 24 additions and 53 deletions:

  1. abci/client/socket_client.go (+18, -40)
  2. abci/server/socket_server.go (+6, -13)

abci/client/socket_client.go (+18, -40)

@@ -13,7 +13,6 @@ import (
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/libs/timer"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
)
@@ -22,8 +21,6 @@ const (
// reqQueueSize is the max number of queued async requests.
// (memory: 256MB max assuming 1MB transactions)
reqQueueSize = 256
// Don't wait longer than...
flushThrottleMS = 20
)
type reqResWithContext struct {
@@ -40,8 +37,7 @@ type socketClient struct {
mustConnect bool
conn net.Conn
reqQueue chan *reqResWithContext
flushTimer *timer.ThrottleTimer
reqQueue chan *reqResWithContext
mtx tmsync.RWMutex
err error
@@ -57,7 +53,6 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(addr string, mustConnect bool) Client {
cli := &socketClient{
reqQueue: make(chan *reqResWithContext, reqQueueSize),
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
addr: addr,
@@ -102,8 +97,7 @@ func (cli *socketClient) OnStop() {
cli.conn.Close()
}
cli.flushQueue()
cli.flushTimer.Stop()
cli.drainQueue()
}
// Error returns an error if the client was stopped abruptly.
@@ -126,38 +120,25 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) {
//----------------------------------------
func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
w := bufio.NewWriter(conn)
bw := bufio.NewWriter(conn)
for {
select {
case reqres := <-cli.reqQueue:
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
if reqres.C.Err() != nil {
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
continue
}
cli.willSendReq(reqres.R)
err := types.WriteMessage(reqres.R.Request, w)
if err != nil {
if err := types.WriteMessage(reqres.R.Request, bw); err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
// If it's a flush request, flush the current buffer.
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
}
case <-cli.flushTimer.Ch: // flush queue
select {
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
default:
// Probably will fill the buffer, or retry later.
if err := bw.Flush(); err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
case <-cli.Quit():
return
}
@@ -492,14 +473,6 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
}
}
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}
return reqres, nil
}
@@ -537,7 +510,9 @@ func queueErr(e error) error {
return fmt.Errorf("can't queue req: %w", e)
}
func (cli *socketClient) flushQueue() {
// drainQueue marks as complete and discards all remaining pending requests
// from the queue.
func (cli *socketClient) drainQueue() {
cli.mtx.Lock()
defer cli.mtx.Unlock()
@@ -547,14 +522,17 @@ func (cli *socketClient) flushQueue() {
reqres.Done()
}
// mark all queued messages as resolved
LOOP:
// Mark all queued messages as resolved.
//
// TODO(creachadair): We can't simply range the channel, because it is never
// closed, and the writer continues to add work.
// See https://github.com/tendermint/tendermint/issues/6996.
for {
select {
case reqres := <-cli.reqQueue:
reqres.R.Done()
default:
break LOOP
return
}
}
}


abci/server/socket_server.go (+6, -13)

@@ -240,22 +240,15 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
if err != nil {
bw := bufio.NewWriter(conn)
for res := range responses {
if err := types.WriteMessage(res, bw); err != nil {
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
if err := bw.Flush(); err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
count++
}
}
