package mempool

import (
	"bytes"
	"container/list"
	"context"
	"crypto/sha256"
	"fmt"
	"sync"
	"sync/atomic"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	auto "github.com/tendermint/tendermint/libs/autofile"
	"github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmos "github.com/tendermint/tendermint/libs/os"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// TxKeySize is the size of the transaction key index.
const TxKeySize = sha256.Size

var newline = []byte("\n")

//--------------------------------------------------------------------------------

// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
	// Atomic integers
	height   int64 // the last block Update()'d to
	txsBytes int64 // total size of mempool, in bytes

	// notify listeners (ie. consensus) when txs are available
	notifiedTxsAvailable bool
	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty

	config *cfg.MempoolConfig

	// Exclusive mutex for Update method to prevent concurrent execution of
	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
	updateMtx tmsync.RWMutex
	preCheck  PreCheckFunc
	postCheck PostCheckFunc

	wal          *auto.AutoFile // a log of mempool txs
	txs          *clist.CList   // concurrent linked-list of good txs
	proxyAppConn proxy.AppConnMempool

	// Track whether we're rechecking txs.
	// These are not protected by a mutex and are expected to be mutated in
	// serial (ie. by abci responses which are called in serial).
	recheckCursor *clist.CElement // next expected response
	recheckEnd    *clist.CElement // re-checking stops here

	// Map for quick access to txs to record sender in CheckTx.
	// txsMap: txKey -> CElement
	txsMap sync.Map

	// Keep a cache of already-seen txs.
	// This reduces the pressure on the proxyApp.
	cache txCache

	logger  log.Logger
	metrics *Metrics
}

var _ Mempool = &CListMempool{}

// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)

// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
func NewCListMempool(
	config *cfg.MempoolConfig,
	proxyAppConn proxy.AppConnMempool,
	height int64,
	options ...CListMempoolOption,
) *CListMempool {
	mempool := &CListMempool{
		config:        config,
		proxyAppConn:  proxyAppConn,
		txs:           clist.New(),
		height:        height,
		recheckCursor: nil,
		recheckEnd:    nil,
		logger:        log.NewNopLogger(),
		metrics:       NopMetrics(),
	}
	if config.CacheSize > 0 {
		mempool.cache = newMapTxCache(config.CacheSize)
	} else {
		mempool.cache = nopTxCache{}
	}
	proxyAppConn.SetResponseCallback(mempool.globalCb)
	for _, option := range options {
		option(mempool)
	}
	return mempool
}

// NOTE: not thread safe - should only be called once, on startup.
func (mem *CListMempool) EnableTxsAvailable() {
	mem.txsAvailable = make(chan struct{}, 1)
}

// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
	mem.logger = l
}
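// Illustrative sketch (not part of this file's API): wiring up a mempool.
// The appConn and logger values are assumed to be supplied by the caller;
// a real node obtains them from its proxy.AppConns and logging setup.
//
//	mp := NewCListMempool(
//		cfg.DefaultMempoolConfig(),
//		appConn, // a proxy.AppConnMempool
//		0,       // initial height
//		WithMetrics(NopMetrics()),
//	)
//	mp.SetLogger(logger)
//	mp.EnableTxsAvailable() // once, on startup, before concurrent use
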
// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.preCheck = f }
}

// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.postCheck = f }
}

// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CListMempoolOption {
	return func(mem *CListMempool) { mem.metrics = metrics }
}

func (mem *CListMempool) InitWAL() error {
	var (
		walDir  = mem.config.WalDir()
		walFile = walDir + "/wal"
	)

	const perm = 0700
	if err := tmos.EnsureDir(walDir, perm); err != nil {
		return err
	}

	af, err := auto.OpenAutoFile(walFile)
	if err != nil {
		return fmt.Errorf("can't open autofile %s: %w", walFile, err)
	}

	mem.wal = af
	return nil
}

func (mem *CListMempool) CloseWAL() {
	if err := mem.wal.Close(); err != nil {
		mem.logger.Error("Error closing WAL", "err", err)
	}
	mem.wal = nil
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
	mem.updateMtx.Lock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
	mem.updateMtx.Unlock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int {
	return mem.txs.Len()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsBytes() int64 {
	return atomic.LoadInt64(&mem.txsBytes)
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error {
	return mem.proxyAppConn.FlushSync(context.Background())
}

// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
func (mem *CListMempool) Flush() {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
		return true
	})
}

// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement {
	return mem.txs.Front()
}

// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element).
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
	return mem.txs.WaitChan()
}
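// Illustrative sketch (not part of this file's API): how a peer-broadcast
// goroutine typically consumes the list, blocking until the mempool is
// non-empty and then walking it with NextWait(). The quit channel is an
// assumed shutdown signal supplied by the caller.
//
//	var next *clist.CElement
//	for {
//		if next == nil {
//			select {
//			case <-mp.TxsWaitChan(): // wait until a tx is added
//				if next = mp.TxsFront(); next == nil {
//					continue
//				}
//			case <-quit:
//				return
//			}
//		}
//		memTx := next.Value.(*mempoolTx)
//		// ... send memTx.tx to the peer ...
//		next = next.NextWait() // blocks; returns nil if the element was removed
//	}
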
// CheckTx blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error {
	mem.updateMtx.RLock()
	// use defer to unlock mutex because application (*local client*) might panic
	defer mem.updateMtx.RUnlock()

	txSize := len(tx)

	if err := mem.isFull(txSize); err != nil {
		return err
	}

	if txSize > mem.config.MaxTxBytes {
		return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
	}

	if mem.preCheck != nil {
		if err := mem.preCheck(tx); err != nil {
			return ErrPreCheck{err}
		}
	}

	// NOTE: writing to the WAL and calling proxy must be done before adding tx
	// to the cache. otherwise, if either of them fails, next time CheckTx is
	// called with tx, ErrTxInCache will be returned without tx being checked at
	// all even once.
	if mem.wal != nil {
		// TODO: Notify administrators when WAL fails
		_, err := mem.wal.Write(append([]byte(tx), newline...))
		if err != nil {
			return fmt.Errorf("wal.Write: %w", err)
		}
	}

	// NOTE: proxyAppConn may error if tx buffer is full
	if err := mem.proxyAppConn.Error(); err != nil {
		return err
	}

	if !mem.cache.Push(tx) {
		// Record a new sender for a tx we've already seen.
		// Note it's possible a tx is still in the cache but no longer in the mempool
		// (eg. after committing a block, txs are removed from mempool but not cache),
		// so we only record the sender for txs still in the mempool.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			memTx := e.(*clist.CElement).Value.(*mempoolTx)
			memTx.senders.LoadOrStore(txInfo.SenderID, true)
			// TODO: consider punishing peer for dups,
			// it's non-trivial since invalid txs can become valid,
			// but they can spam the same tx with little cost to them atm.
		}
		return ErrTxInCache
	}

	ctx := context.Background()
	if txInfo.Context != nil {
		ctx = txInfo.Context
	}

	reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx})
	if err != nil {
		mem.cache.Remove(tx)
		return err
	}
	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb))

	return nil
}

// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track
// which txs we heard from whom), and peerID is not included in the ABCI request,
// so we have to set request-specific callbacks that include this information.
// If we're not in the midst of a recheck, this function will just return, so the
// request-specific callback can do the work.
//
// When rechecking, we don't need the peerID, so the recheck callback happens
// here.
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
	if mem.recheckCursor == nil {
		return
	}

	mem.metrics.RecheckTimes.Add(1)
	mem.resCbRecheck(req, res)

	// update metrics
	mem.metrics.Size.Set(float64(mem.Size()))
}
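// Illustrative sketch (not part of this file's API): submitting a tx, e.g.
// from an RPC handler. The callback fires once the application has responded
// to CheckTx; the tx bytes and error handling here are placeholders.
//
//	tx := types.Tx("k=v")
//	err := mp.CheckTx(tx, func(res *abci.Response) {
//		if res.GetCheckTx().Code != abci.CodeTypeOK {
//			// the app rejected the tx; it was not added to the pool
//		}
//	}, TxInfo{SenderID: UnknownPeerID})
//	if err != nil {
//		// e.g. ErrTxInCache, ErrTxTooLarge, ErrMempoolIsFull, ErrPreCheck
//	}
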
// Request specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid
// sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through
// here that is called when all other response processing is complete.
//
// Used in CheckTx to record the PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	externalCb func(*abci.Response),
) func(res *abci.Response) {
	return func(res *abci.Response) {
		if mem.recheckCursor != nil {
			// this should never happen
			panic("recheck cursor is not nil in reqResCb")
		}

		mem.resCbFirstTime(tx, peerID, peerP2PID, res)

		// update metrics
		mem.metrics.Size.Set(float64(mem.Size()))

		// passed in by the caller of CheckTx, eg. the RPC
		if externalCb != nil {
			externalCb(res)
		}
	}
}

// Called from:
//  - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
	e := mem.txs.PushBack(memTx)
	mem.txsMap.Store(TxKey(memTx.tx), e)
	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}

// Called from:
//  - Update (lock held) if tx was committed
//  - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(TxKey(tx))
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

	if removeFromCache {
		mem.cache.Remove(tx)
	}
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) {
	if e, ok := mem.txsMap.Load(txKey); ok {
		memTx := e.(*clist.CElement).Value.(*mempoolTx)
		if memTx != nil {
			mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
		}
	}
}

func (mem *CListMempool) isFull(txSize int) error {
	var (
		memSize  = mem.Size()
		txsBytes = mem.TxsBytes()
	)

	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
		return ErrMempoolIsFull{
			memSize, mem.config.Size,
			txsBytes, mem.config.MaxTxsBytes,
		}
	}

	return nil
}

// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	res *abci.Response,
) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Check mempool isn't full again to reduce the chance of exceeding the
			// limits.
			if err := mem.isFull(len(tx)); err != nil {
				// remove from cache (mempool might have space later)
				mem.cache.Remove(tx)
				mem.logger.Error(err.Error())
				return
			}

			memTx := &mempoolTx{
				height:    mem.height,
				gasWanted: r.CheckTx.GasWanted,
				tx:        tx,
			}
			memTx.senders.Store(peerID, true)
			mem.addTx(memTx)
			mem.logger.Info("Added good transaction",
				"tx", txID(tx),
				"res", r,
				"height", memTx.height,
				"total", mem.Size(),
			)
			mem.notifyTxsAvailable()
		} else {
			// ignore bad transaction
			mem.logger.Info("Rejected bad transaction",
				"tx", txID(tx), "peerID", peerP2PID, "res", r, "err", postCheckErr)
			mem.metrics.FailedTxs.Add(1)
			if !mem.config.KeepInvalidTxsInCache {
				// remove from cache (it might be good later)
				mem.cache.Remove(tx)
			}
		}
	default:
		// ignore other messages
	}
}
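// Illustrative sketch (not part of this file's API): evicting a known tx by
// key, e.g. from application-side logic that learned the tx can never
// succeed. The tx value is a placeholder.
//
//	key := TxKey(tx)             // sha256 of the raw tx bytes
//	mp.RemoveTxByKey(key, false) // keep it in the cache so peers can't re-add it
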
// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		tx := req.GetCheckTx().Tx
		memTx := mem.recheckCursor.Value.(*mempoolTx)
		if !bytes.Equal(tx, memTx.tx) {
			panic(fmt.Sprintf(
				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
				memTx.tx,
				tx))
		}
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Good, nothing to do.
		} else {
			// Tx became invalidated due to newly committed block.
			mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
			// NOTE: we remove tx from the cache because it might be good later
			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			mem.logger.Info("Done rechecking txs")

			// in case the recheck removed all txs
			if mem.Size() > 0 {
				mem.notifyTxsAvailable()
			}
		}
	default:
		// ignore other messages
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
	return mem.txsAvailable
}

func (mem *CListMempool) notifyTxsAvailable() {
	if mem.Size() == 0 {
		panic("notified txs available but mempool is empty!")
	}
	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
		// channel cap is 1, so this will send once
		mem.notifiedTxsAvailable = true
		select {
		case mem.txsAvailable <- struct{}{}:
		default:
		}
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var totalGas int64

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		dataSize := types.ComputeProtoSizeForTxs(append(txs, memTx.tx))

		// Check total size requirement
		if maxBytes > -1 && dataSize > maxBytes {
			return txs
		}
		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which
		// must be non-negative, it follows that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs
		}
		totalGas = newTotalGas
		txs = append(txs, memTx.tx)
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	if max < 0 {
		max = mem.txs.Len()
	}

	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		txs = append(txs, memTx.tx)
	}
	return txs
}
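// Illustrative sketch (not part of this file's API): reaping txs for a block
// proposal. The limits here are placeholders; consensus derives the real ones
// from the consensus parameters.
//
//	txs := mp.ReapMaxBytesMaxGas(1024*1024, 10_000_000) // at most 1 MB of txs and 10M gas
//	all := mp.ReapMaxBytesMaxGas(-1, -1)                // negative limits disable both checks
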
// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update(
	height int64,
	txs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
	preCheck PreCheckFunc,
	postCheck PostCheckFunc,
) error {
	// Set height
	mem.height = height
	mem.notifiedTxsAvailable = false

	if preCheck != nil {
		mem.preCheck = preCheck
	}
	if postCheck != nil {
		mem.postCheck = postCheck
	}

	for i, tx := range txs {
		if deliverTxResponses[i].Code == abci.CodeTypeOK {
			// Add valid committed tx to the cache (if missing).
			_ = mem.cache.Push(tx)
		} else if !mem.config.KeepInvalidTxsInCache {
			// Allow invalid transactions to be resubmitted.
			mem.cache.Remove(tx)
		}

		// Remove committed tx from the mempool.
		//
		// Note an evil proposer can drop valid txs!
		// Mempool before:
		//   100 -> 101 -> 102
		// Block, proposed by an evil proposer:
		//   101 -> 102
		// Mempool after:
		//   100
		// https://github.com/tendermint/tendermint/issues/3322.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			mem.removeTx(tx, e.(*clist.CElement), false)
		}
	}

	// Either recheck non-committed txs to see if they became invalid
	// or just notify there are some txs left.
	if mem.Size() > 0 {
		if mem.config.Recheck {
			mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
			mem.recheckTxs()
			// At this point, mem.txs are being rechecked.
			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
		} else {
			mem.notifyTxsAvailable()
		}
	}

	// Update metrics
	mem.metrics.Size.Set(float64(mem.Size()))

	return nil
}

func (mem *CListMempool) recheckTxs() {
	if mem.Size() == 0 {
		panic("recheckTxs is called, but the mempool is empty")
	}

	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()
	ctx := context.Background()

	// Push txs to proxyAppConn
	// NOTE: globalCb may be called concurrently.
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		_, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
			Tx:   memTx.tx,
			Type: abci.CheckTxType_Recheck,
		})
		if err != nil {
			// No need to retry since memTx will be rechecked after the next block.
			mem.logger.Error("Can't check tx", "err", err)
		}
	}

	_, err := mem.proxyAppConn.FlushAsync(ctx)
	if err != nil {
		mem.logger.Error("Can't flush txs", "err", err)
	}
}

//--------------------------------------------------------------------------------

// mempoolTx is a transaction that successfully ran
type mempoolTx struct {
	height    int64 // height that this tx had been validated in
	gasWanted int64 // amount of gas this tx states it will require
	tx        types.Tx

	// ids of peers who've sent us this tx (as a map for quick lookups).
	// senders: PeerID -> bool
	senders sync.Map
}

// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
	return atomic.LoadInt64(&memTx.height)
}

//--------------------------------------------------------------------------------

type txCache interface {
	Reset()
	Push(tx types.Tx) bool
	Remove(tx types.Tx)
}

// mapTxCache maintains an LRU cache of transactions. This only stores the
// hash of the tx, due to memory concerns.
type mapTxCache struct {
	mtx      tmsync.Mutex
	size     int
	cacheMap map[[TxKeySize]byte]*list.Element
	list     *list.List
}

var _ txCache = (*mapTxCache)(nil)

// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
	return &mapTxCache{
		size:     cacheSize,
		cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
		list:     list.New(),
	}
}

// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
	cache.mtx.Lock()
	cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size)
	cache.list.Init()
	cache.mtx.Unlock()
}
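// Illustrative sketch (not part of this file's API): the per-block contract
// between consensus and the mempool. The block and deliverTxResponses values
// are placeholders for the committed block's txs and their DeliverTx results.
//
//	mp.Lock()
//	defer mp.Unlock()
//	if err := mp.FlushAppConn(); err != nil { // drain in-flight CheckTx calls first
//		return err
//	}
//	if err := mp.Update(block.Height, block.Txs, deliverTxResponses, nil, nil); err != nil {
//		return err
//	}
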
// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
	cache.mtx.Lock()
	defer cache.mtx.Unlock()

	// Use the tx hash in the cache
	txHash := TxKey(tx)
	if moved, exists := cache.cacheMap[txHash]; exists {
		cache.list.MoveToBack(moved)
		return false
	}

	if cache.list.Len() >= cache.size {
		popped := cache.list.Front()
		if popped != nil {
			poppedTxHash := popped.Value.([TxKeySize]byte)
			delete(cache.cacheMap, poppedTxHash)
			cache.list.Remove(popped)
		}
	}
	e := cache.list.PushBack(txHash)
	cache.cacheMap[txHash] = e
	return true
}

// Remove removes the given tx from the cache.
func (cache *mapTxCache) Remove(tx types.Tx) {
	cache.mtx.Lock()
	txHash := TxKey(tx)
	popped := cache.cacheMap[txHash]
	delete(cache.cacheMap, txHash)
	if popped != nil {
		cache.list.Remove(popped)
	}

	cache.mtx.Unlock()
}

type nopTxCache struct{}

var _ txCache = (*nopTxCache)(nil)

func (nopTxCache) Reset()             {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx)    {}

//--------------------------------------------------------------------------------

// TxKey is the fixed length array hash used as the key in maps.
func TxKey(tx types.Tx) [TxKeySize]byte {
	return sha256.Sum256(tx)
}

// txID is the hex encoded hash of the bytes as a types.Tx.
func txID(tx []byte) string {
	return fmt.Sprintf("%X", types.Tx(tx).Hash())
}
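// Illustrative sketch (not part of this file's API): the LRU behavior of
// mapTxCache. Pushing a duplicate refreshes its recency and returns false;
// once the cache is full, the least-recently-used hash is evicted.
//
//	c := newMapTxCache(2)
//	c.Push(types.Tx("a")) // true
//	c.Push(types.Tx("b")) // true
//	c.Push(types.Tx("a")) // false: duplicate, "a" becomes most recent
//	c.Push(types.Tx("c")) // true: evicts "b", the least recently used
//	fmt.Printf("%X\n", TxKey(types.Tx("a"))) // the sha256 key used by txsMap and the cache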