package mempool

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)
// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
	*kvstore.Application
}

type testTx struct {
	tx       types.Tx
	priority int64
}

func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var (
		priority int64
		sender   string
	)

	// infer the priority from the raw transaction value (sender=key=value)
	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 3 {
		v, err := strconv.ParseInt(string(parts[2]), 10, 64)
		if err != nil {
			return abci.ResponseCheckTx{
				Priority:  priority,
				Code:      100,
				GasWanted: 1,
			}
		}

		priority = v
		sender = string(parts[0])
	} else {
		return abci.ResponseCheckTx{
			Priority:  priority,
			Code:      101,
			GasWanted: 1,
		}
	}

	return abci.ResponseCheckTx{
		Priority:  priority,
		Sender:    sender,
		Code:      code.CodeTypeOK,
		GasWanted: 1,
	}
}
// setup constructs a TxMempool backed by the priority-aware kvstore
// application with the given cache size, starts a local ABCI connection, and
// registers cleanup of the test root directory via t.Cleanup.
func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
	t.Helper()

	var cancel context.CancelFunc
	ctx, cancel = context.WithCancel(ctx)

	app := &application{kvstore.NewApplication()}
	cc := abciclient.NewLocalCreator(app)
	logger := log.TestingLogger()

	cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|"))
	require.NoError(t, err)
	cfg.Mempool.CacheSize = cacheSize

	appConnMem, err := cc(logger)
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start(ctx))

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		cancel()
		appConnMem.Wait()
	})

	return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}
// checkTxs generates numTxs transactions of the form
// sender-<i>-<peerID>=<random hex>=<priority> with random priorities and
// submits each one to the mempool via CheckTx on behalf of peerID.
func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
	t.Helper()

	txs := make([]testTx, numTxs)
	txInfo := TxInfo{SenderID: peerID}

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < numTxs; i++ {
		prefix := make([]byte, 20)
		_, err := rng.Read(prefix)
		require.NoError(t, err)

		priority := int64(rng.Intn(9999-1000) + 1000)

		txs[i] = testTx{
			tx:       []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
			priority: priority,
		}
		require.NoError(t, txmp.CheckTx(ctx, txs[i].tx, nil, txInfo))
	}

	return txs
}

// convertTex extracts the raw transactions from a slice of testTx.
func convertTex(in []testTx) types.Txs {
	out := make([]types.Tx, len(in))
	for idx := range in {
		out[idx] = in[idx].tx
	}

	return out
}
func TestTxMempool_TxsAvailable(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)
	txmp.EnableTxsAvailable()

	ensureNoTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
			require.Fail(t, "unexpected transactions event")
		case <-timer.C:
		}
	}

	ensureTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
		case <-timer.C:
			require.Fail(t, "expected transactions event")
		}
	}

	// ensure no event as we have not executed any transactions yet
	ensureNoTxFire()

	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
	// once.
	txs := checkTxs(ctx, t, txmp, 100, 0)
	ensureTxFire()
	ensureNoTxFire()

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// commit half the transactions and ensure we fire an event
	txmp.Lock()
	require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()
	ensureTxFire()
	ensureNoTxFire()

	// Execute CheckTx for more transactions and ensure we do not fire another
	// event as we're still on the same height (1).
	_ = checkTxs(ctx, t, txmp, 100, 0)
	ensureNoTxFire()
}
func TestTxMempool_Size(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)
	txs := checkTxs(ctx, t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, len(rawTxs)/2, txmp.Size())
	require.Equal(t, int64(2850), txmp.SizeBytes())
}

func TestTxMempool_Flush(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)
	txs := checkTxs(ctx, t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	txmp.Flush()
	require.Zero(t, txmp.Size())
	require.Equal(t, int64(0), txmp.SizeBytes())
}
func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)
	tTxs := checkTxs(ctx, t, txmp, 100, 0) // all txs request 1 gas unit
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap by gas capacity only
	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 50)

	// reap by transaction bytes only
	reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.GreaterOrEqual(t, len(reapedTxs), 16)

	// Reap by both transaction bytes and gas, where the size yields 31 reaped
	// transactions and the gas limit reaps 25 transactions.
	reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 25)
}
func TestTxMempool_ReapMaxTxs(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)
	tTxs := checkTxs(ctx, t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap all transactions
	reapedTxs := txmp.ReapMaxTxs(-1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs))

	// reap a single transaction
	reapedTxs = txmp.ReapMaxTxs(1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 1)

	// reap half of the transactions
	reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs)/2)
}
func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 0)

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	tx := make([]byte, txmp.config.MaxTxBytes+1)
	_, err := rng.Read(tx)
	require.NoError(t, err)

	require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0}))

	tx = make([]byte, txmp.config.MaxTxBytes-1)
	_, err = rng.Read(tx)
	require.NoError(t, err)

	require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0}))
}

func TestTxMempool_CheckTxSamePeer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix := make([]byte, 20)
	_, err := rng.Read(prefix)
	require.NoError(t, err)

	tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))

	require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID}))
	require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID}))
}

func TestTxMempool_CheckTxSameSender(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix1 := make([]byte, 20)
	_, err := rng.Read(prefix1)
	require.NoError(t, err)

	prefix2 := make([]byte, 20)
	_, err = rng.Read(prefix2)
	require.NoError(t, err)

	tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
	tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))

	require.NoError(t, txmp.CheckTx(ctx, tx1, nil, TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
	require.NoError(t, txmp.CheckTx(ctx, tx2, nil, TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
}
func TestTxMempool_ConcurrentTxs(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(ctx, t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}
func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	txmp := setup(ctx, t, 500)
	txmp.height = 100
	txmp.config.TTLNumBlocks = 10

	tTxs := checkTxs(ctx, t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, 100, txmp.heightIndex.Size())

	// reap 5 txs at the next height -- no txs should expire
	reapedTxs := txmp.ReapMaxTxs(5)
	responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, 95, txmp.Size())
	require.Equal(t, 95, txmp.heightIndex.Size())

	// check more txs at height 101
	_ = checkTxs(ctx, t, txmp, 50, 1)
	require.Equal(t, 145, txmp.Size())
	require.Equal(t, 145, txmp.heightIndex.Size())

	// Reap 5 txs at a height that would expire all the transactions from before
	// the previous Update (height 100).
	//
	// NOTE: When we reap txs below, we do not know if we're picking txs from the
	// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
	// cannot guarantee exactly how many of the 95 expirable txs remain, but we
	// do know that at most 95 txs can be expired and removed.
	reapedTxs = txmp.ReapMaxTxs(5)
	responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.GreaterOrEqual(t, txmp.Size(), 45)
	require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}
func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cases := []struct {
		name string
		err  error
	}{
		{
			name: "error",
			err:  errors.New("test error"),
		},
		{
			name: "no error",
			err:  nil,
		},
	}
	for _, tc := range cases {
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
				return testCase.err
			}
			txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn))
			rng := rand.New(rand.NewSource(time.Now().UnixNano()))
			tx := make([]byte, txmp.config.MaxTxBytes-1)
			_, err := rng.Read(tx)
			require.NoError(t, err)

			callback := func(res *abci.ResponseCheckTx) {
				expectedErrString := ""
				if testCase.err != nil {
					expectedErrString = testCase.err.Error()
				}
				require.Equal(t, expectedErrString, res.MempoolError)
			}
			require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0}))
		})
	}
}