package v1

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
	*kvstore.Application
}

type testTx struct {
	tx       types.Tx
	priority int64
}

func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var (
		priority int64
		sender   string
	)

	// infer the priority from the raw transaction value (sender=key=value)
	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 3 {
		v, err := strconv.ParseInt(string(parts[2]), 10, 64)
		if err != nil {
			return abci.ResponseCheckTx{
				Priority:  priority,
				Code:      100,
				GasWanted: 1,
			}
		}

		priority = v
		sender = string(parts[0])
	} else {
		return abci.ResponseCheckTx{
			Priority:  priority,
			Code:      101,
			GasWanted: 1,
		}
	}

	return abci.ResponseCheckTx{
		Priority:  priority,
		Sender:    sender,
		Code:      code.CodeTypeOK,
		GasWanted: 1,
	}
}

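// setup builds a TxMempool for testing, backed by the priority-aware kvstore
// application over a local ABCI client, applies the given cache size, and
// registers cleanup of the test root directory and the client on teardown.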
func setup(t testing.TB, cacheSize int) *TxMempool {
	t.Helper()

	app := &application{kvstore.NewApplication()}
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.ResetTestRoot(t.Name())
	cfg.Mempool.CacheSize = cacheSize

	appConnMem, err := cc.NewABCIClient()
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start())

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		require.NoError(t, appConnMem.Stop())
	})

	return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0)
}

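// checkTxs submits numTxs transactions of the form sender-<i>=<key>=<priority>
// with random keys and priorities via CheckTx, attributing each to peerID, and
// returns the submitted transactions for later assertions.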
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
	txs := make([]testTx, numTxs)
	txInfo := mempool.TxInfo{SenderID: peerID}

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < numTxs; i++ {
		prefix := make([]byte, 20)
		_, err := rng.Read(prefix)
		require.NoError(t, err)

		// sender := make([]byte, 10)
		// _, err = rng.Read(sender)
		// require.NoError(t, err)

		priority := int64(rng.Intn(9999-1000) + 1000)

		txs[i] = testTx{
			tx:       []byte(fmt.Sprintf("sender-%d=%X=%d", i, prefix, priority)),
			priority: priority,
		}
		require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo))
	}

	return txs
}

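// TestTxMempool_TxsAvailable verifies that the TxsAvailable channel fires once
// when transactions first enter the mempool, fires again after an Update
// advances the height, and does not fire for further CheckTx calls at the same
// height.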
func TestTxMempool_TxsAvailable(t *testing.T) {
	txmp := setup(t, 0)
	txmp.EnableTxsAvailable()

	ensureNoTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
			require.Fail(t, "unexpected transactions event")
		case <-timer.C:
		}
	}

	ensureTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
		case <-timer.C:
			require.Fail(t, "expected transactions event")
		}
	}

	// ensure no event as we have not executed any transactions yet
	ensureNoTxFire()

	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
	// once.
	txs := checkTxs(t, txmp, 100, 0)
	ensureTxFire()
	ensureNoTxFire()

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// commit half the transactions and ensure we fire an event
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()
	ensureTxFire()
	ensureNoTxFire()

	// Execute CheckTx for more transactions and ensure we do not fire another
	// event as we're still on the same height (1).
	_ = checkTxs(t, txmp, 100, 0)
	ensureNoTxFire()
}

func TestTxMempool_Size(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, len(rawTxs)/2, txmp.Size())
	require.Equal(t, int64(2750), txmp.SizeBytes())
}

func TestTxMempool_Flush(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	txmp.Flush()
	require.Zero(t, txmp.Size())
	require.Equal(t, int64(0), txmp.SizeBytes())
}

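// TestTxMempool_ReapMaxBytesMaxGas verifies that reaping honors the byte and
// gas limits both independently and together, and that reaped transactions
// always come back in decreasing priority order.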
func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())

	txMap := make(map[[mempool.TxKeySize]byte]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[mempool.TxKey(tTx.tx)] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap by gas capacity only
	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, 50)

	// reap by transaction bytes only
	reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, 17)

	// Reap by both transaction bytes and gas, where the byte limit of 1500 is
	// the binding constraint and yields 26 reaped transactions, fewer than the
	// 30 the gas limit alone would allow.
	reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, 26)
}

func TestTxMempool_ReapMaxTxs(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())

	txMap := make(map[[mempool.TxKeySize]byte]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[mempool.TxKey(tTx.tx)] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap all transactions
	reapedTxs := txmp.ReapMaxTxs(-1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs))

	// reap a single transaction
	reapedTxs = txmp.ReapMaxTxs(1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, 1)

	// reap half of the transactions
	reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5490), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs)/2)
}

func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
	txmp := setup(t, 0)

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	tx := make([]byte, txmp.config.MaxTxsBytes+1)
	_, err := rng.Read(tx)
	require.NoError(t, err)

	require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
}

func TestTxMempool_CheckTxSamePeer(t *testing.T) {
	txmp := setup(t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix := make([]byte, 20)
	_, err := rng.Read(prefix)
	require.NoError(t, err)

	tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))

	require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID}))
	require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID}))
}

func TestTxMempool_CheckTxSameSender(t *testing.T) {
	txmp := setup(t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix1 := make([]byte, 20)
	_, err := rng.Read(prefix1)
	require.NoError(t, err)

	prefix2 := make([]byte, 20)
	_, err = rng.Read(prefix2)
	require.NoError(t, err)

	tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
	tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))

	require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, mempool.TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
	require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
}

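// TestTxMempool_ConcurrentTxs exercises CheckTx and Update concurrently: one
// goroutine keeps submitting batches of transactions while another reaps and
// commits them, marking every tenth DeliverTx response as failed, until the
// mempool drains.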
func TestTxMempool_ConcurrentTxs(t *testing.T) {
	txmp := setup(t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}