package mempool

import (
	"crypto/md5"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
	config := cfg.ResetTestRoot("mempool_test")

	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}
	mempool := NewMempool(config.Mempool, appConnMem, 0)
	mempool.SetLogger(log.TestingLogger())
	return mempool
}
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}
func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
	txs := make(types.Txs, count)
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mempool.CheckTx(txBytes, nil); err != nil {
			// Skip invalid txs: TestMempoolFilters asserts the number of
			// txs returned, so it would fail otherwise.
			if IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking tx #%d", err, i)
		}
	}
	return txs
}
func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool := newMempoolWithApp(cc)

	// Ensure the gas calculation behaves as expected.
	checkTxs(t, mempool, 1)
	tx0 := mempool.TxsFront().Value.(*mempoolTx)
	// Assert that the kvstore app reports gasWanted = 1.
	require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// Ensure each tx is 20 bytes long.
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mempool.Flush()

	// Each table-driven test creates numTxsToCreate txs with CheckTx and
	// clears all remaining txs at the end.
	// Each tx has 20 bytes + amino overhead = 21 bytes, and 1 gas.
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 22, 10, 1},
		{20, 220, -1, 10},
		{20, 220, 5, 5},
		{20, 220, 10, 10},
		{20, 220, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
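	// Worked example for the table above (an illustrative sketch, not an
	// extra assertion): at 21 bytes and 1 gas per tx, {20, 220, -1, 10}
	// reaps floor(220/21) = 10 txs, while {20, 220, 5, 5} is further capped
	// to 5 txs by maxGas.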
	for tcIndex, tt := range tests {
		checkTxs(t, mempool, tt.numTxsToCreate)
		got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mempool.Flush()
	}
}
func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool := newMempoolWithApp(cc)
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// Each table-driven test creates numTxsToCreate txs with CheckTx and
	// clears all remaining txs at the end.
	// Each tx has 20 bytes + amino overhead = 21 bytes, and 1 gas.
	tests := []struct {
		numTxsToCreate int
		preFilter      PreCheckFunc
		postFilter     PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, PreCheckAminoMaxBytes(10), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(20), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, PostCheckMaxGas(0), 0},
		{10, nopPreFilter, PostCheckMaxGas(1), 10},
		{10, nopPreFilter, PostCheckMaxGas(3000), 10},
		{10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0},
		{10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0},
	}
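	// How the thresholds play out (illustrative, from the 21-byte size noted
	// above): PreCheckAminoMaxBytes(20) rejects every 21-byte tx while
	// PreCheckAminoMaxBytes(22) admits them all, and PostCheckMaxGas(0)
	// rejects every tx since each wants 1 gas.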
	for tcIndex, tt := range tests {
		mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter)
		checkTxs(t, mempool, tt.numTxsToCreate)
		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mempool.Flush()
	}
}
func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool := newMempoolWithApp(cc)
	mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
	err := mempool.CheckTx([]byte{0x01}, nil)
	if assert.Error(t, err) {
		assert.Equal(t, ErrTxInCache, err)
	}
}
func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool := newMempoolWithApp(cc)
	mempool.EnableTxsAvailable()

	timeoutMS := 500

	// With no txs, it shouldn't fire.
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// Send a bunch of txs; it should only fire once.
	txs := checkTxs(t, mempool, 100)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// Call Update with half the txs. It should fire once for the new height,
	// since there are still txs left.
	committedTxs, txs := txs[:50], txs[50:]
	if err := mempool.Update(1, committedTxs, nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// Send a bunch more txs. We already fired for this height, so it
	// shouldn't fire again.
	moreTxs := checkTxs(t, mempool, 50)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// Now call Update with all the txs. It should not fire, as there are no
	// txs left.
	committedTxs = append(txs, moreTxs...)
	if err := mempool.Update(2, committedTxs, nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// Send a bunch more txs; it should only fire once.
	checkTxs(t, mempool, 100)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
}
func TestSerialReap(t *testing.T) {
	app := counter.NewCounterApplication(true)
	app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
	cc := proxy.NewLocalClientCreator(app)

	mempool := newMempoolWithApp(cc)
	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mempool.CheckTx(txBytes, nil)
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no error for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return an error.
			err = mempool.CheckTx(txBytes, nil)
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}
	reapCheck := func(exp int) {
		txs := mempool.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, exp, len(txs), "Expected to reap %v txs but got %v", exp, len(txs))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mempool.Update(0, txs, nil, nil); err != nil {
			t.Error(err)
		}
	}
	commitRange := func(start, end int) {
		// Deliver txs on the consensus connection.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(txBytes)
			if err != nil {
				t.Errorf("Client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("Error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		// Commit the block.
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("Client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("Error committing. Hash:%X", res.Data)
		}
	}
	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount.
	reapCheck(100)

	// Deliver 0 to 999; we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount.
	reapCheck(1000)

	// Commit from the consensus AppConn.
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid (already cached) txs and 100 valid txs.
	deliverTxsRange(900, 1100)

	// We should have 600 now: the 500 uncommitted txs (500-999) plus the 100
	// fresh ones (1000-1099); the re-delivered 900-999 hit the cache.
	reapCheck(600)
}
func TestCacheRemove(t *testing.T) {
	cache := newMapTxCache(100)
	numTxs := 10
	txs := make([][]byte, numTxs)
	for i := 0; i < numTxs; i++ {
		// Probability of collision is 2**-256.
		txBytes := make([]byte, 32)
		rand.Read(txBytes)
		txs[i] = txBytes
		cache.Push(txBytes)
		// Make sure it's added to both the linked list and the map.
		require.Equal(t, i+1, len(cache.map_))
		require.Equal(t, i+1, cache.list.Len())
	}
	for i := 0; i < numTxs; i++ {
		cache.Remove(txs[i])
		// Make sure it's removed from both the map and the linked list.
		require.Equal(t, numTxs-(i+1), len(cache.map_))
		require.Equal(t, numTxs-(i+1), cache.list.Len())
	}
}
func TestMempoolCloseWAL(t *testing.T) {
	// 1. Create the temporary directory for mempool and WAL testing.
	rootDir, err := ioutil.TempDir("", "mempool-test")
	require.Nil(t, err, "expecting successful tmpdir creation")
	defer os.RemoveAll(rootDir)

	// 2. Ensure that it doesn't contain any elements -- sanity check.
	m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 0, len(m1), "no matches yet")

	// 3. Create the mempool.
	wcfg := cfg.DefaultMempoolConfig()
	wcfg.RootDir = rootDir
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	appConnMem, _ := cc.NewABCIClient()
	mempool := NewMempool(wcfg, appConnMem, 10)
	mempool.InitWAL()

	// 4. Ensure that the directory contains the WAL file.
	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m2), "expecting the wal file to be the only match")

	// 5. Write some contents to the WAL.
	mempool.CheckTx(types.Tx([]byte("foo")), nil)
	walFilepath := mempool.wal.Path
	sum1 := checksumFile(walFilepath, t)

	// 6. Sanity check to ensure that the written TX matches the expectation.
	require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")

	// 7. Invoke CloseWAL() and ensure it discards the WAL,
	// so any further write won't go through.
	mempool.CloseWAL()
	mempool.CheckTx(types.Tx([]byte("bar")), nil)
	sum2 := checksumFile(walFilepath, t)
	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

	// 8. Sanity check to ensure that the WAL file still exists.
	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m3), "expecting the wal file to be the only match")
}
// txMessageSize returns the size of the amino-encoded TxMessage: the length
// of the encoded byte array, plus 1 for the struct field, plus 4 for the
// amino prefix.
func txMessageSize(tx types.Tx) int {
	return amino.ByteSliceSize(tx) + 1 + 4
}
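// For instance (a back-of-the-envelope check, assuming amino's uvarint
// length prefix fits in a single byte for short slices): a 10-byte tx gives
// amino.ByteSliceSize(tx) = 1 + 10 = 11, so txMessageSize(tx) = 11 + 1 + 4 = 16.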
func TestMempoolMaxMsgSize(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempl := newMempoolWithApp(cc)

	testCases := []struct {
		len int
		err bool
	}{
		// Check small txs. No error.
		{10, false},
		{1000, false},
		{1000000, false},

		// Check around maxTxSize:
		// changes from no error to error.
		{maxTxSize - 2, false},
		{maxTxSize - 1, false},
		{maxTxSize, false},
		{maxTxSize + 1, true},
		{maxTxSize + 2, true},

		// Check around maxMsgSize. All error.
		{maxMsgSize - 1, true},
		{maxMsgSize, true},
		{maxMsgSize + 1, true},
	}
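	// What the boundaries encode (as the assertions below spell out): a tx of
	// up to maxTxSize bytes passes CheckTx and its encoded TxMessage fits in
	// maxMsgSize, while anything larger fails CheckTx with ErrTxTooLarge and
	// encodes to more than maxMsgSize bytes.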
	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := cmn.RandBytes(testCase.len)
		err := mempl.CheckTx(tx, nil)
		msg := &TxMessage{tx}
		encoded := cdc.MustMarshalBinaryBare(msg)
		require.Equal(t, len(encoded), txMessageSize(tx), caseString)
		if !testCase.err {
			require.True(t, len(encoded) <= maxMsgSize, caseString)
			require.NoError(t, err, caseString)
		} else {
			require.True(t, len(encoded) > maxMsgSize, caseString)
			require.Equal(t, ErrTxTooLarge, err, caseString)
		}
	}
}
func checksumIt(data []byte) string {
	h := md5.New()
	h.Write(data)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func checksumFile(p string, t *testing.T) string {
	data, err := ioutil.ReadFile(p)
	require.Nil(t, err, "expecting successful read of %q", p)
	return checksumIt(data)
}