diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index ecb4f7e0a..4d76de6a5 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -52,6 +52,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
   - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
   - [libs/rand] \#6364 Removed most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
+  - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
+    Some core types have been kept in the `mempool` package, such as `TxCache` and its implementations, the `Mempool` interface itself,
+    and `TxInfo`. (@alexanderbez)

 - Blockchain Protocol

@@ -70,6 +73,13 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer exchange reactors
   behave the same. (@cmwaters)
 - [crypto] \#6376 Enable sr25519 as a validator key
+- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
+  - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` determines the prioritization of
+    the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
+  - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
+    `mempool.version` configuration, where `v1` is the default.
+  - Applications that do not specify a priority (i.e. priority zero) will have their transactions reaped in the order in which they are received by the node.
+  - Transactions are gossiped in FIFO order, as they are in `v0`.
 - [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)

 ### IMPROVEMENTS
diff --git a/UPGRADING.md b/UPGRADING.md
index 61c2ca528..3c1d38658 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -7,10 +7,13 @@ This guide provides instructions for upgrading to specific versions of Tendermin
 ### ABCI Changes

 * Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid unimplemented changes errors.
-* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in the early ABCI implementation's.
-* Messages are written to a byte stream using uin64 length delimiters instead of int64.
+* When mempool `v1` is enabled, transactions broadcast via `sync` mode may return a successful
+  response with a transaction hash, indicating that the transaction was successfully inserted into
+  the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
+  evict or even drop this transaction after the hash has been returned. Thus, the user or client must
+  query for that transaction to check if it is still in the mempool.
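To make the new `ResponseCheckTx` fields concrete, here is a minimal, hedged sketch of an ABCI application (not part of the diff) that sets them during `CheckTx`. The `"sender=<name>,fee=<amount>"` transaction format and the fee-as-priority policy are assumptions made purely for illustration; any application-defined `int64` can serve as the priority.

```go
// Package app sketches a hypothetical ABCI application. It only illustrates
// how an application might fill in the new Sender and Priority fields of
// ResponseCheckTx for the v1 (prioritized) mempool.
package app

import (
	"bytes"
	"strconv"

	abci "github.com/tendermint/tendermint/abci/types"
)

// App embeds BaseApplication so that only CheckTx needs to be overridden.
type App struct {
	abci.BaseApplication
}

// CheckTx parses the assumed "sender=<name>,fee=<amount>" format and reports
// the fee as the mempool priority and the sender string as the mempool index.
func (App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var (
		sender string
		fee    int64
	)

	for _, kv := range bytes.Split(req.Tx, []byte(",")) {
		parts := bytes.SplitN(kv, []byte("="), 2)
		if len(parts) != 2 {
			return abci.ResponseCheckTx{Code: 1, Log: "malformed tx"}
		}

		switch string(parts[0]) {
		case "sender":
			sender = string(parts[1])
		case "fee":
			f, err := strconv.ParseInt(string(parts[1]), 10, 64)
			if err != nil {
				return abci.ResponseCheckTx{Code: 1, Log: "invalid fee"}
			}
			fee = f
		}
	}

	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: fee,    // the v1 mempool reaps higher-priority txs first
		Sender:   sender, // acts as an index into the mempool
	}
}
```

With the `v1` reactor enabled, transactions carrying a higher `Priority` are reaped first when a proposer builds a block; a zero priority keeps the FIFO ordering described in the changelog entry above, and gossip remains FIFO in both versions.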
### Config Changes diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index fd4bb3369..566bb29ac 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1835,6 +1835,8 @@ type ResponseCheckTx struct { GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } @@ -1926,6 +1928,20 @@ func (m *ResponseCheckTx) GetCodespace() string { return "" } +func (m *ResponseCheckTx) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *ResponseCheckTx) GetPriority() int64 { + if m != nil { + return m.Priority + } + return 0 +} + type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2938,169 +2954,170 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 2585 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7, - 0x11, 0xc6, 0x1b, 0xd8, 0x26, 0x01, 0x82, 0x23, 0x5a, 0x86, 0x60, 0x89, 0x94, 0x57, 0x25, 0xc7, - 0x92, 0x6d, 0x32, 0xa6, 0x4a, 0x8a, 0x5c, 0xce, 0xc3, 0x04, 0x04, 0x05, 0xb4, 0x18, 0x92, 0x19, - 0x42, 0x72, 0x39, 0x89, 0xb5, 0x5e, 0x60, 0x87, 0xc0, 0x5a, 0xc0, 0xee, 0x1a, 0x3b, 0xa0, 0x48, - 0x1f, 0xf3, 0xb8, 0xa8, 0x72, 0xd0, 0x31, 0x17, 0x57, 0xe5, 0x1f, 0xe4, 0x9a, 0x53, 0x4e, 0x39, - 0xf8, 0x90, 0x54, 0xf9, 0x98, 0x93, 0x93, 0x92, 0x6e, 0xf9, 0x03, 0x39, 0xa5, 0x2a, 0x35, 0x8f, - 0x7d, 0x01, 0x58, 0x00, 0x8c, 0x73, 0xcb, 0x6d, 0xa6, 0xb7, 0xbb, 0x31, 0xd3, 0x33, 0xdd, 0xfd, - 0x75, 0x0f, 0xe0, 0x35, 0x4a, 0x2c, 0x83, 0x0c, 0x07, 0xa6, 0x45, 0xb7, 0xf4, 0x76, 0xc7, 0xdc, - 0xa2, 0x67, 0x0e, 0x71, 0x37, 0x9d, 0xa1, 0x4d, 0x6d, 0xb4, 0x12, 0x7c, 0xdc, 0x64, 0x1f, 0xab, - 0x57, 0x42, 0xdc, 0x9d, 0xe1, 0x99, 0x43, 0xed, 0x2d, 0x67, 0x68, 0xdb, 0xc7, 0x82, 0xbf, 0x7a, - 0x39, 0xf4, 0x99, 0xeb, 0x09, 0x6b, 0x8b, 0x7c, 0x95, 0xc2, 0x4f, 0xc8, 0x99, 0xf7, 0xf5, 0xca, - 0x84, 0xac, 0xa3, 0x0f, 0xf5, 0x81, 0xf7, 0x79, 0xa3, 0x6b, 0xdb, 0xdd, 0x3e, 0xd9, 0xe2, 0xb3, - 0xf6, 0xe8, 0x78, 0x8b, 0x9a, 0x03, 0xe2, 0x52, 0x7d, 0xe0, 0x48, 0x86, 0xb5, 0xae, 0xdd, 0xb5, - 0xf9, 0x70, 0x8b, 0x8d, 0x04, 0x55, 0xfd, 0x6b, 0x1e, 0xf2, 0x98, 0x7c, 0x3e, 0x22, 0x2e, 0x45, - 0xdb, 0x90, 0x21, 0x9d, 0x9e, 0x5d, 0x49, 0x5e, 0x4d, 0xbe, 0xb9, 0xb4, 0x7d, 0x79, 0x73, 0x6c, - 0x73, 0x9b, 0x92, 0xaf, 0xd1, 0xe9, 0xd9, 0xcd, 0x04, 0xe6, 0xbc, 0xe8, 0x36, 0x64, 0x8f, 0xfb, - 0x23, 0xb7, 0x57, 0x49, 0x71, 0xa1, 0x2b, 0x71, 0x42, 0xf7, 0x19, 0x53, 0x33, 0x81, 0x05, 0x37, - 0xfb, 0x29, 0xd3, 0x3a, 0xb6, 0x2b, 0xe9, 0xd9, 0x3f, 0xb5, 0x6b, 0x1d, 0xf3, 0x9f, 0x62, 0xbc, - 0xa8, 0x06, 0x60, 0x5a, 0x26, 0xd5, 0x3a, 0x3d, 0xdd, 0xb4, 0x2a, 0x19, 0x2e, 0xf9, 0x7a, 0xbc, - 0xa4, 0x49, 0xeb, 0x8c, 0xb1, 0x99, 0xc0, 0x8a, 0xe9, 0x4d, 0xd8, 0x72, 0x3f, 0x1f, 0x91, 0xe1, - 0x59, 0x25, 0x3b, 0x7b, 0xb9, 0x3f, 0x65, 0x4c, 0x6c, 0xb9, 0x9c, 0x1b, 0x35, 0x60, 0xa9, 0x4d, - 0xba, 0xa6, 0xa5, 0xb5, 0xfb, 0x76, 0xe7, 0x49, 0x25, 0xc7, 
0x85, 0xd5, 0x38, 0xe1, 0x1a, 0x63, - 0xad, 0x31, 0xce, 0x66, 0x02, 0x43, 0xdb, 0x9f, 0xa1, 0xef, 0x43, 0xa1, 0xd3, 0x23, 0x9d, 0x27, - 0x1a, 0x3d, 0xad, 0xe4, 0xb9, 0x8e, 0x8d, 0x38, 0x1d, 0x75, 0xc6, 0xd7, 0x3a, 0x6d, 0x26, 0x70, - 0xbe, 0x23, 0x86, 0x6c, 0xff, 0x06, 0xe9, 0x9b, 0x27, 0x64, 0xc8, 0xe4, 0x0b, 0xb3, 0xf7, 0x7f, - 0x4f, 0x70, 0x72, 0x0d, 0x8a, 0xe1, 0x4d, 0xd0, 0x8f, 0x40, 0x21, 0x96, 0x21, 0xb7, 0xa1, 0x70, - 0x15, 0x57, 0x63, 0xcf, 0xd9, 0x32, 0xbc, 0x4d, 0x14, 0x88, 0x1c, 0xa3, 0xbb, 0x90, 0xeb, 0xd8, - 0x83, 0x81, 0x49, 0x2b, 0xc0, 0xa5, 0xd7, 0x63, 0x37, 0xc0, 0xb9, 0x9a, 0x09, 0x2c, 0xf9, 0xd1, - 0x3e, 0x94, 0xfa, 0xa6, 0x4b, 0x35, 0xd7, 0xd2, 0x1d, 0xb7, 0x67, 0x53, 0xb7, 0xb2, 0xc4, 0x35, - 0x5c, 0x8f, 0xd3, 0xb0, 0x67, 0xba, 0xf4, 0xc8, 0x63, 0x6e, 0x26, 0x70, 0xb1, 0x1f, 0x26, 0x30, - 0x7d, 0xf6, 0xf1, 0x31, 0x19, 0xfa, 0x0a, 0x2b, 0xcb, 0xb3, 0xf5, 0x1d, 0x30, 0x6e, 0x4f, 0x9e, - 0xe9, 0xb3, 0xc3, 0x04, 0xf4, 0x73, 0xb8, 0xd0, 0xb7, 0x75, 0xc3, 0x57, 0xa7, 0x75, 0x7a, 0x23, - 0xeb, 0x49, 0xa5, 0xc8, 0x95, 0xde, 0x88, 0x5d, 0xa4, 0xad, 0x1b, 0x9e, 0x8a, 0x3a, 0x13, 0x68, - 0x26, 0xf0, 0x6a, 0x7f, 0x9c, 0x88, 0x1e, 0xc3, 0x9a, 0xee, 0x38, 0xfd, 0xb3, 0x71, 0xed, 0x25, - 0xae, 0xfd, 0x66, 0x9c, 0xf6, 0x1d, 0x26, 0x33, 0xae, 0x1e, 0xe9, 0x13, 0xd4, 0x5a, 0x1e, 0xb2, - 0x27, 0x7a, 0x7f, 0x44, 0xd4, 0xef, 0xc0, 0x52, 0xc8, 0x4d, 0x51, 0x05, 0xf2, 0x03, 0xe2, 0xba, - 0x7a, 0x97, 0x70, 0xaf, 0x56, 0xb0, 0x37, 0x55, 0x4b, 0xb0, 0x1c, 0x76, 0x4d, 0xf5, 0x79, 0xd2, - 0x97, 0x64, 0x5e, 0xc7, 0x24, 0x4f, 0xc8, 0xd0, 0x35, 0x6d, 0xcb, 0x93, 0x94, 0x53, 0x74, 0x0d, - 0x8a, 0xfc, 0xfe, 0x68, 0xde, 0x77, 0xe6, 0xfa, 0x19, 0xbc, 0xcc, 0x89, 0x8f, 0x24, 0xd3, 0x06, - 0x2c, 0x39, 0xdb, 0x8e, 0xcf, 0x92, 0xe6, 0x2c, 0xe0, 0x6c, 0x3b, 0x1e, 0xc3, 0xeb, 0xb0, 0xcc, - 0x76, 0xea, 0x73, 0x64, 0xf8, 0x8f, 0x2c, 0x31, 0x9a, 0x64, 0x51, 0xff, 0x92, 0x82, 0xf2, 0xb8, - 0x3b, 0xa3, 0xbb, 0x90, 0x61, 0x91, 0x4d, 0x06, 0xa9, 0xea, 0xa6, 0x08, 0x7b, 0x9b, 0x5e, 0xd8, - 0xdb, 0x6c, 0x79, 0x61, 0xaf, 0x56, 0xf8, 0xea, 0x9b, 0x8d, 0xc4, 0xf3, 0xbf, 0x6f, 0x24, 0x31, - 0x97, 0x40, 0x97, 0x98, 0xf7, 0xe9, 0xa6, 0xa5, 0x99, 0x06, 0x5f, 0xb2, 0xc2, 0x5c, 0x4b, 0x37, - 0xad, 0x5d, 0x03, 0xed, 0x41, 0xb9, 0x63, 0x5b, 0x2e, 0xb1, 0xdc, 0x91, 0xab, 0x89, 0xb0, 0x2a, - 0x43, 0x53, 0xc4, 0xc1, 0x44, 0xb0, 0xae, 0x7b, 0x9c, 0x87, 0x9c, 0x11, 0xaf, 0x74, 0xa2, 0x04, - 0x74, 0x1f, 0xe0, 0x44, 0xef, 0x9b, 0x86, 0x4e, 0xed, 0xa1, 0x5b, 0xc9, 0x5c, 0x4d, 0x4f, 0xf5, - 0xb2, 0x47, 0x1e, 0xcb, 0x43, 0xc7, 0xd0, 0x29, 0xa9, 0x65, 0xd8, 0x72, 0x71, 0x48, 0x12, 0xbd, - 0x01, 0x2b, 0xba, 0xe3, 0x68, 0x2e, 0xd5, 0x29, 0xd1, 0xda, 0x67, 0x94, 0xb8, 0x3c, 0x6c, 0x2d, - 0xe3, 0xa2, 0xee, 0x38, 0x47, 0x8c, 0x5a, 0x63, 0x44, 0x74, 0x1d, 0x4a, 0x2c, 0xc2, 0x99, 0x7a, - 0x5f, 0xeb, 0x11, 0xb3, 0xdb, 0xa3, 0x3c, 0x40, 0xa5, 0x71, 0x51, 0x52, 0x9b, 0x9c, 0xa8, 0x1a, - 0xfe, 0x89, 0xf3, 0xe8, 0x86, 0x10, 0x64, 0x0c, 0x9d, 0xea, 0xdc, 0x92, 0xcb, 0x98, 0x8f, 0x19, - 0xcd, 0xd1, 0x69, 0x4f, 0xda, 0x87, 0x8f, 0xd1, 0x45, 0xc8, 0x49, 0xb5, 0x69, 0xae, 0x56, 0xce, - 0xd0, 0x1a, 0x64, 0x9d, 0xa1, 0x7d, 0x42, 0xf8, 0xd1, 0x15, 0xb0, 0x98, 0xa8, 0xbf, 0x4e, 0xc1, - 0xea, 0x44, 0x1c, 0x64, 0x7a, 0x7b, 0xba, 0xdb, 0xf3, 0x7e, 0x8b, 0x8d, 0xd1, 0x1d, 0xa6, 0x57, - 0x37, 0xc8, 0x50, 0xe6, 0x8e, 0xca, 0xa4, 0xa9, 0x9b, 0xfc, 0xbb, 0x34, 0x8d, 0xe4, 0x46, 0x07, - 0x50, 0xee, 0xeb, 0x2e, 0xd5, 0x44, 0x5c, 0xd1, 0x42, 0x79, 0x64, 0x32, 0x9a, 0xee, 0xe9, 0x5e, - 0x24, 0x62, 0x97, 0x5a, 0x2a, 0x2a, 0xf5, 0x23, 0x54, 0x84, 0x61, 0xad, 0x7d, 0xf6, 
0x85, 0x6e, - 0x51, 0xd3, 0x22, 0xda, 0xc4, 0xc9, 0x5d, 0x9a, 0x50, 0xda, 0x38, 0x31, 0x0d, 0x62, 0x75, 0xbc, - 0x23, 0xbb, 0xe0, 0x0b, 0xfb, 0x47, 0xea, 0xaa, 0x18, 0x4a, 0xd1, 0x48, 0x8e, 0x4a, 0x90, 0xa2, - 0xa7, 0xd2, 0x00, 0x29, 0x7a, 0x8a, 0xbe, 0x0b, 0x19, 0xb6, 0x49, 0xbe, 0xf9, 0xd2, 0x94, 0x14, - 0x28, 0xe5, 0x5a, 0x67, 0x0e, 0xc1, 0x9c, 0x53, 0x55, 0x7d, 0x77, 0xf0, 0xa3, 0xfb, 0xb8, 0x56, - 0xf5, 0x06, 0xac, 0x8c, 0x85, 0xef, 0xd0, 0xf9, 0x25, 0xc3, 0xe7, 0xa7, 0xae, 0x40, 0x31, 0x12, - 0xab, 0xd5, 0x8b, 0xb0, 0x36, 0x2d, 0xf4, 0xaa, 0x3d, 0x9f, 0x1e, 0x09, 0xa1, 0xe8, 0x36, 0x14, - 0xfc, 0xd8, 0x2b, 0xdc, 0x71, 0xd2, 0x56, 0x1e, 0x33, 0xf6, 0x59, 0x99, 0x1f, 0xb2, 0x6b, 0xcd, - 0xef, 0x43, 0x8a, 0x2f, 0x3c, 0xaf, 0x3b, 0x4e, 0x53, 0x77, 0x7b, 0xea, 0xa7, 0x50, 0x89, 0x8b, - 0xab, 0x63, 0xdb, 0xc8, 0xf8, 0xd7, 0xf0, 0x22, 0xe4, 0x8e, 0xed, 0xe1, 0x40, 0xa7, 0x5c, 0x59, - 0x11, 0xcb, 0x19, 0xbb, 0x9e, 0x22, 0xc6, 0xa6, 0x39, 0x59, 0x4c, 0x54, 0x0d, 0x2e, 0xc5, 0xc6, - 0x56, 0x26, 0x62, 0x5a, 0x06, 0x11, 0xf6, 0x2c, 0x62, 0x31, 0x09, 0x14, 0x89, 0xc5, 0x8a, 0x09, - 0xfb, 0x59, 0x97, 0xef, 0x95, 0xeb, 0x57, 0xb0, 0x9c, 0xa9, 0xbf, 0x2f, 0x40, 0x01, 0x13, 0xd7, - 0x61, 0x31, 0x01, 0xd5, 0x40, 0x21, 0xa7, 0x1d, 0xe2, 0x50, 0x2f, 0x8c, 0x4e, 0x47, 0x0d, 0x82, - 0xbb, 0xe1, 0x71, 0xb2, 0x94, 0xed, 0x8b, 0xa1, 0x5b, 0x12, 0x95, 0xc5, 0x03, 0x2c, 0x29, 0x1e, - 0x86, 0x65, 0x77, 0x3c, 0x58, 0x96, 0x8e, 0xcd, 0xd2, 0x42, 0x6a, 0x0c, 0x97, 0xdd, 0x92, 0xb8, - 0x2c, 0x33, 0xe7, 0xc7, 0x22, 0xc0, 0xac, 0x1e, 0x01, 0x66, 0xd9, 0x39, 0xdb, 0x8c, 0x41, 0x66, - 0x77, 0x3c, 0x64, 0x96, 0x9b, 0xb3, 0xe2, 0x31, 0x68, 0x76, 0x3f, 0x0a, 0xcd, 0x04, 0xac, 0xba, - 0x16, 0x2b, 0x1d, 0x8b, 0xcd, 0x7e, 0x10, 0xc2, 0x66, 0x85, 0x58, 0x60, 0x24, 0x94, 0x4c, 0x01, - 0x67, 0xf5, 0x08, 0x38, 0x53, 0xe6, 0xd8, 0x20, 0x06, 0x9d, 0x7d, 0x10, 0x46, 0x67, 0x10, 0x0b, - 0xf0, 0xe4, 0x79, 0x4f, 0x83, 0x67, 0xef, 0xf9, 0xf0, 0x6c, 0x29, 0x16, 0x5f, 0xca, 0x3d, 0x8c, - 0xe3, 0xb3, 0x83, 0x09, 0x7c, 0x26, 0xf0, 0xd4, 0x1b, 0xb1, 0x2a, 0xe6, 0x00, 0xb4, 0x83, 0x09, - 0x80, 0x56, 0x9c, 0xa3, 0x70, 0x0e, 0x42, 0xfb, 0xc5, 0x74, 0x84, 0x16, 0x8f, 0xa1, 0xe4, 0x32, - 0x17, 0x83, 0x68, 0x5a, 0x0c, 0x44, 0x5b, 0xe1, 0xea, 0xdf, 0x8a, 0x55, 0x7f, 0x7e, 0x8c, 0x76, - 0x83, 0x65, 0xc8, 0x31, 0x9f, 0x67, 0x51, 0x86, 0x0c, 0x87, 0xf6, 0x50, 0xa2, 0x2d, 0x31, 0x51, - 0xdf, 0x64, 0x39, 0x3b, 0xf0, 0xef, 0x19, 0x78, 0x8e, 0x47, 0xf3, 0x90, 0x4f, 0xab, 0x7f, 0x4c, - 0x06, 0xb2, 0x3c, 0xcd, 0x85, 0xf3, 0xbd, 0x22, 0xf3, 0x7d, 0x08, 0xe5, 0xa5, 0xa2, 0x28, 0x6f, - 0x03, 0x96, 0x58, 0x94, 0x1e, 0x03, 0x70, 0xba, 0xe3, 0x03, 0xb8, 0x9b, 0xb0, 0xca, 0xd3, 0xb0, - 0xc0, 0x82, 0x32, 0x34, 0x67, 0x78, 0x86, 0x59, 0x61, 0x1f, 0xc4, 0xe5, 0x14, 0x31, 0xfa, 0x1d, - 0xb8, 0x10, 0xe2, 0xf5, 0xa3, 0xbf, 0x40, 0x33, 0x65, 0x9f, 0x7b, 0x47, 0xa6, 0x81, 0x3f, 0x27, - 0x03, 0x0b, 0x05, 0xc8, 0x6f, 0x1a, 0x48, 0x4b, 0xfe, 0x8f, 0x40, 0x5a, 0xea, 0xbf, 0x06, 0x69, - 0xe1, 0x6c, 0x96, 0x8e, 0x66, 0xb3, 0x7f, 0x25, 0x83, 0x33, 0xf1, 0x21, 0x57, 0xc7, 0x36, 0x88, - 0xcc, 0x2f, 0x7c, 0x8c, 0xca, 0x90, 0xee, 0xdb, 0x5d, 0x99, 0x45, 0xd8, 0x90, 0x71, 0xf9, 0x41, - 0x58, 0x91, 0x31, 0xd6, 0x4f, 0x4d, 0x59, 0x6e, 0x61, 0x99, 0x9a, 0xca, 0x90, 0x7e, 0x42, 0x44, - 0xc8, 0x5c, 0xc6, 0x6c, 0xc8, 0xf8, 0xf8, 0x25, 0xe3, 0x81, 0x70, 0x19, 0x8b, 0x09, 0xba, 0x0b, - 0x0a, 0x6f, 0x43, 0x68, 0xb6, 0xe3, 0xca, 0xe8, 0xf6, 0x5a, 0x78, 0xaf, 0xa2, 0xdb, 0xb0, 0x79, - 0xc8, 0x78, 0x0e, 0x1c, 0x17, 0x17, 0x1c, 0x39, 0x0a, 0x65, 0x5d, 0x25, 0x02, 0xfe, 0x2e, 0x83, - 0xc2, 0x56, 
0xef, 0x3a, 0x7a, 0x87, 0xf0, 0x50, 0xa5, 0xe0, 0x80, 0xa0, 0x3e, 0x06, 0x34, 0x19, - 0x70, 0x51, 0x13, 0x72, 0xe4, 0x84, 0x58, 0x94, 0x1d, 0x1b, 0x33, 0xf7, 0xc5, 0x29, 0xc8, 0x8a, - 0x58, 0xb4, 0x56, 0x61, 0x46, 0xfe, 0xe7, 0x37, 0x1b, 0x65, 0xc1, 0xfd, 0xb6, 0x3d, 0x30, 0x29, - 0x19, 0x38, 0xf4, 0x0c, 0x4b, 0x79, 0xf5, 0x57, 0x29, 0x06, 0x73, 0x22, 0xc1, 0x78, 0xaa, 0x6d, - 0xbd, 0x2b, 0x9f, 0x0a, 0x41, 0xdc, 0xc5, 0xec, 0xbd, 0x0e, 0xd0, 0xd5, 0x5d, 0xed, 0xa9, 0x6e, - 0x51, 0x62, 0x48, 0xa3, 0x87, 0x28, 0xa8, 0x0a, 0x05, 0x36, 0x1b, 0xb9, 0xc4, 0x90, 0x68, 0xdb, - 0x9f, 0x87, 0xf6, 0x99, 0xff, 0x76, 0xfb, 0x8c, 0x5a, 0xb9, 0x30, 0x6e, 0xe5, 0xdf, 0xa4, 0x02, - 0x37, 0x09, 0x10, 0xe1, 0xff, 0x9f, 0x1d, 0x7e, 0xcb, 0xeb, 0xc4, 0x68, 0x56, 0x44, 0x47, 0xb0, - 0xea, 0x7b, 0xa9, 0x36, 0xe2, 0xde, 0xeb, 0xdd, 0xbb, 0x45, 0xdd, 0xbc, 0x7c, 0x12, 0x25, 0xbb, - 0xe8, 0x63, 0x78, 0x75, 0x2c, 0x04, 0xf9, 0xaa, 0x53, 0x8b, 0x46, 0xa2, 0x57, 0xa2, 0x91, 0xc8, - 0x53, 0x1d, 0x18, 0x2b, 0xfd, 0x2d, 0x9d, 0x63, 0x97, 0x95, 0x1e, 0xe1, 0x24, 0x3f, 0xf5, 0xf8, - 0xaf, 0x41, 0x71, 0x48, 0x28, 0x2b, 0x87, 0x23, 0xc5, 0xdd, 0xb2, 0x20, 0xca, 0x92, 0xf1, 0x10, - 0x5e, 0x99, 0x9a, 0xec, 0xd1, 0xf7, 0x40, 0x09, 0x70, 0x42, 0x32, 0xa6, 0x4e, 0xf2, 0xb1, 0x7f, - 0xc0, 0xab, 0xfe, 0x29, 0x19, 0xa8, 0x8c, 0x56, 0x13, 0x0d, 0xc8, 0x0d, 0x89, 0x3b, 0xea, 0x0b, - 0x7c, 0x5f, 0xda, 0x7e, 0x67, 0x31, 0x98, 0xc0, 0xa8, 0xa3, 0x3e, 0xc5, 0x52, 0x58, 0x7d, 0x0c, - 0x39, 0x41, 0x41, 0x4b, 0x90, 0x7f, 0xb8, 0xff, 0x60, 0xff, 0xe0, 0xa3, 0xfd, 0x72, 0x02, 0x01, - 0xe4, 0x76, 0xea, 0xf5, 0xc6, 0x61, 0xab, 0x9c, 0x44, 0x0a, 0x64, 0x77, 0x6a, 0x07, 0xb8, 0x55, - 0x4e, 0x31, 0x32, 0x6e, 0x7c, 0xd8, 0xa8, 0xb7, 0xca, 0x69, 0xb4, 0x0a, 0x45, 0x31, 0xd6, 0xee, - 0x1f, 0xe0, 0x9f, 0xec, 0xb4, 0xca, 0x99, 0x10, 0xe9, 0xa8, 0xb1, 0x7f, 0xaf, 0x81, 0xcb, 0x59, - 0xf5, 0x5d, 0x56, 0x40, 0xc4, 0x00, 0x8b, 0xa0, 0x54, 0x48, 0x86, 0x4a, 0x05, 0xf5, 0x77, 0x29, - 0xa8, 0xc6, 0xa3, 0x05, 0xf4, 0xe1, 0xd8, 0xc6, 0xb7, 0xcf, 0x01, 0x35, 0xc6, 0x76, 0x8f, 0xae, - 0x43, 0x69, 0x48, 0x8e, 0x09, 0xed, 0xf4, 0x04, 0x7a, 0x11, 0x99, 0xad, 0x88, 0x8b, 0x92, 0xca, - 0x85, 0x5c, 0xc1, 0xf6, 0x19, 0xe9, 0x50, 0x4d, 0x54, 0x2d, 0xe2, 0xd2, 0x29, 0x8c, 0x8d, 0x51, - 0x8f, 0x04, 0x51, 0xfd, 0xf4, 0x5c, 0xb6, 0x54, 0x20, 0x8b, 0x1b, 0x2d, 0xfc, 0x71, 0x39, 0x8d, - 0x10, 0x94, 0xf8, 0x50, 0x3b, 0xda, 0xdf, 0x39, 0x3c, 0x6a, 0x1e, 0x30, 0x5b, 0x5e, 0x80, 0x15, - 0xcf, 0x96, 0x1e, 0x31, 0xab, 0x7e, 0x02, 0xa5, 0x68, 0x89, 0xce, 0x4c, 0x38, 0xb4, 0x47, 0x96, - 0xc1, 0x8d, 0x91, 0xc5, 0x62, 0x82, 0x6e, 0x43, 0xf6, 0xc4, 0x16, 0x6e, 0x36, 0xfd, 0xae, 0x3d, - 0xb2, 0x29, 0x09, 0x95, 0xf8, 0x82, 0x5b, 0xfd, 0x02, 0xb2, 0xdc, 0x6b, 0x98, 0x07, 0xf0, 0x62, - 0x5b, 0x62, 0x1f, 0x36, 0x46, 0x9f, 0x00, 0xe8, 0x94, 0x0e, 0xcd, 0xf6, 0x28, 0x50, 0xbc, 0x31, - 0xdd, 0xeb, 0x76, 0x3c, 0xbe, 0xda, 0x65, 0xe9, 0x7e, 0x6b, 0x81, 0x68, 0xc8, 0x05, 0x43, 0x0a, - 0xd5, 0x7d, 0x28, 0x45, 0x65, 0xbd, 0x6c, 0x2d, 0xd6, 0x10, 0xcd, 0xd6, 0x02, 0x7c, 0xc9, 0x6c, - 0xed, 0xe7, 0xfa, 0xb4, 0x68, 0xac, 0xf0, 0x89, 0xfa, 0x2c, 0x09, 0x85, 0xd6, 0xa9, 0x3c, 0x8f, - 0x98, 0x9a, 0x3e, 0x10, 0x4d, 0x85, 0x2b, 0x58, 0xd1, 0x24, 0x48, 0xfb, 0xad, 0x87, 0x0f, 0xfc, - 0x1b, 0x97, 0x59, 0xb4, 0x50, 0xf1, 0x7a, 0x30, 0xd2, 0xcb, 0xde, 0x07, 0xc5, 0x8f, 0x99, 0x0c, - 0x44, 0xea, 0x86, 0x31, 0x24, 0xae, 0x2b, 0xef, 0xbd, 0x37, 0xe5, 0x2d, 0x22, 0xfb, 0xa9, 0xac, - 0x91, 0xd3, 0x58, 0x4c, 0x54, 0x03, 0x56, 0xc6, 0x02, 0x2e, 0x7a, 0x1f, 0xf2, 0xce, 0xa8, 0xad, - 0x79, 0xe6, 0x19, 0x7b, 0x12, 0xf0, 
0xe0, 0xc9, 0xa8, 0xdd, 0x37, 0x3b, 0x0f, 0xc8, 0x99, 0xb7, - 0x18, 0x67, 0xd4, 0x7e, 0x20, 0xac, 0x28, 0x7e, 0x25, 0x15, 0xfe, 0x95, 0x13, 0x28, 0x78, 0x97, - 0x02, 0xfd, 0x10, 0x14, 0x3f, 0x96, 0xfb, 0x9d, 0xc3, 0xd8, 0x24, 0x20, 0xd5, 0x07, 0x22, 0x0c, - 0xeb, 0xba, 0x66, 0xd7, 0x22, 0x86, 0x16, 0xc0, 0x58, 0xfe, 0x6b, 0x05, 0xbc, 0x22, 0x3e, 0xec, - 0x79, 0x18, 0x56, 0xfd, 0x77, 0x12, 0x0a, 0x5e, 0x87, 0x08, 0xbd, 0x1b, 0xba, 0x77, 0xa5, 0x29, - 0xf5, 0xb4, 0xc7, 0x18, 0x74, 0x79, 0xa2, 0x6b, 0x4d, 0x9d, 0x7f, 0xad, 0x71, 0xed, 0x3a, 0xaf, - 0x71, 0x9a, 0x39, 0x77, 0xe3, 0xf4, 0x6d, 0x40, 0xd4, 0xa6, 0x7a, 0x5f, 0x3b, 0xb1, 0xa9, 0x69, - 0x75, 0x35, 0x61, 0x6c, 0x81, 0x05, 0xca, 0xfc, 0xcb, 0x23, 0xfe, 0xe1, 0x90, 0xdb, 0xfd, 0x97, - 0x49, 0x28, 0xf8, 0x41, 0xfd, 0xbc, 0x4d, 0x9b, 0x8b, 0x90, 0x93, 0x71, 0x4b, 0x74, 0x6d, 0xe4, - 0xcc, 0xef, 0x1f, 0x66, 0x42, 0xfd, 0xc3, 0x2a, 0x14, 0x06, 0x84, 0xea, 0x3c, 0xb3, 0x89, 0x4a, - 0xc2, 0x9f, 0xdf, 0x7c, 0x0f, 0x96, 0x42, 0xfd, 0x33, 0xe6, 0x79, 0xfb, 0x8d, 0x8f, 0xca, 0x89, - 0x6a, 0xfe, 0xd9, 0x97, 0x57, 0xd3, 0xfb, 0xe4, 0x29, 0xbb, 0xb3, 0xb8, 0x51, 0x6f, 0x36, 0xea, - 0x0f, 0xca, 0xc9, 0xea, 0xd2, 0xb3, 0x2f, 0xaf, 0xe6, 0x31, 0xe1, 0xb5, 0xfc, 0xcd, 0x26, 0x2c, - 0x87, 0x4f, 0x25, 0x1a, 0xfa, 0x10, 0x94, 0xee, 0x3d, 0x3c, 0xdc, 0xdb, 0xad, 0xef, 0xb4, 0x1a, - 0xda, 0xa3, 0x83, 0x56, 0xa3, 0x9c, 0x44, 0xaf, 0xc2, 0x85, 0xbd, 0xdd, 0x1f, 0x37, 0x5b, 0x5a, - 0x7d, 0x6f, 0xb7, 0xb1, 0xdf, 0xd2, 0x76, 0x5a, 0xad, 0x9d, 0xfa, 0x83, 0x72, 0x6a, 0xfb, 0x0f, - 0x0a, 0xac, 0xec, 0xd4, 0xea, 0xbb, 0x2c, 0x6c, 0x9b, 0x1d, 0x9d, 0x97, 0x79, 0x75, 0xc8, 0xf0, - 0x42, 0x6e, 0xe6, 0xeb, 0x5a, 0x75, 0x76, 0x97, 0x07, 0xdd, 0x87, 0x2c, 0xaf, 0xf1, 0xd0, 0xec, - 0xe7, 0xb6, 0xea, 0x9c, 0xb6, 0x0f, 0x5b, 0x0c, 0x77, 0x8f, 0x99, 0xef, 0x6f, 0xd5, 0xd9, 0x5d, - 0x20, 0x84, 0x41, 0x09, 0xc0, 0xe7, 0xfc, 0xf7, 0xa8, 0xea, 0x02, 0xc1, 0x06, 0xed, 0x41, 0xde, - 0x83, 0xf5, 0xf3, 0x5e, 0xc8, 0xaa, 0x73, 0xdb, 0x34, 0xcc, 0x5c, 0xa2, 0xfc, 0x9a, 0xfd, 0xdc, - 0x57, 0x9d, 0xd3, 0x73, 0x42, 0xbb, 0x90, 0x93, 0x80, 0x6a, 0xce, 0xab, 0x57, 0x75, 0x5e, 0xdb, - 0x85, 0x19, 0x2d, 0x28, 0x6c, 0xe7, 0x3f, 0x62, 0x56, 0x17, 0x68, 0xa7, 0xa1, 0x87, 0x00, 0xa1, - 0x62, 0x6b, 0x81, 0xd7, 0xc9, 0xea, 0x22, 0x6d, 0x32, 0x74, 0x00, 0x05, 0x1f, 0x54, 0xcf, 0x7d, - 0x2b, 0xac, 0xce, 0xef, 0x57, 0xa1, 0xc7, 0x50, 0x8c, 0x82, 0xc9, 0xc5, 0x5e, 0x00, 0xab, 0x0b, - 0x36, 0xa2, 0x98, 0xfe, 0x28, 0xb2, 0x5c, 0xec, 0x45, 0xb0, 0xba, 0x60, 0x5f, 0x0a, 0x7d, 0x06, - 0xab, 0x93, 0xc8, 0x6f, 0xf1, 0x07, 0xc2, 0xea, 0x39, 0x3a, 0x55, 0x68, 0x00, 0x68, 0x0a, 0x62, - 0x3c, 0xc7, 0x7b, 0x61, 0xf5, 0x3c, 0x8d, 0xab, 0x5a, 0xe3, 0xab, 0x17, 0xeb, 0xc9, 0xaf, 0x5f, - 0xac, 0x27, 0xff, 0xf1, 0x62, 0x3d, 0xf9, 0xfc, 0xe5, 0x7a, 0xe2, 0xeb, 0x97, 0xeb, 0x89, 0xbf, - 0xbd, 0x5c, 0x4f, 0xfc, 0xec, 0xad, 0xae, 0x49, 0x7b, 0xa3, 0xf6, 0x66, 0xc7, 0x1e, 0x6c, 0x85, - 0xff, 0x88, 0x30, 0xed, 0xcf, 0x11, 0xed, 0x1c, 0x4f, 0x2a, 0xb7, 0xfe, 0x13, 0x00, 0x00, 0xff, - 0xff, 0x34, 0x1a, 0x9a, 0xa7, 0x3c, 0x21, 0x00, 0x00, + // 2608 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xbd, 0x73, 0x1b, 0xc7, + 0x15, 0xc7, 0x37, 0x70, 0x8f, 0x04, 0x08, 0xae, 0x68, 0x19, 0x86, 0x65, 0x52, 0x3e, 0x8f, 0x1d, + 0x4b, 0xb6, 0xc9, 0x98, 0x1a, 0x29, 0xf2, 0x38, 0x1f, 0x26, 0x20, 0x28, 0xa0, 0xc5, 0x90, 0xcc, + 0x12, 0x92, 0xc7, 0x49, 0xac, 0xf3, 0x01, 0xb7, 0x04, 0xce, 0x02, 0xee, 0xce, 0x77, 0x0b, 0x8a, + 0x74, 0x99, 0x49, 0x1a, 0x4d, 0x0a, 0x95, 0x69, 0x3c, 
0x93, 0xff, 0x20, 0x65, 0x52, 0xa5, 0x4a, + 0xe1, 0x22, 0x99, 0x71, 0x99, 0xca, 0xc9, 0x48, 0x5d, 0xfe, 0x81, 0x54, 0x99, 0xc9, 0xec, 0xc7, + 0x7d, 0x01, 0x38, 0x02, 0x8c, 0xd3, 0xa5, 0xdb, 0xdd, 0x7b, 0xef, 0x61, 0xf7, 0xed, 0xbe, 0xdf, + 0xfb, 0xed, 0x5b, 0xc0, 0xcb, 0x94, 0x58, 0x06, 0x71, 0x47, 0xa6, 0x45, 0xb7, 0xf4, 0x6e, 0xcf, + 0xdc, 0xa2, 0x67, 0x0e, 0xf1, 0x36, 0x1d, 0xd7, 0xa6, 0x36, 0x5a, 0x09, 0x3f, 0x6e, 0xb2, 0x8f, + 0xf5, 0x57, 0x22, 0xd2, 0x3d, 0xf7, 0xcc, 0xa1, 0xf6, 0x96, 0xe3, 0xda, 0xf6, 0xb1, 0x90, 0xaf, + 0x5f, 0x89, 0x7c, 0xe6, 0x76, 0xa2, 0xd6, 0x62, 0x5f, 0xa5, 0xf2, 0x23, 0x72, 0xe6, 0x7f, 0x7d, + 0x65, 0x4a, 0xd7, 0xd1, 0x5d, 0x7d, 0xe4, 0x7f, 0xde, 0xe8, 0xdb, 0x76, 0x7f, 0x48, 0xb6, 0x78, + 0xaf, 0x3b, 0x3e, 0xde, 0xa2, 0xe6, 0x88, 0x78, 0x54, 0x1f, 0x39, 0x52, 0x60, 0xad, 0x6f, 0xf7, + 0x6d, 0xde, 0xdc, 0x62, 0x2d, 0x31, 0xaa, 0xfe, 0xb5, 0x08, 0x45, 0x4c, 0x3e, 0x1f, 0x13, 0x8f, + 0xa2, 0x6d, 0xc8, 0x91, 0xde, 0xc0, 0xae, 0xa5, 0xaf, 0xa6, 0xdf, 0x5c, 0xda, 0xbe, 0xb2, 0x39, + 0xb1, 0xb8, 0x4d, 0x29, 0xd7, 0xea, 0x0d, 0xec, 0x76, 0x0a, 0x73, 0x59, 0x74, 0x13, 0xf2, 0xc7, + 0xc3, 0xb1, 0x37, 0xa8, 0x65, 0xb8, 0xd2, 0x2b, 0x49, 0x4a, 0x77, 0x99, 0x50, 0x3b, 0x85, 0x85, + 0x34, 0xfb, 0x29, 0xd3, 0x3a, 0xb6, 0x6b, 0xd9, 0xf3, 0x7f, 0x6a, 0xd7, 0x3a, 0xe6, 0x3f, 0xc5, + 0x64, 0x51, 0x03, 0xc0, 0xb4, 0x4c, 0xaa, 0xf5, 0x06, 0xba, 0x69, 0xd5, 0x72, 0x5c, 0xf3, 0xd5, + 0x64, 0x4d, 0x93, 0x36, 0x99, 0x60, 0x3b, 0x85, 0x15, 0xd3, 0xef, 0xb0, 0xe9, 0x7e, 0x3e, 0x26, + 0xee, 0x59, 0x2d, 0x7f, 0xfe, 0x74, 0x7f, 0xca, 0x84, 0xd8, 0x74, 0xb9, 0x34, 0x6a, 0xc1, 0x52, + 0x97, 0xf4, 0x4d, 0x4b, 0xeb, 0x0e, 0xed, 0xde, 0xa3, 0x5a, 0x81, 0x2b, 0xab, 0x49, 0xca, 0x0d, + 0x26, 0xda, 0x60, 0x92, 0xed, 0x14, 0x86, 0x6e, 0xd0, 0x43, 0xdf, 0x87, 0x52, 0x6f, 0x40, 0x7a, + 0x8f, 0x34, 0x7a, 0x5a, 0x2b, 0x72, 0x1b, 0x1b, 0x49, 0x36, 0x9a, 0x4c, 0xae, 0x73, 0xda, 0x4e, + 0xe1, 0x62, 0x4f, 0x34, 0xd9, 0xfa, 0x0d, 0x32, 0x34, 0x4f, 0x88, 0xcb, 0xf4, 0x4b, 0xe7, 0xaf, + 0xff, 0x8e, 0x90, 0xe4, 0x16, 0x14, 0xc3, 0xef, 0xa0, 0x1f, 0x81, 0x42, 0x2c, 0x43, 0x2e, 0x43, + 0xe1, 0x26, 0xae, 0x26, 0xee, 0xb3, 0x65, 0xf8, 0x8b, 0x28, 0x11, 0xd9, 0x46, 0xb7, 0xa1, 0xd0, + 0xb3, 0x47, 0x23, 0x93, 0xd6, 0x80, 0x6b, 0xaf, 0x27, 0x2e, 0x80, 0x4b, 0xb5, 0x53, 0x58, 0xca, + 0xa3, 0x7d, 0xa8, 0x0c, 0x4d, 0x8f, 0x6a, 0x9e, 0xa5, 0x3b, 0xde, 0xc0, 0xa6, 0x5e, 0x6d, 0x89, + 0x5b, 0x78, 0x3d, 0xc9, 0xc2, 0x9e, 0xe9, 0xd1, 0x23, 0x5f, 0xb8, 0x9d, 0xc2, 0xe5, 0x61, 0x74, + 0x80, 0xd9, 0xb3, 0x8f, 0x8f, 0x89, 0x1b, 0x18, 0xac, 0x2d, 0x9f, 0x6f, 0xef, 0x80, 0x49, 0xfb, + 0xfa, 0xcc, 0x9e, 0x1d, 0x1d, 0x40, 0x3f, 0x87, 0x4b, 0x43, 0x5b, 0x37, 0x02, 0x73, 0x5a, 0x6f, + 0x30, 0xb6, 0x1e, 0xd5, 0xca, 0xdc, 0xe8, 0xb5, 0xc4, 0x49, 0xda, 0xba, 0xe1, 0x9b, 0x68, 0x32, + 0x85, 0x76, 0x0a, 0xaf, 0x0e, 0x27, 0x07, 0xd1, 0x43, 0x58, 0xd3, 0x1d, 0x67, 0x78, 0x36, 0x69, + 0xbd, 0xc2, 0xad, 0x5f, 0x4f, 0xb2, 0xbe, 0xc3, 0x74, 0x26, 0xcd, 0x23, 0x7d, 0x6a, 0xb4, 0x51, + 0x84, 0xfc, 0x89, 0x3e, 0x1c, 0x13, 0xf5, 0x3b, 0xb0, 0x14, 0x09, 0x53, 0x54, 0x83, 0xe2, 0x88, + 0x78, 0x9e, 0xde, 0x27, 0x3c, 0xaa, 0x15, 0xec, 0x77, 0xd5, 0x0a, 0x2c, 0x47, 0x43, 0x53, 0x7d, + 0x9a, 0x0e, 0x34, 0x59, 0xd4, 0x31, 0xcd, 0x13, 0xe2, 0x7a, 0xa6, 0x6d, 0xf9, 0x9a, 0xb2, 0x8b, + 0x5e, 0x83, 0x32, 0x3f, 0x3f, 0x9a, 0xff, 0x9d, 0x85, 0x7e, 0x0e, 0x2f, 0xf3, 0xc1, 0x07, 0x52, + 0x68, 0x03, 0x96, 0x9c, 0x6d, 0x27, 0x10, 0xc9, 0x72, 0x11, 0x70, 0xb6, 0x1d, 0x5f, 0xe0, 0x55, + 0x58, 0x66, 0x2b, 0x0d, 0x24, 0x72, 0xfc, 0x47, 0x96, 0xd8, 0x98, 0x14, 0x51, 
0xff, 0x92, 0x81, + 0xea, 0x64, 0x38, 0xa3, 0xdb, 0x90, 0x63, 0xc8, 0x26, 0x41, 0xaa, 0xbe, 0x29, 0x60, 0x6f, 0xd3, + 0x87, 0xbd, 0xcd, 0x8e, 0x0f, 0x7b, 0x8d, 0xd2, 0x57, 0xdf, 0x6c, 0xa4, 0x9e, 0xfe, 0x7d, 0x23, + 0x8d, 0xb9, 0x06, 0x7a, 0x89, 0x45, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x29, 0x2b, 0x2c, 0xb4, + 0x74, 0xd3, 0xda, 0x35, 0xd0, 0x1e, 0x54, 0x7b, 0xb6, 0xe5, 0x11, 0xcb, 0x1b, 0x7b, 0x9a, 0x80, + 0x55, 0x09, 0x4d, 0xb1, 0x00, 0x13, 0x60, 0xdd, 0xf4, 0x25, 0x0f, 0xb9, 0x20, 0x5e, 0xe9, 0xc5, + 0x07, 0xd0, 0x5d, 0x80, 0x13, 0x7d, 0x68, 0x1a, 0x3a, 0xb5, 0x5d, 0xaf, 0x96, 0xbb, 0x9a, 0x9d, + 0x19, 0x65, 0x0f, 0x7c, 0x91, 0xfb, 0x8e, 0xa1, 0x53, 0xd2, 0xc8, 0xb1, 0xe9, 0xe2, 0x88, 0x26, + 0x7a, 0x03, 0x56, 0x74, 0xc7, 0xd1, 0x3c, 0xaa, 0x53, 0xa2, 0x75, 0xcf, 0x28, 0xf1, 0x38, 0x6c, + 0x2d, 0xe3, 0xb2, 0xee, 0x38, 0x47, 0x6c, 0xb4, 0xc1, 0x06, 0xd1, 0xeb, 0x50, 0x61, 0x08, 0x67, + 0xea, 0x43, 0x6d, 0x40, 0xcc, 0xfe, 0x80, 0x72, 0x80, 0xca, 0xe2, 0xb2, 0x1c, 0x6d, 0xf3, 0x41, + 0xd5, 0x08, 0x76, 0x9c, 0xa3, 0x1b, 0x42, 0x90, 0x33, 0x74, 0xaa, 0x73, 0x4f, 0x2e, 0x63, 0xde, + 0x66, 0x63, 0x8e, 0x4e, 0x07, 0xd2, 0x3f, 0xbc, 0x8d, 0x2e, 0x43, 0x41, 0x9a, 0xcd, 0x72, 0xb3, + 0xb2, 0x87, 0xd6, 0x20, 0xef, 0xb8, 0xf6, 0x09, 0xe1, 0x5b, 0x57, 0xc2, 0xa2, 0xa3, 0xfe, 0x2a, + 0x03, 0xab, 0x53, 0x38, 0xc8, 0xec, 0x0e, 0x74, 0x6f, 0xe0, 0xff, 0x16, 0x6b, 0xa3, 0x5b, 0xcc, + 0xae, 0x6e, 0x10, 0x57, 0xe6, 0x8e, 0xda, 0xb4, 0xab, 0xdb, 0xfc, 0xbb, 0x74, 0x8d, 0x94, 0x46, + 0x07, 0x50, 0x1d, 0xea, 0x1e, 0xd5, 0x04, 0xae, 0x68, 0x91, 0x3c, 0x32, 0x8d, 0xa6, 0x7b, 0xba, + 0x8f, 0x44, 0xec, 0x50, 0x4b, 0x43, 0x95, 0x61, 0x6c, 0x14, 0x61, 0x58, 0xeb, 0x9e, 0x7d, 0xa1, + 0x5b, 0xd4, 0xb4, 0x88, 0x36, 0xb5, 0x73, 0x2f, 0x4d, 0x19, 0x6d, 0x9d, 0x98, 0x06, 0xb1, 0x7a, + 0xfe, 0x96, 0x5d, 0x0a, 0x94, 0x83, 0x2d, 0xf5, 0x54, 0x0c, 0x95, 0x38, 0x92, 0xa3, 0x0a, 0x64, + 0xe8, 0xa9, 0x74, 0x40, 0x86, 0x9e, 0xa2, 0xef, 0x42, 0x8e, 0x2d, 0x92, 0x2f, 0xbe, 0x32, 0x23, + 0x05, 0x4a, 0xbd, 0xce, 0x99, 0x43, 0x30, 0x97, 0x54, 0xd5, 0x20, 0x1c, 0x02, 0x74, 0x9f, 0xb4, + 0xaa, 0x5e, 0x83, 0x95, 0x09, 0xf8, 0x8e, 0xec, 0x5f, 0x3a, 0xba, 0x7f, 0xea, 0x0a, 0x94, 0x63, + 0x58, 0xad, 0x5e, 0x86, 0xb5, 0x59, 0xd0, 0xab, 0x0e, 0x82, 0xf1, 0x18, 0x84, 0xa2, 0x9b, 0x50, + 0x0a, 0xb0, 0x57, 0x84, 0xe3, 0xb4, 0xaf, 0x7c, 0x61, 0x1c, 0x88, 0xb2, 0x38, 0x64, 0xc7, 0x9a, + 0x9f, 0x87, 0x0c, 0x9f, 0x78, 0x51, 0x77, 0x9c, 0xb6, 0xee, 0x0d, 0xd4, 0x4f, 0xa1, 0x96, 0x84, + 0xab, 0x13, 0xcb, 0xc8, 0x05, 0xc7, 0xf0, 0x32, 0x14, 0x8e, 0x6d, 0x77, 0xa4, 0x53, 0x6e, 0xac, + 0x8c, 0x65, 0x8f, 0x1d, 0x4f, 0x81, 0xb1, 0x59, 0x3e, 0x2c, 0x3a, 0xaa, 0x06, 0x2f, 0x25, 0x62, + 0x2b, 0x53, 0x31, 0x2d, 0x83, 0x08, 0x7f, 0x96, 0xb1, 0xe8, 0x84, 0x86, 0xc4, 0x64, 0x45, 0x87, + 0xfd, 0xac, 0xc7, 0xd7, 0xca, 0xed, 0x2b, 0x58, 0xf6, 0xd4, 0xdf, 0x95, 0xa0, 0x84, 0x89, 0xe7, + 0x30, 0x4c, 0x40, 0x0d, 0x50, 0xc8, 0x69, 0x8f, 0x38, 0xd4, 0x87, 0xd1, 0xd9, 0xac, 0x41, 0x48, + 0xb7, 0x7c, 0x49, 0x96, 0xb2, 0x03, 0x35, 0x74, 0x43, 0xb2, 0xb2, 0x64, 0x82, 0x25, 0xd5, 0xa3, + 0xb4, 0xec, 0x96, 0x4f, 0xcb, 0xb2, 0x89, 0x59, 0x5a, 0x68, 0x4d, 0xf0, 0xb2, 0x1b, 0x92, 0x97, + 0xe5, 0xe6, 0xfc, 0x58, 0x8c, 0x98, 0x35, 0x63, 0xc4, 0x2c, 0x3f, 0x67, 0x99, 0x09, 0xcc, 0xec, + 0x96, 0xcf, 0xcc, 0x0a, 0x73, 0x66, 0x3c, 0x41, 0xcd, 0xee, 0xc6, 0xa9, 0x99, 0xa0, 0x55, 0xaf, + 0x25, 0x6a, 0x27, 0x72, 0xb3, 0x1f, 0x44, 0xb8, 0x59, 0x29, 0x91, 0x18, 0x09, 0x23, 0x33, 0xc8, + 0x59, 0x33, 0x46, 0xce, 0x94, 0x39, 0x3e, 0x48, 0x60, 0x67, 0x1f, 0x44, 0xd9, 0x19, 0x24, 0x12, + 0x3c, 
0xb9, 0xdf, 0xb3, 0xe8, 0xd9, 0x7b, 0x01, 0x3d, 0x5b, 0x4a, 0xe4, 0x97, 0x72, 0x0d, 0x93, + 0xfc, 0xec, 0x60, 0x8a, 0x9f, 0x09, 0x3e, 0xf5, 0x46, 0xa2, 0x89, 0x39, 0x04, 0xed, 0x60, 0x8a, + 0xa0, 0x95, 0xe7, 0x18, 0x9c, 0xc3, 0xd0, 0x7e, 0x31, 0x9b, 0xa1, 0x25, 0x73, 0x28, 0x39, 0xcd, + 0xc5, 0x28, 0x9a, 0x96, 0x40, 0xd1, 0x56, 0xb8, 0xf9, 0xb7, 0x12, 0xcd, 0x5f, 0x9c, 0xa3, 0x5d, + 0x63, 0x19, 0x72, 0x22, 0xe6, 0x19, 0xca, 0x10, 0xd7, 0xb5, 0x5d, 0xc9, 0xb6, 0x44, 0x47, 0x7d, + 0x93, 0xe5, 0xec, 0x30, 0xbe, 0xcf, 0xe1, 0x73, 0x1c, 0xcd, 0x23, 0x31, 0xad, 0xfe, 0x31, 0x1d, + 0xea, 0xf2, 0x34, 0x17, 0xcd, 0xf7, 0x8a, 0xcc, 0xf7, 0x11, 0x96, 0x97, 0x89, 0xb3, 0xbc, 0x0d, + 0x58, 0x62, 0x28, 0x3d, 0x41, 0xe0, 0x74, 0x27, 0x20, 0x70, 0xd7, 0x61, 0x95, 0xa7, 0x61, 0xc1, + 0x05, 0x25, 0x34, 0xe7, 0x78, 0x86, 0x59, 0x61, 0x1f, 0xc4, 0xe1, 0x14, 0x18, 0xfd, 0x0e, 0x5c, + 0x8a, 0xc8, 0x06, 0xe8, 0x2f, 0xd8, 0x4c, 0x35, 0x90, 0xde, 0x91, 0x69, 0xe0, 0xcf, 0xe9, 0xd0, + 0x43, 0x21, 0xf3, 0x9b, 0x45, 0xd2, 0xd2, 0xff, 0x23, 0x92, 0x96, 0xf9, 0xaf, 0x49, 0x5a, 0x34, + 0x9b, 0x65, 0xe3, 0xd9, 0xec, 0x5f, 0xe9, 0x70, 0x4f, 0x02, 0xca, 0xd5, 0xb3, 0x0d, 0x22, 0xf3, + 0x0b, 0x6f, 0xa3, 0x2a, 0x64, 0x87, 0x76, 0x5f, 0x66, 0x11, 0xd6, 0x64, 0x52, 0x01, 0x08, 0x2b, + 0x12, 0x63, 0x83, 0xd4, 0x94, 0xe7, 0x1e, 0x96, 0xa9, 0xa9, 0x0a, 0xd9, 0x47, 0x44, 0x40, 0xe6, + 0x32, 0x66, 0x4d, 0x26, 0xc7, 0x0f, 0x19, 0x07, 0xc2, 0x65, 0x2c, 0x3a, 0xe8, 0x36, 0x28, 0xbc, + 0x0c, 0xa1, 0xd9, 0x8e, 0x27, 0xd1, 0xed, 0xe5, 0xe8, 0x5a, 0x45, 0xb5, 0x61, 0xf3, 0x90, 0xc9, + 0x1c, 0x38, 0x1e, 0x2e, 0x39, 0xb2, 0x15, 0xc9, 0xba, 0x4a, 0x8c, 0xfc, 0x5d, 0x01, 0x85, 0xcd, + 0xde, 0x73, 0xf4, 0x1e, 0xe1, 0x50, 0xa5, 0xe0, 0x70, 0x40, 0x7d, 0x08, 0x68, 0x1a, 0x70, 0x51, + 0x1b, 0x0a, 0xe4, 0x84, 0x58, 0x94, 0x6d, 0x1b, 0x73, 0xf7, 0xe5, 0x19, 0xcc, 0x8a, 0x58, 0xb4, + 0x51, 0x63, 0x4e, 0xfe, 0xe7, 0x37, 0x1b, 0x55, 0x21, 0xfd, 0xb6, 0x3d, 0x32, 0x29, 0x19, 0x39, + 0xf4, 0x0c, 0x4b, 0x7d, 0xf5, 0x0f, 0x19, 0x46, 0x73, 0x62, 0x60, 0x3c, 0xd3, 0xb7, 0xfe, 0x91, + 0xcf, 0x44, 0x28, 0xee, 0x62, 0xfe, 0x5e, 0x07, 0xe8, 0xeb, 0x9e, 0xf6, 0x58, 0xb7, 0x28, 0x31, + 0xa4, 0xd3, 0x23, 0x23, 0xa8, 0x0e, 0x25, 0xd6, 0x1b, 0x7b, 0xc4, 0x90, 0x6c, 0x3b, 0xe8, 0x47, + 0xd6, 0x59, 0xfc, 0x76, 0xeb, 0x8c, 0x7b, 0xb9, 0x34, 0xe1, 0xe5, 0x08, 0x05, 0x51, 0xa2, 0x14, + 0x84, 0xcd, 0xcd, 0x71, 0x4d, 0xdb, 0x35, 0xe9, 0x19, 0xdf, 0x9a, 0x2c, 0x0e, 0xfa, 0xea, 0xaf, + 0x33, 0x61, 0x68, 0x85, 0x2c, 0xf2, 0xff, 0xce, 0x77, 0xea, 0x6f, 0xf8, 0xdd, 0x32, 0x9e, 0x49, + 0xd1, 0x11, 0xac, 0x06, 0x91, 0xad, 0x8d, 0x79, 0xc4, 0xfb, 0x67, 0x75, 0x51, 0x68, 0xa8, 0x9e, + 0xc4, 0x87, 0x3d, 0xf4, 0x31, 0xbc, 0x38, 0x01, 0x5b, 0x81, 0xe9, 0xcc, 0xa2, 0xe8, 0xf5, 0x42, + 0x1c, 0xbd, 0x7c, 0xd3, 0xa1, 0xb3, 0xb2, 0xdf, 0x32, 0xa0, 0x76, 0xd9, 0x75, 0x25, 0x4a, 0x0c, + 0x66, 0x6e, 0xff, 0x6b, 0x50, 0x76, 0x09, 0x65, 0x57, 0xe8, 0xd8, 0x85, 0x70, 0x59, 0x0c, 0xca, + 0x6b, 0xe6, 0x21, 0xbc, 0x30, 0x93, 0x20, 0xa0, 0xef, 0x81, 0x12, 0x72, 0x8b, 0x74, 0xc2, 0xdd, + 0x2a, 0xb8, 0x2f, 0x84, 0xb2, 0xea, 0x9f, 0xd2, 0xa1, 0xc9, 0xf8, 0x0d, 0xa4, 0x05, 0x05, 0x97, + 0x78, 0xe3, 0xa1, 0xb8, 0x13, 0x54, 0xb6, 0xdf, 0x59, 0x8c, 0x5a, 0xb0, 0xd1, 0xf1, 0x90, 0x62, + 0xa9, 0xac, 0x3e, 0x84, 0x82, 0x18, 0x41, 0x4b, 0x50, 0xbc, 0xbf, 0x7f, 0x6f, 0xff, 0xe0, 0xa3, + 0xfd, 0x6a, 0x0a, 0x01, 0x14, 0x76, 0x9a, 0xcd, 0xd6, 0x61, 0xa7, 0x9a, 0x46, 0x0a, 0xe4, 0x77, + 0x1a, 0x07, 0xb8, 0x53, 0xcd, 0xb0, 0x61, 0xdc, 0xfa, 0xb0, 0xd5, 0xec, 0x54, 0xb3, 0x68, 0x15, + 0xca, 0xa2, 0xad, 0xdd, 0x3d, 
0xc0, 0x3f, 0xd9, 0xe9, 0x54, 0x73, 0x91, 0xa1, 0xa3, 0xd6, 0xfe, + 0x9d, 0x16, 0xae, 0xe6, 0xd5, 0x77, 0xd9, 0xa5, 0x23, 0x81, 0x8c, 0x84, 0xd7, 0x8b, 0x74, 0xe4, + 0x7a, 0xa1, 0xfe, 0x36, 0x03, 0xf5, 0x64, 0x86, 0x81, 0x3e, 0x9c, 0x58, 0xf8, 0xf6, 0x05, 0xe8, + 0xc9, 0xc4, 0xea, 0xd1, 0xeb, 0x50, 0x71, 0xc9, 0x31, 0xa1, 0xbd, 0x81, 0x60, 0x3c, 0x22, 0x1b, + 0x96, 0x71, 0x59, 0x8e, 0x72, 0x25, 0x4f, 0x88, 0x7d, 0x46, 0x7a, 0x54, 0x13, 0x30, 0x23, 0x0e, + 0x9d, 0xc2, 0xc4, 0xd8, 0xe8, 0x91, 0x18, 0x54, 0x3f, 0xbd, 0x90, 0x2f, 0x15, 0xc8, 0xe3, 0x56, + 0x07, 0x7f, 0x5c, 0xcd, 0x22, 0x04, 0x15, 0xde, 0xd4, 0x8e, 0xf6, 0x77, 0x0e, 0x8f, 0xda, 0x07, + 0xcc, 0x97, 0x97, 0x60, 0xc5, 0xf7, 0xa5, 0x3f, 0x98, 0x57, 0x3f, 0x81, 0x4a, 0xfc, 0x5a, 0xcf, + 0x5c, 0xe8, 0xda, 0x63, 0xcb, 0xe0, 0xce, 0xc8, 0x63, 0xd1, 0x41, 0x37, 0x21, 0x7f, 0x62, 0x8b, + 0x30, 0x9b, 0x7d, 0xd6, 0x1e, 0xd8, 0x94, 0x44, 0xca, 0x02, 0x42, 0x5a, 0xfd, 0x02, 0xf2, 0x3c, + 0x6a, 0x58, 0x04, 0xf0, 0x0b, 0xba, 0xe4, 0x4b, 0xac, 0x8d, 0x3e, 0x01, 0xd0, 0x29, 0x75, 0xcd, + 0xee, 0x38, 0x34, 0xbc, 0x31, 0x3b, 0xea, 0x76, 0x7c, 0xb9, 0xc6, 0x15, 0x19, 0x7e, 0x6b, 0xa1, + 0x6a, 0x24, 0x04, 0x23, 0x06, 0xd5, 0x7d, 0xa8, 0xc4, 0x75, 0xfd, 0x0c, 0x2f, 0xe6, 0x10, 0xcf, + 0xf0, 0x82, 0xb0, 0xc9, 0x0c, 0x1f, 0xf0, 0x83, 0xac, 0x28, 0xc6, 0xf0, 0x8e, 0xfa, 0x24, 0x0d, + 0xa5, 0xce, 0xa9, 0xdc, 0x8f, 0x84, 0x3a, 0x40, 0xa8, 0x9a, 0x89, 0xde, 0x7a, 0x45, 0x61, 0x21, + 0x1b, 0x94, 0x2b, 0x3e, 0x08, 0x4e, 0x5c, 0x6e, 0xd1, 0xcb, 0x8d, 0x5f, 0xb7, 0x91, 0x51, 0xf6, + 0x3e, 0x28, 0x01, 0x66, 0x32, 0xe2, 0xa9, 0x1b, 0x86, 0x4b, 0x3c, 0x4f, 0x9e, 0x7b, 0xbf, 0xcb, + 0xcb, 0x4a, 0xf6, 0x63, 0x79, 0xaf, 0xce, 0x62, 0xd1, 0x51, 0x0d, 0x58, 0x99, 0x00, 0x5c, 0xf4, + 0x3e, 0x14, 0x9d, 0x71, 0x57, 0xf3, 0xdd, 0x33, 0xf1, 0x8c, 0xe0, 0x53, 0x9a, 0x71, 0x77, 0x68, + 0xf6, 0xee, 0x91, 0x33, 0x7f, 0x32, 0xce, 0xb8, 0x7b, 0x4f, 0x78, 0x51, 0xfc, 0x4a, 0x26, 0xfa, + 0x2b, 0x27, 0x50, 0xf2, 0x0f, 0x05, 0xfa, 0x21, 0x28, 0x01, 0x96, 0x07, 0xd5, 0xc6, 0xc4, 0x24, + 0x20, 0xcd, 0x87, 0x2a, 0x8c, 0x1f, 0x7b, 0x66, 0xdf, 0x22, 0x86, 0x16, 0x52, 0x5f, 0xfe, 0x6b, + 0x25, 0xbc, 0x22, 0x3e, 0xec, 0xf9, 0xbc, 0x57, 0xfd, 0x77, 0x1a, 0x4a, 0x7e, 0x55, 0x09, 0xbd, + 0x1b, 0x39, 0x77, 0x95, 0x19, 0x77, 0x70, 0x5f, 0x30, 0xac, 0x0c, 0xc5, 0xe7, 0x9a, 0xb9, 0xf8, + 0x5c, 0x93, 0x4a, 0x7c, 0x7e, 0xb1, 0x35, 0x77, 0xe1, 0x62, 0xeb, 0xdb, 0x80, 0xa8, 0x4d, 0xf5, + 0xa1, 0x76, 0x62, 0x53, 0xd3, 0xea, 0x6b, 0xc2, 0xd9, 0x82, 0x0b, 0x54, 0xf9, 0x97, 0x07, 0xfc, + 0xc3, 0x21, 0xf7, 0xfb, 0x2f, 0xd3, 0x50, 0x0a, 0x40, 0xfd, 0xa2, 0x85, 0x9e, 0xcb, 0x50, 0x90, + 0xb8, 0x25, 0x2a, 0x3d, 0xb2, 0x17, 0xd4, 0x1c, 0x73, 0x91, 0x9a, 0x63, 0x1d, 0x4a, 0x23, 0x42, + 0x75, 0x9e, 0xd9, 0xc4, 0xed, 0x23, 0xe8, 0x5f, 0x7f, 0x0f, 0x96, 0x22, 0x35, 0x37, 0x16, 0x79, + 0xfb, 0xad, 0x8f, 0xaa, 0xa9, 0x7a, 0xf1, 0xc9, 0x97, 0x57, 0xb3, 0xfb, 0xe4, 0x31, 0x3b, 0xb3, + 0xb8, 0xd5, 0x6c, 0xb7, 0x9a, 0xf7, 0xaa, 0xe9, 0xfa, 0xd2, 0x93, 0x2f, 0xaf, 0x16, 0x31, 0xe1, + 0xf7, 0xff, 0xeb, 0x6d, 0x58, 0x8e, 0xee, 0x4a, 0x1c, 0xfa, 0x10, 0x54, 0xee, 0xdc, 0x3f, 0xdc, + 0xdb, 0x6d, 0xee, 0x74, 0x5a, 0xda, 0x83, 0x83, 0x4e, 0xab, 0x9a, 0x46, 0x2f, 0xc2, 0xa5, 0xbd, + 0xdd, 0x1f, 0xb7, 0x3b, 0x5a, 0x73, 0x6f, 0xb7, 0xb5, 0xdf, 0xd1, 0x76, 0x3a, 0x9d, 0x9d, 0xe6, + 0xbd, 0x6a, 0x66, 0xfb, 0xf7, 0x0a, 0xac, 0xec, 0x34, 0x9a, 0xbb, 0x0c, 0xb6, 0xcd, 0x9e, 0xce, + 0xaf, 0x86, 0x4d, 0xc8, 0xf1, 0xcb, 0xdf, 0xb9, 0x2f, 0x72, 0xf5, 0xf3, 0x2b, 0x43, 0xe8, 0x2e, + 0xe4, 0xf9, 0xbd, 0x10, 0x9d, 0xff, 0x44, 0x57, 0x9f, 
0x53, 0x2a, 0x62, 0x93, 0xe1, 0xe1, 0x71, + 0xee, 0x9b, 0x5d, 0xfd, 0xfc, 0xca, 0x11, 0xc2, 0xa0, 0x84, 0xe4, 0x73, 0xfe, 0x1b, 0x56, 0x7d, + 0x01, 0xb0, 0x41, 0x7b, 0x50, 0xf4, 0xaf, 0x02, 0xf3, 0x5e, 0xd5, 0xea, 0x73, 0x4b, 0x3b, 0xcc, + 0x5d, 0xe2, 0xca, 0x76, 0xfe, 0x13, 0x61, 0x7d, 0x4e, 0x9d, 0x0a, 0xed, 0x42, 0x41, 0x12, 0xaa, + 0x39, 0x2f, 0x65, 0xf5, 0x79, 0xa5, 0x1a, 0xe6, 0xb4, 0xf0, 0x32, 0x3c, 0xff, 0xe1, 0xb3, 0xbe, + 0x40, 0x09, 0x0e, 0xdd, 0x07, 0x88, 0x5c, 0xd0, 0x16, 0x78, 0xd1, 0xac, 0x2f, 0x52, 0x5a, 0x43, + 0x07, 0x50, 0x0a, 0x48, 0xf5, 0xdc, 0xf7, 0xc5, 0xfa, 0xfc, 0x1a, 0x17, 0x7a, 0x08, 0xe5, 0x38, + 0x99, 0x5c, 0xec, 0xd5, 0xb0, 0xbe, 0x60, 0xf1, 0x8a, 0xd9, 0x8f, 0x33, 0xcb, 0xc5, 0x5e, 0x11, + 0xeb, 0x0b, 0xd6, 0xb2, 0xd0, 0x67, 0xb0, 0x3a, 0xcd, 0xfc, 0x16, 0x7f, 0x54, 0xac, 0x5f, 0xa0, + 0xba, 0x85, 0x46, 0x80, 0x66, 0x30, 0xc6, 0x0b, 0xbc, 0x31, 0xd6, 0x2f, 0x52, 0xec, 0x6a, 0xb4, + 0xbe, 0x7a, 0xb6, 0x9e, 0xfe, 0xfa, 0xd9, 0x7a, 0xfa, 0x1f, 0xcf, 0xd6, 0xd3, 0x4f, 0x9f, 0xaf, + 0xa7, 0xbe, 0x7e, 0xbe, 0x9e, 0xfa, 0xdb, 0xf3, 0xf5, 0xd4, 0xcf, 0xde, 0xea, 0x9b, 0x74, 0x30, + 0xee, 0x6e, 0xf6, 0xec, 0xd1, 0x56, 0xf4, 0xcf, 0x0b, 0xb3, 0xfe, 0x50, 0xd1, 0x2d, 0xf0, 0xa4, + 0x72, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xed, 0x8f, 0xef, 0x31, 0x70, 0x21, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5212,6 +5229,18 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Priority != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x50 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x4a + } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -6806,6 +6835,13 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Priority != 0 { + n += 1 + sovTypes(uint64(m.Priority)) + } return n } @@ -11020,6 +11056,57 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/config/config.go b/config/config.go index a87558249..8cb3c75d0 100644 --- a/config/config.go +++ b/config/config.go @@ -30,6 +30,9 @@ const ( BlockchainV0 = "v0" 
BlockchainV2 = "v2" + + MempoolV0 = "v0" + MempoolV1 = "v1" ) // NOTE: Most of the structs & relevant comments + the @@ -731,8 +734,9 @@ func (cfg *P2PConfig) ValidateBasic() error { //----------------------------------------------------------------------------- // MempoolConfig -// MempoolConfig defines the configuration options for the Tendermint mempool +// MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { + Version string `mapstructure:"version"` RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` @@ -757,9 +761,10 @@ type MempoolConfig struct { MaxBatchBytes int `mapstructure:"max-batch-bytes"` } -// DefaultMempoolConfig returns a default configuration for the Tendermint mempool +// DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ + Version: MempoolV1, Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement diff --git a/config/toml.go b/config/toml.go index 768f1c5ef..9fabda6ac 100644 --- a/config/toml.go +++ b/config/toml.go @@ -363,6 +363,11 @@ dial-timeout = "{{ .P2P.DialTimeout }}" ####################################################### [mempool] +# Mempool version to use: +# 1) "v0" - The legacy non-prioritized mempool reactor. +# 2) "v1" (default) - The prioritized mempool reactor. +version = "{{ .Mempool.Version }}" + recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 996867a63..754cc4cb9 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -16,7 +16,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - mempl "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" "github.com/tendermint/tendermint/p2p" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -64,7 +64,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proxyAppConnCon := abcicli.NewLocalClient(mtx, app) // Make Mempool - mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() diff --git a/consensus/common_test.go b/consensus/common_test.go index eb04f109d..803a22757 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -31,7 +31,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - mempl "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" @@ -405,7 +405,7 @@ func newStateWithConfigAndBlockStore( proxyAppConnCon := abcicli.NewLocalClient(mtx, app) // Make Mempool - mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) 
mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index d2ae9e45b..f88a43c45 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -21,7 +21,8 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/p2ptest" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" @@ -154,7 +155,7 @@ func waitForAndValidateBlock( require.NoError(t, validateBlock(newBlock, activeVals)) for _, tx := range txs { - require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})) + require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(tx, nil, mempool.TxInfo{})) } } @@ -314,7 +315,7 @@ func TestReactorWithEvidence(t *testing.T) { proxyAppConnMem := abcicli.NewLocalClient(mtx, app) proxyAppConnCon := abcicli.NewLocalClient(mtx, app) - mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() @@ -400,7 +401,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } // send a tx - require.NoError(t, assertMempool(states[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{})) + require.NoError(t, assertMempool(states[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempool.TxInfo{})) var wg sync.WaitGroup for _, sub := range rts.subs { diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go index e212234a7..aa5b7eeae 100644 --- a/consensus/replay_stubs.go +++ b/consensus/replay_stubs.go @@ -36,7 +36,7 @@ func (emptyMempool) Flush() {} func (emptyMempool) FlushAppConn() error { return nil } func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) TxsBytes() int64 { return 0 } +func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) TxsFront() *clist.CElement { return nil } func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } diff --git a/mempool/cache.go b/mempool/cache.go new file mode 100644 index 000000000..43174f106 --- /dev/null +++ b/mempool/cache.go @@ -0,0 +1,107 @@ +package mempool + +import ( + "container/list" + + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/types" +) + +// TxCache defines an interface for raw transaction caching in a mempool. +// Currently, a TxCache does not allow direct reading or getting of transaction +// values. A TxCache is used primarily to push transactions and removing +// transactions. Pushing via Push returns a boolean telling the caller if the +// transaction already exists in the cache or not. +type TxCache interface { + // Reset resets the cache to an empty state. + Reset() + + // Push adds the given raw transaction to the cache and returns true if it was + // newly added. Otherwise, it returns false. + Push(tx types.Tx) bool + + // Remove removes the given raw transaction from the cache. 
+ Remove(tx types.Tx) +} + +var _ TxCache = (*LRUTxCache)(nil) + +// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache +// only stores the hash of the raw transaction. +type LRUTxCache struct { + mtx tmsync.Mutex + size int + cacheMap map[[TxKeySize]byte]*list.Element + list *list.List +} + +func NewLRUTxCache(cacheSize int) *LRUTxCache { + return &LRUTxCache{ + size: cacheSize, + cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize), + list: list.New(), + } +} + +// GetList returns the underlying linked-list that backs the LRU cache. Note, +// this should be used for testing purposes only! +func (c *LRUTxCache) GetList() *list.List { + return c.list +} + +func (c *LRUTxCache) Reset() { + c.mtx.Lock() + defer c.mtx.Unlock() + + c.cacheMap = make(map[[TxKeySize]byte]*list.Element, c.size) + c.list.Init() +} + +func (c *LRUTxCache) Push(tx types.Tx) bool { + c.mtx.Lock() + defer c.mtx.Unlock() + + key := TxKey(tx) + + moved, ok := c.cacheMap[key] + if ok { + c.list.MoveToBack(moved) + return false + } + + if c.list.Len() >= c.size { + front := c.list.Front() + if front != nil { + frontKey := front.Value.([TxKeySize]byte) + delete(c.cacheMap, frontKey) + c.list.Remove(front) + } + } + + e := c.list.PushBack(key) + c.cacheMap[key] = e + + return true +} + +func (c *LRUTxCache) Remove(tx types.Tx) { + c.mtx.Lock() + defer c.mtx.Unlock() + + key := TxKey(tx) + e := c.cacheMap[key] + delete(c.cacheMap, key) + + if e != nil { + c.list.Remove(e) + } +} + +// NopTxCache defines a no-op raw transaction cache. +type NopTxCache struct{} + +var _ TxCache = (*NopTxCache)(nil) + +func (NopTxCache) Reset() {} +func (NopTxCache) Push(types.Tx) bool { return true } +func (NopTxCache) Remove(types.Tx) {} diff --git a/mempool/cache_bench_test.go b/mempool/cache_bench_test.go new file mode 100644 index 000000000..1c26999d1 --- /dev/null +++ b/mempool/cache_bench_test.go @@ -0,0 +1,41 @@ +package mempool + +import ( + "encoding/binary" + "testing" +) + +func BenchmarkCacheInsertTime(b *testing.B) { + cache := NewLRUTxCache(b.N) + + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + cache.Push(txs[i]) + } +} + +// This benchmark is probably skewed, since we actually will be removing +// txs in parallel, which may cause some overhead due to mutex locking. 
+func BenchmarkCacheRemoveTime(b *testing.B) { + cache := NewLRUTxCache(b.N) + + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + cache.Push(txs[i]) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + cache.Remove(txs[i]) + } +} diff --git a/mempool/cache_test.go b/mempool/cache_test.go index d9a53f475..44b2beb01 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -2,32 +2,30 @@ package mempool import ( "crypto/rand" - "crypto/sha256" "testing" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) func TestCacheRemove(t *testing.T) { - cache := newMapTxCache(100) + cache := NewLRUTxCache(100) numTxs := 10 + txs := make([][]byte, numTxs) for i := 0; i < numTxs; i++ { // probability of collision is 2**-256 txBytes := make([]byte, 32) _, err := rand.Read(txBytes) require.NoError(t, err) + txs[i] = txBytes cache.Push(txBytes) + // make sure its added to both the linked list and the map require.Equal(t, i+1, len(cache.cacheMap)) require.Equal(t, i+1, cache.list.Len()) } + for i := 0; i < numTxs; i++ { cache.Remove(txs[i]) // make sure its removed from both the map and the linked list @@ -35,70 +33,3 @@ func TestCacheRemove(t *testing.T) { require.Equal(t, numTxs-(i+1), cache.list.Len()) } } - -func TestCacheAfterUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // reAddIndices & txsInCache can have elements > numTxsToCreate - // also assumes max index is 255 for convenience - // txs in cache also checks order of elements - tests := []struct { - numTxsToCreate int - updateIndices []int - reAddIndices []int - txsInCache []int - }{ - {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works - {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache - {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache - {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe - } - for tcIndex, tc := range tests { - for i := 0; i < tc.numTxsToCreate; i++ { - tx := types.Tx{byte(i)} - err := mempool.CheckTx(tx, nil, TxInfo{}) - require.NoError(t, err) - } - - updateTxs := []types.Tx{} - for _, v := range tc.updateIndices { - tx := types.Tx{byte(v)} - updateTxs = append(updateTxs, tx) - } - err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - - for _, v := range tc.reAddIndices { - tx := types.Tx{byte(v)} - _ = mempool.CheckTx(tx, nil, TxInfo{}) - } - - cache := mempool.cache.(*mapTxCache) - node := cache.list.Front() - counter := 0 - for node != nil { - require.NotEqual(t, len(tc.txsInCache), counter, - "cache larger than expected on testcase %d", tcIndex) - - nodeVal := node.Value.([sha256.Size]byte) - expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) - // Reference for reading the errors: - // >>> sha256('\x00').hexdigest() - // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' - // >>> sha256('\x01').hexdigest() - // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' - // >>> sha256('\x02').hexdigest() - // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - - require.Equal(t, expectedBz, 
nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) - counter++ - node = node.Next() - } - require.Equal(t, len(tc.txsInCache), counter, - "cache smaller than expected on testcase %d", tcIndex) - mempool.Flush() - } -} diff --git a/mempool/errors.go b/mempool/errors.go index e33e14ca3..e3a9a2217 100644 --- a/mempool/errors.go +++ b/mempool/errors.go @@ -10,33 +10,37 @@ var ( ErrTxInCache = errors.New("tx already exists in cache") ) -// ErrTxTooLarge means the tx is too big to be sent in a message to other peers +// ErrTxTooLarge defines an error when a transaction is too big to be sent in a +// message to other peers. type ErrTxTooLarge struct { - max int - actual int + Max int + Actual int } func (e ErrTxTooLarge) Error() string { - return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.max, e.actual) + return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) } -// ErrMempoolIsFull means Tendermint & an application can't handle that much load +// ErrMempoolIsFull defines an error where Tendermint and the application cannot +// handle that much load. type ErrMempoolIsFull struct { - numTxs int - maxTxs int - - txsBytes int64 - maxTxsBytes int64 + NumTxs int + MaxTxs int + TxsBytes int64 + MaxTxsBytes int64 } func (e ErrMempoolIsFull) Error() string { return fmt.Sprintf( "mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)", - e.numTxs, e.maxTxs, - e.txsBytes, e.maxTxsBytes) + e.NumTxs, + e.MaxTxs, + e.TxsBytes, + e.MaxTxsBytes, + ) } -// ErrPreCheck is returned when tx is too big +// ErrPreCheck defines an error where a transaction fails a pre-check. type ErrPreCheck struct { Reason error } @@ -47,6 +51,5 @@ func (e ErrPreCheck) Error() string { // IsPreCheckError returns true if err is due to pre check failure. func IsPreCheckError(err error) bool { - _, ok := err.(ErrPreCheck) - return ok + return errors.As(err, &ErrPreCheck{}) } diff --git a/mempool/ids.go b/mempool/ids.go index 286921c44..82b4a67bd 100644 --- a/mempool/ids.go +++ b/mempool/ids.go @@ -7,15 +7,17 @@ import ( "github.com/tendermint/tendermint/p2p" ) -type mempoolIDs struct { +// nolint: golint +// TODO: Rename type. +type MempoolIDs struct { mtx tmsync.RWMutex peerMap map[p2p.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } -func newMempoolIDs() *mempoolIDs { - return &mempoolIDs{ +func NewMempoolIDs() *MempoolIDs { + return &MempoolIDs{ peerMap: make(map[p2p.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx @@ -26,7 +28,7 @@ func newMempoolIDs() *mempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. -func (ids *mempoolIDs) ReserveForPeer(peerID p2p.NodeID) { +func (ids *MempoolIDs) ReserveForPeer(peerID p2p.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -36,7 +38,7 @@ func (ids *mempoolIDs) ReserveForPeer(peerID p2p.NodeID) { } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *mempoolIDs) Reclaim(peerID p2p.NodeID) { +func (ids *MempoolIDs) Reclaim(peerID p2p.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -48,7 +50,7 @@ func (ids *mempoolIDs) Reclaim(peerID p2p.NodeID) { } // GetForPeer returns an ID reserved for the peer. 
-func (ids *mempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 { +func (ids *MempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() @@ -57,9 +59,9 @@ func (ids *mempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 { // nextPeerID returns the next unused peer ID to use. We assume that the mutex // is already held. -func (ids *mempoolIDs) nextPeerID() uint16 { - if len(ids.activeIDs) == maxActiveIDs { - panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs)) +func (ids *MempoolIDs) nextPeerID() uint16 { + if len(ids.activeIDs) == MaxActiveIDs { + panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs)) } _, idExists := ids.activeIDs[ids.nextID] diff --git a/mempool/ids_test.go b/mempool/ids_test.go new file mode 100644 index 000000000..b758f91bf --- /dev/null +++ b/mempool/ids_test.go @@ -0,0 +1,23 @@ +package mempool + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/p2p" +) + +func TestMempoolIDsBasic(t *testing.T) { + ids := NewMempoolIDs() + + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + ids.ReserveForPeer(peerID) + require.EqualValues(t, 1, ids.GetForPeer(peerID)) + ids.Reclaim(peerID) + + ids.ReserveForPeer(peerID) + require.EqualValues(t, 2, ids.GetForPeer(peerID)) + ids.Reclaim(peerID) +} diff --git a/mempool/mempool.go b/mempool/mempool.go index 74e328bae..ba9b7a138 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -1,18 +1,31 @@ package mempool import ( - "context" "fmt" + "math" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) +const ( + MempoolChannel = p2p.ChannelID(0x30) + + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 + + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) + UnknownPeerID uint16 = 0 + + MaxActiveIDs = math.MaxUint16 +) + // Mempool defines the mempool interface. // // Updates to the mempool need to be synchronized with committing a block so -// apps can reset their transient state on Commit. +// applications can reset their transient state on Commit. type Mempool interface { // CheckTx executes a new transaction against the application to determine // its validity and whether it should be added to the mempool. @@ -21,24 +34,29 @@ type Mempool interface { // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes // bytes total with the condition that the total gasWanted must be less than // maxGas. + // // If both maxes are negative, there is no cap on the size of all returned // transactions (~ all available transactions). ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - // ReapMaxTxs reaps up to max transactions from the mempool. - // If max is negative, there is no cap on the size of all returned - // transactions (~ all available transactions). + // ReapMaxTxs reaps up to max transactions from the mempool. If max is + // negative, there is no cap on the size of all returned transactions + // (~ all available transactions). ReapMaxTxs(max int) types.Txs - // Lock locks the mempool. The consensus must be able to hold lock to safely update. + // Lock locks the mempool. The consensus must be able to hold lock to safely + // update. Lock() // Unlock unlocks the mempool. 
Unlock() - // Update informs the mempool that the given txs were committed and can be discarded. - // NOTE: this should be called *after* block is committed by consensus. - // NOTE: Lock/Unlock must be managed by caller + // Update informs the mempool that the given txs were committed and can be + // discarded. + // + // NOTE: + // 1. This should be called *after* block is committed by consensus. + // 2. Lock/Unlock must be managed by the caller. Update( blockHeight int64, blockTxs types.Txs, @@ -47,17 +65,21 @@ type Mempool interface { newPostFn PostCheckFunc, ) error - // FlushAppConn flushes the mempool connection to ensure async reqResCb calls are - // done. E.g. from CheckTx. - // NOTE: Lock/Unlock must be managed by caller + // FlushAppConn flushes the mempool connection to ensure async callback calls + // are done, e.g. from CheckTx. + // + // NOTE: + // 1. Lock/Unlock must be managed by caller. FlushAppConn() error - // Flush removes all transactions from the mempool and cache + // Flush removes all transactions from the mempool and caches. Flush() - // TxsAvailable returns a channel which fires once for every height, - // and only when transactions are available in the mempool. - // NOTE: the returned channel may be nil if EnableTxsAvailable was not called. + // TxsAvailable returns a channel which fires once for every height, and only + // when transactions are available in the mempool. + // + // NOTE: + // 1. The returned channel may be nil if EnableTxsAvailable was not called. TxsAvailable() <-chan struct{} // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will @@ -67,12 +89,10 @@ type Mempool interface { // Size returns the number of transactions in the mempool. Size() int - // TxsBytes returns the total size of all txs in the mempool. - TxsBytes() int64 + // SizeBytes returns the total size of all txs in the mempool. + SizeBytes() int64 } -//-------------------------------------------------------------------------------- - // PreCheckFunc is an optional filter executed before CheckTx and rejects // transaction if false is returned. An example would be to ensure that a // transaction doesn't exceeded the block size. @@ -83,29 +103,16 @@ type PreCheckFunc func(types.Tx) error // transaction doesn't require more gas than available for the block. type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error -// TxInfo are parameters that get passed when attempting to add a tx to the -// mempool. -type TxInfo struct { - // SenderID is the internal peer ID used in the mempool to identify the - // sender, storing 2 bytes with each tx instead of 20 bytes for the p2p.ID. - SenderID uint16 - // SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging. - SenderP2PID p2p.NodeID - // Context is the optional context to cancel CheckTx - Context context.Context -} - -//-------------------------------------------------------------------------------- - -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal to the expected maxBytes. +// PreCheckMaxBytes checks that the size of the transaction is smaller or equal +// to the expected maxBytes. 
func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) if txSize > maxBytes { - return fmt.Errorf("tx size is too big: %d, max: %d", - txSize, maxBytes) + return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) } + return nil } } @@ -125,6 +132,7 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { return fmt.Errorf("gas wanted %d is greater than max gas %d", res.GasWanted, maxGas) } + return nil } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 5e4eaf5ed..5d3022e80 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -18,10 +18,25 @@ const ( type Metrics struct { // Size of the mempool. Size metrics.Gauge + // Histogram of transaction sizes, in bytes. TxSizeBytes metrics.Histogram + // Number of failed transactions. FailedTxs metrics.Counter + + // RejectedTxs defines the number of rejected transactions. These are + // transactions that passed CheckTx but failed to make it into the mempool + // due to resource limits, e.g. mempool is full and no lower priority + // transactions exist in the mempool. + RejectedTxs metrics.Counter + + // EvictedTxs defines the number of evicted transactions. These are valid + // transactions that passed CheckTx and existed in the mempool but were later + // evicted to make room for higher priority valid transactions that passed + // CheckTx. + EvictedTxs metrics.Counter + // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter } @@ -41,6 +56,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "size", Help: "Size of the mempool (number of uncommitted transactions).", }, labels).With(labelsAndValues...), + TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -48,12 +64,28 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Transaction sizes in bytes.", Buckets: stdprometheus.ExponentialBuckets(1, 3, 17), }, labels).With(labelsAndValues...), + FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "failed_txs", Help: "Number of failed transactions.", }, labels).With(labelsAndValues...), + + RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rejected_txs", + Help: "Number of rejected transactions.", + }, labels).With(labelsAndValues...), + + EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "evicted_txs", + Help: "Number of evicted transactions.", + }, labels).With(labelsAndValues...), + RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -69,6 +101,8 @@ func NopMetrics() *Metrics { Size: discard.NewGauge(), TxSizeBytes: discard.NewHistogram(), FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), RecheckTimes: discard.NewCounter(), } } diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go index 2b780b997..723ce791a 100644 --- a/mempool/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -33,7 +33,7 @@ func (Mempool) Flush() {} func (Mempool) FlushAppConn() error { return nil } func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } func (Mempool) EnableTxsAvailable() {} -func (Mempool) TxsBytes() 
int64 { return 0 } +func (Mempool) SizeBytes() int64 { return 0 } func (Mempool) TxsFront() *clist.CElement { return nil } func (Mempool) TxsWaitChan() <-chan struct{} { return nil } diff --git a/mempool/tx.go b/mempool/tx.go new file mode 100644 index 000000000..8bdc82294 --- /dev/null +++ b/mempool/tx.go @@ -0,0 +1,37 @@ +package mempool + +import ( + "context" + "crypto/sha256" + + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +// TxKeySize defines the size of the transaction's key used for indexing. +const TxKeySize = sha256.Size + +// TxKey is the fixed length array key used as an index. +func TxKey(tx types.Tx) [TxKeySize]byte { + return sha256.Sum256(tx) +} + +// TxHashFromBytes returns the hash of a transaction from raw bytes. +func TxHashFromBytes(tx []byte) []byte { + return types.Tx(tx).Hash() +} + +// TxInfo are parameters that get passed when attempting to add a tx to the +// mempool. +type TxInfo struct { + // SenderID is the internal peer ID used in the mempool to identify the + // sender, storing two bytes with each transaction instead of 20 bytes for + // the p2p.NodeID. + SenderID uint16 + + // SenderNodeID is the actual p2p.NodeID of the sender. + SenderNodeID p2p.NodeID + + // Context is the optional context to cancel CheckTx + Context context.Context +} diff --git a/mempool/bench_test.go b/mempool/v0/bench_test.go similarity index 51% rename from mempool/bench_test.go rename to mempool/v0/bench_test.go index a6c07f98d..43fc44c32 100644 --- a/mempool/bench_test.go +++ b/mempool/v0/bench_test.go @@ -1,4 +1,4 @@ -package mempool +package v0 import ( "encoding/binary" @@ -6,42 +6,48 @@ import ( "testing" "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" ) func BenchmarkReap(b *testing.B) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.config.Size = 100000 + mp.config.Size = 100000 size := 10000 for i := 0; i < size; i++ { tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil { b.Fatal(err) } } b.ResetTimer() for i := 0; i < b.N; i++ { - mempool.ReapMaxBytesMaxGas(100000000, 10000000) + mp.ReapMaxBytesMaxGas(100000000, 10000000) } } func BenchmarkCheckTx(b *testing.B) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.config.Size = 1000000 + mp.config.Size = 1000000 + + b.ResetTimer() for i := 0; i < b.N; i++ { + b.StopTimer() tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + b.StartTimer() + + if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil { b.Fatal(err) } } @@ -50,10 +56,10 @@ func BenchmarkCheckTx(b *testing.B) { func BenchmarkParallelCheckTx(b *testing.B) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.config.Size = 100000000 + mp.config.Size = 100000000 var txcnt uint64 next := func() uint64 { @@ -65,7 +71,7 @@ func BenchmarkParallelCheckTx(b *testing.B) { for pb.Next() { tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, 
next()) - if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil { b.Fatal(err) } } @@ -75,49 +81,20 @@ func BenchmarkParallelCheckTx(b *testing.B) { func BenchmarkCheckDuplicateTx(b *testing.B) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.config.Size = 1000000 + mp.config.Size = 1000000 for i := 0; i < b.N; i++ { tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil { b.Fatal(err) } - if err := mempool.CheckTx(tx, nil, TxInfo{}); err == nil { + if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err == nil { b.Fatal("tx should be duplicate") } } } - -func BenchmarkCacheInsertTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Push(txs[i]) - } -} - -// This benchmark is probably skewed, since we actually will be removing -// txs in parallel, which may cause some overhead due to mutex locking. -func BenchmarkCacheRemoveTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - cache.Push(txs[i]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Remove(txs[i]) - } -} diff --git a/mempool/v0/cache_test.go b/mempool/v0/cache_test.go new file mode 100644 index 000000000..fab6a6011 --- /dev/null +++ b/mempool/v0/cache_test.go @@ -0,0 +1,81 @@ +package v0 + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +func TestCacheAfterUpdate(t *testing.T) { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + // reAddIndices & txsInCache can have elements > numTxsToCreate + // also assumes max index is 255 for convenience + // txs in cache also checks order of elements + tests := []struct { + numTxsToCreate int + updateIndices []int + reAddIndices []int + txsInCache []int + }{ + {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works + {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache + {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache + {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe + } + for tcIndex, tc := range tests { + for i := 0; i < tc.numTxsToCreate; i++ { + tx := types.Tx{byte(i)} + err := mp.CheckTx(tx, nil, mempool.TxInfo{}) + require.NoError(t, err) + } + + updateTxs := []types.Tx{} + for _, v := range tc.updateIndices { + tx := types.Tx{byte(v)} + updateTxs = append(updateTxs, tx) + } + err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + + for _, v := range tc.reAddIndices { + tx := types.Tx{byte(v)} + _ = mp.CheckTx(tx, nil, mempool.TxInfo{}) + } + + cache := mp.cache.(*mempool.LRUTxCache) + node := cache.GetList().Front() + 
counter := 0 + for node != nil { + require.NotEqual(t, len(tc.txsInCache), counter, + "cache larger than expected on testcase %d", tcIndex) + + nodeVal := node.Value.([sha256.Size]byte) + expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) + // Reference for reading the errors: + // >>> sha256('\x00').hexdigest() + // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' + // >>> sha256('\x01').hexdigest() + // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' + // >>> sha256('\x02').hexdigest() + // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' + + require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) + counter++ + node = node.Next() + } + require.Equal(t, len(tc.txsInCache), counter, + "cache smaller than expected on testcase %d", tcIndex) + mp.Flush() + } +} diff --git a/mempool/clist_mempool.go b/mempool/v0/clist_mempool.go similarity index 81% rename from mempool/clist_mempool.go rename to mempool/v0/clist_mempool.go index e59ce72ca..8b415310c 100644 --- a/mempool/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -1,10 +1,8 @@ -package mempool +package v0 import ( "bytes" - "container/list" "context" - "crypto/sha256" "fmt" "sync" "sync/atomic" @@ -15,16 +13,12 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) -// TxKeySize is the size of the transaction key index -const TxKeySize = sha256.Size - -//-------------------------------------------------------------------------------- - // CListMempool is an ordered in-memory pool for transactions before they are // proposed in a consensus round. Transaction validity is checked using the // CheckTx abci message before the transaction is added to the pool. The @@ -44,8 +38,8 @@ type CListMempool struct { // Exclusive mutex for Update method to prevent concurrent execution of // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. updateMtx tmsync.RWMutex - preCheck PreCheckFunc - postCheck PostCheckFunc + preCheck mempool.PreCheckFunc + postCheck mempool.PostCheckFunc txs *clist.CList // concurrent linked-list of good txs proxyAppConn proxy.AppConnMempool @@ -62,26 +56,27 @@ type CListMempool struct { // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. - cache txCache + cache mempool.TxCache - logger log.Logger - - metrics *Metrics + logger log.Logger + metrics *mempool.Metrics } -var _ Mempool = &CListMempool{} +var _ mempool.Mempool = &CListMempool{} // CListMempoolOption sets an optional parameter on the mempool. type CListMempoolOption func(*CListMempool) -// NewCListMempool returns a new mempool with the given configuration and connection to an application. +// NewCListMempool returns a new mempool with the given configuration and +// connection to an application. 
func NewCListMempool( config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64, options ...CListMempoolOption, ) *CListMempool { - mempool := &CListMempool{ + + mp := &CListMempool{ config: config, proxyAppConn: proxyAppConn, txs: clist.New(), @@ -89,18 +84,22 @@ func NewCListMempool( recheckCursor: nil, recheckEnd: nil, logger: log.NewNopLogger(), - metrics: NopMetrics(), + metrics: mempool.NopMetrics(), } + if config.CacheSize > 0 { - mempool.cache = newMapTxCache(config.CacheSize) + mp.cache = mempool.NewLRUTxCache(config.CacheSize) } else { - mempool.cache = nopTxCache{} + mp.cache = mempool.NopTxCache{} } - proxyAppConn.SetResponseCallback(mempool.globalCb) + + proxyAppConn.SetResponseCallback(mp.globalCb) + for _, option := range options { - option(mempool) + option(mp) } - return mempool + + return mp } // NOTE: not thread safe - should only be called once, on startup @@ -116,19 +115,19 @@ func (mem *CListMempool) SetLogger(l log.Logger) { // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns // false. This is ran before CheckTx. Only applies to the first created block. // After that, Update overwrites the existing value. -func WithPreCheck(f PreCheckFunc) CListMempoolOption { +func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption { return func(mem *CListMempool) { mem.preCheck = f } } // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns // false. This is ran after CheckTx. Only applies to the first created block. // After that, Update overwrites the existing value. -func WithPostCheck(f PostCheckFunc) CListMempoolOption { +func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption { return func(mem *CListMempool) { mem.postCheck = f } } // WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) CListMempoolOption { +func WithMetrics(metrics *mempool.Metrics) CListMempoolOption { return func(mem *CListMempool) { mem.metrics = metrics } } @@ -148,7 +147,7 @@ func (mem *CListMempool) Size() int { } // Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsBytes() int64 { +func (mem *CListMempool) SizeBytes() int64 { return atomic.LoadInt64(&mem.txsBytes) } @@ -200,7 +199,7 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} { // CONTRACT: Either cb will get called, or err returned. // // Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error { +func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { mem.updateMtx.RLock() // use defer to unlock mutex because application (*local client*) might panic defer mem.updateMtx.RUnlock() @@ -212,12 +211,17 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx } if txSize > mem.config.MaxTxBytes { - return ErrTxTooLarge{mem.config.MaxTxBytes, txSize} + return mempool.ErrTxTooLarge{ + Max: mem.config.MaxTxBytes, + Actual: txSize, + } } if mem.preCheck != nil { if err := mem.preCheck(tx); err != nil { - return ErrPreCheck{err} + return mempool.ErrPreCheck{ + Reason: err, + } } } @@ -226,19 +230,19 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx return err } - if !mem.cache.Push(tx) { + if !mem.cache.Push(tx) { // if the transaction already exists in the cache // Record a new sender for a tx we've already seen. // Note it's possible a tx is still in the cache but no longer in the mempool // (eg. 
after committing a block, txs are removed from mempool but not cache), // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(TxKey(tx)); ok { + if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok { memTx := e.(*clist.CElement).Value.(*mempoolTx) _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true) // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, // but they can spam the same tx with little cost to them atm. if loaded { - return ErrTxInCache + return mempool.ErrTxInCache } } @@ -256,7 +260,7 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx mem.cache.Remove(tx) return err } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb)) + reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb)) return nil } @@ -319,7 +323,7 @@ func (mem *CListMempool) reqResCb( // - resCbFirstTime (lock not held) if tx is valid func (mem *CListMempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) - mem.txsMap.Store(TxKey(memTx.tx), e) + mem.txsMap.Store(mempool.TxKey(memTx.tx), e) atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } @@ -330,7 +334,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) { func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() - mem.txsMap.Delete(TxKey(tx)) + mem.txsMap.Delete(mempool.TxKey(tx)) atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) if removeFromCache { @@ -339,7 +343,7 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC } // RemoveTxByKey removes a transaction from the mempool by its TxKey index. 
-func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) { +func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) { if e, ok := mem.txsMap.Load(txKey); ok { memTx := e.(*clist.CElement).Value.(*mempoolTx) if memTx != nil { @@ -351,13 +355,15 @@ func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bo func (mem *CListMempool) isFull(txSize int) error { var ( memSize = mem.Size() - txsBytes = mem.TxsBytes() + txsBytes = mem.SizeBytes() ) if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return ErrMempoolIsFull{ - memSize, mem.config.Size, - txsBytes, mem.config.MaxTxsBytes, + return mempool.ErrMempoolIsFull{ + NumTxs: memSize, + MaxTxs: mem.config.Size, + TxsBytes: txsBytes, + MaxTxsBytes: mem.config.MaxTxsBytes, } } @@ -397,8 +403,9 @@ func (mem *CListMempool) resCbFirstTime( } memTx.senders.Store(peerID, true) mem.addTx(memTx) - mem.logger.Debug("added good transaction", - "tx", txID(tx), + mem.logger.Debug( + "added good transaction", + "tx", mempool.TxHashFromBytes(tx), "res", r, "height", memTx.height, "total", mem.Size(), @@ -406,14 +413,21 @@ func (mem *CListMempool) resCbFirstTime( mem.notifyTxsAvailable() } else { // ignore bad transaction - mem.logger.Debug("rejected bad transaction", - "tx", txID(tx), "peerID", peerP2PID, "res", r, "err", postCheckErr) + mem.logger.Debug( + "rejected bad transaction", + "tx", mempool.TxHashFromBytes(tx), + "peerID", peerP2PID, + "res", r, + "err", postCheckErr, + ) mem.metrics.FailedTxs.Add(1) + if !mem.config.KeepInvalidTxsInCache { // remove from cache (it might be good later) mem.cache.Remove(tx) } } + default: // ignore other messages } @@ -442,7 +456,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { // Good, nothing to do. } else { // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr) + mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr) // NOTE: we remove tx from the cache because it might be good later mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) } @@ -547,8 +561,8 @@ func (mem *CListMempool) Update( height int64, txs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, - preCheck PreCheckFunc, - postCheck PostCheckFunc, + preCheck mempool.PreCheckFunc, + postCheck mempool.PostCheckFunc, ) error { // Set height mem.height = height @@ -580,7 +594,7 @@ func (mem *CListMempool) Update( // Mempool after: // 100 // https://github.com/tendermint/tendermint/issues/3322. - if e, ok := mem.txsMap.Load(TxKey(tx)); ok { + if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok { mem.removeTx(tx, e.(*clist.CElement), false) } } @@ -652,98 +666,3 @@ type mempoolTx struct { func (memTx *mempoolTx) Height() int64 { return atomic.LoadInt64(&memTx.height) } - -//-------------------------------------------------------------------------------- - -type txCache interface { - Reset() - Push(tx types.Tx) bool - Remove(tx types.Tx) -} - -// mapTxCache maintains a LRU cache of transactions. This only stores the hash -// of the tx, due to memory concerns. -type mapTxCache struct { - mtx tmsync.Mutex - size int - cacheMap map[[TxKeySize]byte]*list.Element - list *list.List -} - -var _ txCache = (*mapTxCache)(nil) - -// newMapTxCache returns a new mapTxCache. 
-func newMapTxCache(cacheSize int) *mapTxCache { - return &mapTxCache{ - size: cacheSize, - cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize), - list: list.New(), - } -} - -// Reset resets the cache to an empty state. -func (cache *mapTxCache) Reset() { - cache.mtx.Lock() - cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size) - cache.list.Init() - cache.mtx.Unlock() -} - -// Push adds the given tx to the cache and returns true. It returns -// false if tx is already in the cache. -func (cache *mapTxCache) Push(tx types.Tx) bool { - cache.mtx.Lock() - defer cache.mtx.Unlock() - - // Use the tx hash in the cache - txHash := TxKey(tx) - if moved, exists := cache.cacheMap[txHash]; exists { - cache.list.MoveToBack(moved) - return false - } - - if cache.list.Len() >= cache.size { - popped := cache.list.Front() - if popped != nil { - poppedTxHash := popped.Value.([TxKeySize]byte) - delete(cache.cacheMap, poppedTxHash) - cache.list.Remove(popped) - } - } - e := cache.list.PushBack(txHash) - cache.cacheMap[txHash] = e - return true -} - -// Remove removes the given tx from the cache. -func (cache *mapTxCache) Remove(tx types.Tx) { - cache.mtx.Lock() - txHash := TxKey(tx) - popped := cache.cacheMap[txHash] - delete(cache.cacheMap, txHash) - if popped != nil { - cache.list.Remove(popped) - } - - cache.mtx.Unlock() -} - -type nopTxCache struct{} - -var _ txCache = (*nopTxCache)(nil) - -func (nopTxCache) Reset() {} -func (nopTxCache) Push(types.Tx) bool { return true } -func (nopTxCache) Remove(types.Tx) {} - -//-------------------------------------------------------------------------------- - -// TxKey is the fixed length array hash used as the key in maps. -func TxKey(tx types.Tx) [TxKeySize]byte { - return sha256.Sum256(tx) -} - -// txID is a hash of the Tx. 
-func txID(tx []byte) []byte { - return types.Tx(tx).Hash() -} diff --git a/mempool/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go similarity index 71% rename from mempool/clist_mempool_test.go rename to mempool/v0/clist_mempool_test.go index 57357b954..0fd39103a 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -1,4 +1,4 @@ -package mempool +package v0 import ( "context" @@ -23,6 +23,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -42,9 +43,11 @@ func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CL if err != nil { panic(err) } - mempool := NewCListMempool(config.Mempool, appConnMem, 0) - mempool.SetLogger(log.TestingLogger()) - return mempool, func() { os.RemoveAll(config.RootDir) } + + mp := NewCListMempool(config.Mempool, appConnMem, 0) + mp.SetLogger(log.TestingLogger()) + + return mp, func() { os.RemoveAll(config.RootDir) } } func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { @@ -65,9 +68,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs { +func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) - txInfo := TxInfo{SenderID: peerID} + txInfo := mempool.TxInfo{SenderID: peerID} for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes @@ -75,11 +78,11 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs if err != nil { t.Error(err) } - if err := mempool.CheckTx(txBytes, nil, txInfo); err != nil { + if err := mp.CheckTx(txBytes, nil, txInfo); err != nil { // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs // returned. - if IsPreCheckError(err) { + if mempool.IsPreCheckError(err) { continue } t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) @@ -91,18 +94,18 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs func TestReapMaxBytesMaxGas(t *testing.T) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() // Ensure gas calculation behaves as expected - checkTxs(t, mempool, 1, UnknownPeerID) - tx0 := mempool.TxsFront().Value.(*mempoolTx) + checkTxs(t, mp, 1, mempool.UnknownPeerID) + tx0 := mp.TxsFront().Value.(*mempoolTx) // assert that kv store has gas wanted = 1. require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") // ensure each tx is 20 bytes long require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") - mempool.Flush() + mp.Flush() // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
// each tx has 20 bytes @@ -129,18 +132,18 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 20000, 30, 20}, } for tcIndex, tt := range tests { - checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) - got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) + checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) + got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", len(got), tt.expectedNumTxs, tcIndex) - mempool.Flush() + mp.Flush() } } func TestMempoolFilters(t *testing.T) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() emptyTxArr := []types.Tx{[]byte{}} @@ -151,63 +154,63 @@ func TestMempoolFilters(t *testing.T) { // each tx has 20 bytes tests := []struct { numTxsToCreate int - preFilter PreCheckFunc - postFilter PostCheckFunc + preFilter mempool.PreCheckFunc + postFilter mempool.PostCheckFunc expectedNumTxs int }{ {10, nopPreFilter, nopPostFilter, 10}, - {10, PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, PreCheckMaxBytes(22), nopPostFilter, 10}, - {10, nopPreFilter, PostCheckMaxGas(-1), 10}, - {10, nopPreFilter, PostCheckMaxGas(0), 0}, - {10, nopPreFilter, PostCheckMaxGas(1), 10}, - {10, nopPreFilter, PostCheckMaxGas(3000), 10}, - {10, PreCheckMaxBytes(10), PostCheckMaxGas(20), 0}, - {10, PreCheckMaxBytes(30), PostCheckMaxGas(20), 10}, - {10, PreCheckMaxBytes(22), PostCheckMaxGas(1), 10}, - {10, PreCheckMaxBytes(22), PostCheckMaxGas(0), 0}, + {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, + {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, + {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, + {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, + {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, + {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, + {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, } for tcIndex, tt := range tests { - err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) + err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) require.NoError(t, err) - checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) - require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) - mempool.Flush() + checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) + require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) + mp.Flush() } } func TestMempoolUpdate(t *testing.T) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() // 1. Adds valid txs to the cache { - err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) require.NoError(t, err) - err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{}) require.NoError(t, err) } // 2. 
Removes valid txs from the mempool { - err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{}) + err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{}) require.NoError(t, err) - err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) require.NoError(t, err) - assert.Zero(t, mempool.Size()) + assert.Zero(t, mp.Size()) } // 3. Removes invalid transactions from the cache and the mempool (if present) { - err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{}) + err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{}) require.NoError(t, err) - err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) + err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) require.NoError(t, err) - assert.Zero(t, mempool.Size()) + assert.Zero(t, mp.Size()) - err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{}) require.NoError(t, err) } } @@ -217,7 +220,7 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { cc := proxy.NewLocalClientCreator(app) wcfg := cfg.DefaultConfig() wcfg.Mempool.KeepInvalidTxsInCache = true - mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg) + mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) defer cleanup() // 1. An invalid transaction must remain in the cache after Update @@ -228,22 +231,22 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { b := make([]byte, 8) binary.BigEndian.PutUint64(b, 1) - err := mempool.CheckTx(b, nil, TxInfo{}) + err := mp.CheckTx(b, nil, mempool.TxInfo{}) require.NoError(t, err) // simulate new block _ = app.DeliverTx(abci.RequestDeliverTx{Tx: a}) _ = app.DeliverTx(abci.RequestDeliverTx{Tx: b}) - err = mempool.Update(1, []types.Tx{a, b}, + err = mp.Update(1, []types.Tx{a, b}, []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil) require.NoError(t, err) // a must be added to the cache - err = mempool.CheckTx(a, nil, TxInfo{}) + err = mp.CheckTx(a, nil, mempool.TxInfo{}) require.NoError(t, err) // b must remain in the cache - err = mempool.CheckTx(b, nil, TxInfo{}) + err = mp.CheckTx(b, nil, mempool.TxInfo{}) require.NoError(t, err) } @@ -253,9 +256,9 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { binary.BigEndian.PutUint64(a, 0) // remove a from the cache to test (2) - mempool.cache.Remove(a) + mp.cache.Remove(a) - err := mempool.CheckTx(a, nil, TxInfo{}) + err := mp.CheckTx(a, nil, mempool.TxInfo{}) require.NoError(t, err) } } @@ -263,52 +266,52 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { func TestTxsAvailable(t *testing.T) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.EnableTxsAvailable() + mp.EnableTxsAvailable() timeoutMS := 500 // with no txs, it shouldnt fire - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch of txs, it should only fire once - txs := checkTxs(t, mempool, 100, UnknownPeerID) - ensureFire(t, mempool.TxsAvailable(), timeoutMS) - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + txs := checkTxs(t, mp, 100, mempool.UnknownPeerID) + ensureFire(t, mp.TxsAvailable(), timeoutMS) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // call update with half the txs. 
// it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { + if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } - ensureFire(t, mempool.TxsAvailable(), timeoutMS) - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + ensureFire(t, mp.TxsAvailable(), timeoutMS) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mempool, 50, UnknownPeerID) - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // now call update with all the txs. it should not fire as there are no txs left committedTxs = append(txs, moreTxs...) //nolint: gocritic - if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { + if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch more txs, it should only fire once - checkTxs(t, mempool, 100, UnknownPeerID) - ensureFire(t, mempool.TxsAvailable(), timeoutMS) - ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) + checkTxs(t, mp, 100, mempool.UnknownPeerID) + ensureFire(t, mp.TxsAvailable(), timeoutMS) + ensureNoFire(t, mp.TxsAvailable(), timeoutMS) } func TestSerialReap(t *testing.T) { app := counter.NewApplication(true) cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() appConnCon, _ := cc.NewABCIClient() @@ -324,7 +327,7 @@ func TestSerialReap(t *testing.T) { // This will succeed txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := mempool.CheckTx(txBytes, nil, TxInfo{}) + err := mp.CheckTx(txBytes, nil, mempool.TxInfo{}) _, cached := cacheMap[string(txBytes)] if cached { require.NotNil(t, err, "expected error for cached tx") @@ -334,13 +337,13 @@ func TestSerialReap(t *testing.T) { cacheMap[string(txBytes)] = struct{}{} // Duplicates are cached and should return error - err = mempool.CheckTx(txBytes, nil, TxInfo{}) + err = mp.CheckTx(txBytes, nil, mempool.TxInfo{}) require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") } } reapCheck := func(exp int) { - txs := mempool.ReapMaxBytesMaxGas(-1, -1) + txs := mp.ReapMaxBytesMaxGas(-1, -1) require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) } @@ -351,7 +354,7 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { + if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } } @@ -443,7 +446,7 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { tx := tmrand.Bytes(testCase.len) - err := mempl.CheckTx(tx, nil, TxInfo{}) + err := mempl.CheckTx(tx, nil, mempool.TxInfo{}) bv := gogotypes.BytesValue{Value: tx} bz, err2 := bv.Marshal() require.NoError(t, err2) @@ -452,7 +455,10 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { if !testCase.err { 
require.NoError(t, err, caseString) } else { - require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString) + require.Equal(t, err, mempool.ErrTxTooLarge{ + Max: maxTxSize, + Actual: testCase.len, + }, caseString) } } } @@ -462,50 +468,50 @@ func TestMempoolTxsBytes(t *testing.T) { cc := proxy.NewLocalClientCreator(app) config := cfg.ResetTestRoot("mempool_test") config.Mempool.MaxTxsBytes = 10 - mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + mp, cleanup := newMempoolWithAppAndConfig(cc, config) defer cleanup() // 1. zero by default - assert.EqualValues(t, 0, mempool.TxsBytes()) + assert.EqualValues(t, 0, mp.SizeBytes()) // 2. len(tx) after CheckTx - err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{}) + err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{}) require.NoError(t, err) - assert.EqualValues(t, 1, mempool.TxsBytes()) + assert.EqualValues(t, 1, mp.SizeBytes()) // 3. zero again after tx is removed by Update - err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) require.NoError(t, err) - assert.EqualValues(t, 0, mempool.TxsBytes()) + assert.EqualValues(t, 0, mp.SizeBytes()) // 4. zero after Flush - err = mempool.CheckTx([]byte{0x02, 0x03}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{}) require.NoError(t, err) - assert.EqualValues(t, 2, mempool.TxsBytes()) + assert.EqualValues(t, 2, mp.SizeBytes()) - mempool.Flush() - assert.EqualValues(t, 0, mempool.TxsBytes()) + mp.Flush() + assert.EqualValues(t, 0, mp.SizeBytes()) // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. - err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil, mempool.TxInfo{}) require.NoError(t, err) - err = mempool.CheckTx([]byte{0x05}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{}) if assert.Error(t, err) { - assert.IsType(t, ErrMempoolIsFull{}, err) + assert.IsType(t, mempool.ErrMempoolIsFull{}, err) } // 6. zero after tx is rechecked and removed due to not being valid anymore app2 := counter.NewApplication(true) cc = proxy.NewLocalClientCreator(app2) - mempool, cleanup = newMempoolWithApp(cc) + mp, cleanup = newMempoolWithApp(cc) defer cleanup() txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(0)) - err = mempool.CheckTx(txBytes, nil, TxInfo{}) + err = mp.CheckTx(txBytes, nil, mempool.TxInfo{}) require.NoError(t, err) - assert.EqualValues(t, 8, mempool.TxsBytes()) + assert.EqualValues(t, 8, mp.SizeBytes()) appConnCon, _ := cc.NewABCIClient() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) @@ -525,18 +531,18 @@ func TestMempoolTxsBytes(t *testing.T) { require.NotEmpty(t, res2.Data) // Pretend like we committed nothing so txBytes gets rechecked and removed. - err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) + err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) require.NoError(t, err) - assert.EqualValues(t, 0, mempool.TxsBytes()) + assert.EqualValues(t, 0, mp.SizeBytes()) // 7. 
Test RemoveTxByKey function - err = mempool.CheckTx([]byte{0x06}, nil, TxInfo{}) + err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{}) require.NoError(t, err) - assert.EqualValues(t, 1, mempool.TxsBytes()) - mempool.RemoveTxByKey(TxKey([]byte{0x07}), true) - assert.EqualValues(t, 1, mempool.TxsBytes()) - mempool.RemoveTxByKey(TxKey([]byte{0x06}), true) - assert.EqualValues(t, 0, mempool.TxsBytes()) + assert.EqualValues(t, 1, mp.SizeBytes()) + mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true) + assert.EqualValues(t, 1, mp.SizeBytes()) + mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true) + assert.EqualValues(t, 0, mp.SizeBytes()) } @@ -554,7 +560,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { } }) config := cfg.ResetTestRoot("mempool_test") - mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + mp, cleanup := newMempoolWithAppAndConfig(cc, config) defer cleanup() // generate small number of txs @@ -574,9 +580,9 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { tx := txs[txNum] // this will err with ErrTxInCache many times ... - mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error + mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error } - err := mempool.FlushAppConn() + err := mp.FlushAppConn() require.NoError(t, err) } diff --git a/mempool/doc.go b/mempool/v0/doc.go similarity index 98% rename from mempool/doc.go rename to mempool/v0/doc.go index 7e6363e12..3b5d0d20d 100644 --- a/mempool/doc.go +++ b/mempool/v0/doc.go @@ -20,4 +20,4 @@ // broadcastTxRoutine(). // TODO: Better handle abci client errors. (make it automatically handle connection errors) -package mempool +package v0 diff --git a/mempool/reactor.go b/mempool/v0/reactor.go similarity index 93% rename from mempool/reactor.go rename to mempool/v0/reactor.go index 005bb390e..33d101475 100644 --- a/mempool/reactor.go +++ b/mempool/v0/reactor.go @@ -1,9 +1,8 @@ -package mempool +package v0 import ( "errors" "fmt" - "math" "sync" "time" @@ -12,6 +11,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" @@ -22,19 +22,6 @@ var ( _ p2p.Wrapper = (*protomem.Message)(nil) ) -const ( - MempoolChannel = p2p.ChannelID(0x30) - - // peerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind - peerCatchupSleepIntervalMS = 100 - - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) - UnknownPeerID uint16 = 0 - - maxActiveIDs = math.MaxUint16 -) - // PeerManager defines the interface contract required for getting necessary // peer information. This should eventually be replaced with a message-oriented // approach utilizing the p2p stack. @@ -50,7 +37,7 @@ type Reactor struct { config *cfg.MempoolConfig mempool *CListMempool - ids *mempoolIDs + ids *mempool.MempoolIDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. 
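
Editorial aside, not part of the patch itself: the v0 reactor above now relies on the exported mempool.MempoolIDs and mempool.TxInfo types for its sender bookkeeping. The sketch below shows how a reactor-style caller might wire the two together; submitFromPeer is a hypothetical helper and the node ID is the same placeholder value the tests use.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// submitFromPeer looks up the compact uint16 sender ID reserved for a peer and
// attaches it, together with the full node ID, to the CheckTx call so the
// mempool can track senders cheaply. Hypothetical helper for illustration only.
func submitFromPeer(mp mempool.Mempool, ids *mempool.MempoolIDs, peerID p2p.NodeID, tx types.Tx) error {
	txInfo := mempool.TxInfo{
		SenderID:     ids.GetForPeer(peerID),
		SenderNodeID: peerID,
	}
	return mp.CheckTx(tx, nil, txInfo)
}

func main() {
	ids := mempool.NewMempoolIDs()

	// Placeholder node ID, same value as in ids_test.go.
	peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
	if err != nil {
		panic(err)
	}

	ids.ReserveForPeer(peerID)
	defer ids.Reclaim(peerID)

	fmt.Println("peer mapped to sender ID", ids.GetForPeer(peerID))
}
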
@@ -74,7 +61,7 @@ func NewReactor( logger log.Logger, config *cfg.MempoolConfig, peerMgr PeerManager, - mempool *CListMempool, + mp *CListMempool, mempoolCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, ) *Reactor { @@ -82,8 +69,8 @@ func NewReactor( r := &Reactor{ config: config, peerMgr: peerMgr, - mempool: mempool, - ids: newMempoolIDs(), + mempool: mp, + ids: mempool.NewMempoolIDs(), mempoolCh: mempoolCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), @@ -110,10 +97,10 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe } return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - MempoolChannel: { + mempool.MempoolChannel: { MsgType: new(protomem.Message), Descriptor: &p2p.ChannelDescriptor{ - ID: byte(MempoolChannel), + ID: byte(mempool.MempoolChannel), Priority: 5, RecvMessageCapacity: batchMsg.Size(), @@ -175,14 +162,14 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { return errors.New("empty txs received from peer") } - txInfo := TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} if len(envelope.From) != 0 { - txInfo.SenderP2PID = envelope.From + txInfo.SenderNodeID = envelope.From } for _, tx := range protoTxs { if err := r.mempool.CheckTx(types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", txID(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) } } @@ -206,7 +193,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err r.Logger.Debug("received message", "peer", envelope.From) switch chID { - case MempoolChannel: + case mempool.MempoolChannel: err = r.handleMempoolMessage(envelope) default: @@ -361,7 +348,7 @@ func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) { height := r.peerMgr.GetHeight(peerID) if height > 0 && height < memTx.Height()-1 { // allow for a lag of one block - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue } } @@ -378,7 +365,11 @@ func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) { Txs: [][]byte{memTx.tx}, }, } - r.Logger.Debug("gossiped tx to peer", "tx", fmt.Sprintf("%X", txID(memTx.tx)), "peer", peerID) + r.Logger.Debug( + "gossiped tx to peer", + "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "peer", peerID, + ) } select { diff --git a/mempool/reactor_test.go b/mempool/v0/reactor_test.go similarity index 89% rename from mempool/reactor_test.go rename to mempool/v0/reactor_test.go index 71d572247..2f1cae3a2 100644 --- a/mempool/reactor_test.go +++ b/mempool/v0/reactor_test.go @@ -1,4 +1,4 @@ -package mempool +package v0 import ( "sync" @@ -12,6 +12,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/p2ptest" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" @@ -48,7 +49,7 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: byte(MempoolChannel)} + chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} rts.mempoolChnnels = 
rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) for nodeID := range rts.network.Nodes { @@ -163,7 +164,7 @@ func TestReactorBroadcastTxs(t *testing.T) { primary := rts.nodes[0] secondaries := rts.nodes[1:] - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) // run the router rts.start(t) @@ -200,7 +201,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update the whole mempool - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) go func() { defer wg.Done() @@ -219,7 +220,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update none - _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID) + _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) go func() { defer wg.Done() @@ -263,21 +264,6 @@ func TestReactorNoBroadcastToSender(t *testing.T) { rts.assertMempoolChannelsDrained(t) } -func TestMempoolIDsBasic(t *testing.T) { - ids := newMempoolIDs() - - peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - ids.ReserveForPeer(peerID) - require.EqualValues(t, 1, ids.GetForPeer(peerID)) - ids.Reclaim(peerID) - - ids.ReserveForPeer(peerID) - require.EqualValues(t, 2, ids.GetForPeer(peerID)) - ids.Reclaim(peerID) -} - func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 config := cfg.TestConfig() @@ -290,7 +276,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // Broadcast a tx, which has the max size and ensure it's received by the // second reactor. tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes) - err := rts.reactors[primary].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID}) + err := rts.reactors[primary].mempool.CheckTx(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) require.NoError(t, err) rts.start(t) @@ -304,7 +290,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // broadcast a tx, which is beyond the max size and ensure it's not sent tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID}) + err = rts.mempools[primary].CheckTx(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) require.Error(t, err) rts.assertMempoolChannelsDrained(t) @@ -315,7 +301,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { // we're creating a single node network, but not starting the // network. - rts := setup(t, config.Mempool, 1, maxActiveIDs+1) + rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1) nodeID := rts.nodes[0] @@ -323,7 +309,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { require.NoError(t, err) // ensure the reactor does not panic (i.e. 
exhaust active IDs) - for i := 0; i < maxActiveIDs+1; i++ { + for i := 0; i < mempool.MaxActiveIDs+1; i++ { rts.peerChans[nodeID] <- p2p.PeerUpdate{ Status: p2p.PeerStatusUp, NodeID: peerID, @@ -361,12 +347,12 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { } // 0 is already reserved for UnknownPeerID - ids := newMempoolIDs() + ids := mempool.NewMempoolIDs() peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) - for i := 0; i < maxActiveIDs-1; i++ { + for i := 0; i < mempool.MaxActiveIDs-1; i++ { ids.ReserveForPeer(peerID) } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go new file mode 100644 index 000000000..f49c182dd --- /dev/null +++ b/mempool/v1/mempool.go @@ -0,0 +1,763 @@ +package v1 + +import ( + "bytes" + "context" + "fmt" + "sync/atomic" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +var _ mempool.Mempool = (*TxMempool)(nil) + +// TxMempoolOption sets an optional parameter on the TxMempool. +type TxMempoolOption func(*TxMempool) + +// TxMempool defines a prioritized mempool data structure used by the v1 mempool +// reactor. It keeps a thread-safe priority queue of transactions that is used +// when a block proposer constructs a block and a thread-safe linked-list that +// is used to gossip transactions to peers in a FIFO manner. +type TxMempool struct { + logger log.Logger + metrics *mempool.Metrics + config *config.MempoolConfig + proxyAppConn proxy.AppConnMempool + + // txsAvailable fires once for each height when the mempool is not empty + txsAvailable chan struct{} + notifiedTxsAvailable bool + + // height defines the last block height process during Update() + height int64 + + // sizeBytes defines the total size of the mempool (sum of all tx bytes) + sizeBytes int64 + + // cache defines a fixed-size cache of already seen transactions as this + // reduces pressure on the proxyApp. + cache mempool.TxCache + + // txStore defines the main storage of valid transactions. Indexes are built + // on top of this store. + txStore *TxStore + + // gossipIndex defines the gossiping index of valid transactions via a + // thread-safe linked-list. We also use the gossip index as a cursor for + // rechecking transactions already in the mempool. + gossipIndex *clist.CList + + // recheckCursor and recheckEnd are used as cursors based on the gossip index + // to recheck transactions that are already in the mempool. Iteration is not + // thread-safe and transaction may be mutated in serial order. + // + // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for + // iterator and cursor management when rechecking transactions. If the gossip + // index changes or is removed in a future refactor, this will have to be + // refactored. Instead, we should consider just keeping a slice of a snapshot + // of the mempool's current transactions during Update and an integer cursor + // into that slice. This, however, requires additional O(n) space complexity. 
+ recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // priorityIndex defines the priority index of valid transactions via a + // thread-safe priority queue. + priorityIndex *TxPriorityQueue + + // A read/write lock is used to safe guard updates, insertions and deletions + // from the mempool. A read-lock is implicitly acquired when executing CheckTx, + // however, a caller must explicitly grab a write-lock via Lock when updating + // the mempool via Update(). + mtx tmsync.RWMutex + preCheck mempool.PreCheckFunc + postCheck mempool.PostCheckFunc +} + +func NewTxMempool( + logger log.Logger, + cfg *config.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...TxMempoolOption, +) *TxMempool { + + txmp := &TxMempool{ + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + height: height, + cache: mempool.NopTxCache{}, + metrics: mempool.NopMetrics(), + txStore: NewTxStore(), + gossipIndex: clist.New(), + priorityIndex: NewTxPriorityQueue(), + } + + if cfg.CacheSize > 0 { + txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) + } + + proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) + + for _, opt := range options { + opt(txmp) + } + + return txmp +} + +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. +func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.preCheck = f } +} + +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value. +func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.postCheck = f } +} + +// WithMetrics sets the mempool's metrics collector. +func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } +} + +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. +func (txmp *TxMempool) Lock() { + txmp.mtx.Lock() +} + +// Unlock releases a write-lock on the mempool. +func (txmp *TxMempool) Unlock() { + txmp.mtx.Unlock() +} + +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. +func (txmp *TxMempool) Size() int { + return txmp.txStore.Size() +} + +// SizeBytes return the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. +func (txmp *TxMempool) SizeBytes() int64 { + return atomic.LoadInt64(&txmp.sizeBytes) +} + +// FlushAppConn executes FlushSync on the mempool's proxyAppConn. +// +// NOTE: The caller must obtain a write-lock via Lock() prior to execution. +func (txmp *TxMempool) FlushAppConn() error { + return txmp.proxyAppConn.FlushSync(context.Background()) +} + +// WaitForNextTx returns a blocking channel that will be closed when the next +// valid transaction is available to gossip. It is thread-safe. +func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { + return txmp.gossipIndex.WaitChan() +} + +// NextGossipTx returns the next valid transaction to gossip. A caller must wait +// for WaitForNextTx to signal a transaction is available to gossip first. It is +// thread-safe. 
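+//
+// Illustrative sketch only (mirrors the reactor's broadcast loop in this
+// package; variable names are placeholders):
+//
+//	<-txmp.WaitForNextTx()     // block until the gossip index is non-empty
+//	wtx := txmp.NextGossipTx() // front element of the FIFO gossip index
+//	_ = wtx.tx                 // raw transaction bytes to forward to a peer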
+func (txmp *TxMempool) NextGossipTx() *WrappedTx { + return txmp.gossipIndex.Front().Value.(*WrappedTx) +} + +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block by block basis. +func (txmp *TxMempool) EnableTxsAvailable() { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + + txmp.txsAvailable = make(chan struct{}, 1) +} + +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + return txmp.txsAvailable +} + +// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires +// a read-lock attempts to execute the application's CheckTx ABCI method via +// CheckTxAsync. We return an error if any of the following happen: +// +// - The CheckTxAsync execution fails. +// - The transaction already exists in the cache and we've already received the +// transaction from the peer. Otherwise, if it solely exists in the cache, we +// return nil. +// - The transaction size exceeds the maximum transaction size as defined by the +// configuration provided to the mempool. +// - The transaction fails Pre-Check (if it is defined). +// - The proxyAppConn fails, e.g. the buffer is full. +// +// If the mempool is full, we still execute CheckTx and attempt to find a lower +// priority transaction to evict. If such a transaction exists, we remove the +// lower priority transaction and add the new one with higher priority. +// +// NOTE: +// - The applications' CheckTx implementation may panic. +// - The caller is not to explicitly require any locks for executing CheckTx. +func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + txSize := len(tx) + if txSize > txmp.config.MaxTxBytes { + return mempool.ErrTxTooLarge{ + Max: txmp.config.MaxTxBytes, + Actual: txSize, + } + } + + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return mempool.ErrPreCheck{ + Reason: err, + } + } + } + + if err := txmp.proxyAppConn.Error(); err != nil { + return err + } + + txHash := mempool.TxKey(tx) + + // We add the transaction to the mempool's cache and if the transaction already + // exists, i.e. false is returned, then we check if we've seen this transaction + // from the same sender and error if we have. Otherwise, we return nil. + if !txmp.cache.Push(tx) { + wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) + if wtx != nil && ok { + // We already have the transaction stored and the we've already seen this + // transaction from txInfo.SenderID. + return mempool.ErrTxInCache + } + + txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) + return nil + } + + ctx := txInfo.Context + if ctx == nil { + ctx = context.Background() + } + + reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) + if err != nil { + txmp.cache.Remove(tx) + return err + } + + reqRes.SetCallback(func(res *abci.Response) { + if txmp.recheckCursor != nil { + panic("recheck cursor is non-nil in CheckTx callback") + } + + wtx := &WrappedTx{ + tx: tx, + hash: txHash, + timestamp: time.Now().UTC(), + } + txmp.initTxCallback(wtx, res, txInfo) + + if cb != nil { + cb(res) + } + }) + + return nil +} + +// Flush flushes out the mempool. 
It acquires a read-lock, fetches all the +// transactions currently in the transaction store and removes each transaction +// from the store and all indexes and finally resets the cache. +// +// NOTE: +// - Flushing the mempool may leave the mempool in an inconsistent state. +func (txmp *TxMempool) Flush() { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + for _, wtx := range txmp.txStore.GetAllTxs() { + if !txmp.txStore.IsTxRemoved(wtx.hash) { + txmp.txStore.RemoveTx(wtx) + txmp.priorityIndex.RemoveTx(wtx) + txmp.gossipIndex.Remove(wtx.gossipEl) + wtx.gossipEl.DetachPrev() + } + } + + atomic.SwapInt64(&txmp.sizeBytes, 0) + txmp.cache.Reset() +} + +// ReapMaxBytesMaxGas returns a list of transactions within the provided size +// and gas constraints. Transaction are retrieved in priority order. +// +// NOTE: +// - A read-lock is acquired. +// - Transactions returned are not actually removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + var ( + totalGas int64 + totalSize int64 + ) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + }() + + txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) + for txmp.priorityIndex.NumTxs() > 0 { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + + // Ensure we have capacity for the transaction with respect to the + // transaction size. + if maxBytes > -1 && totalSize+size > maxBytes { + return txs[:len(txs)-1] + } + + totalSize += size + + // ensure we have capacity for the transaction with respect to total gas + gas := totalGas + wtx.gasWanted + if maxGas > -1 && gas > maxGas { + return txs[:len(txs)-1] + } + + totalGas = gas + } + + return txs +} + +// ReapMaxTxs returns a list of transactions within the provided number of +// transactions bound. Transaction are retrieved in priority order. +// +// NOTE: +// - A read-lock is acquired. +// - Transactions returned are not actually removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + numTxs := txmp.priorityIndex.NumTxs() + if max < 0 { + max = numTxs + } + + cap := tmmath.MinInt(numTxs, max) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, cap) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + }() + + txs := make([]types.Tx, 0, cap) + for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + } + + return txs +} + +// Update iterates over all the transactions provided by the caller, i.e. the +// block producer, and removes them from the cache (if applicable) and removes +// the transactions from the main transaction store and associated indexes. +// Finally, if there are trainsactions remaining in the mempool, we initiate a +// re-CheckTx for them (if applicable), otherwise, we notify the caller more +// transactions are available. +// +// NOTE: +// - The caller must explicitly acquire a write-lock via Lock(). 
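+//
+// Illustrative usage, as exercised by the tests in this package (variable
+// names are placeholders):
+//
+//	txmp.Lock()
+//	err := txmp.Update(height, committedTxs, deliverTxResponses, nil, nil)
+//	txmp.Unlock()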
+func (txmp *TxMempool) Update( + blockHeight int64, + blockTxs types.Txs, + deliverTxResponses []*abci.ResponseDeliverTx, + newPreFn mempool.PreCheckFunc, + newPostFn mempool.PostCheckFunc, +) error { + + txmp.height = blockHeight + txmp.notifiedTxsAvailable = false + + if newPreFn != nil { + txmp.preCheck = newPreFn + } + if newPostFn != nil { + txmp.postCheck = newPostFn + } + + for i, tx := range blockTxs { + if deliverTxResponses[i].Code == abci.CodeTypeOK { + // add the valid committed transaction to the cache (if missing) + _ = txmp.cache.Push(tx) + } else if !txmp.config.KeepInvalidTxsInCache { + // allow invalid transactions to be re-submitted + txmp.cache.Remove(tx) + } + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(mempool.TxKey(tx)); wtx != nil { + txmp.removeTx(wtx, false) + } + } + + // If there any uncommitted transactions left in the mempool, we either + // initiate re-CheckTx per remaining transaction or notify that remaining + // transactions are left. + if txmp.Size() > 0 { + if txmp.config.Recheck { + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", blockHeight, + ) + txmp.updateReCheckTxs() + } else { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) + return nil +} + +// initTxCallback performs the initial, i.e. the first, callback after CheckTx +// has been executed by the ABCI application. In other words, initTxCallback is +// called after executing CheckTx when we see a unique transaction for the first +// time. CheckTx can be called again for the same transaction at a later point +// in time when re-checking, however, this callback will not be called. +// +// After the ABCI application executes CheckTx, initTxCallback is called with +// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool, +// we execute that first. If there is no error from postCheck (if defined) and +// the ABCI CheckTx response code is OK, we attempt to insert the transaction. +// +// When attempting to insert the transaction, we first check if there is +// sufficient capacity. If there is sufficient capacity, the transaction is +// inserted into the txStore and indexed across all indexes. Otherwise, if the +// mempool is full, we attempt to find a lower priority transaction to evict in +// place of the new incoming transaction. If no such transaction exists, the +// new incoming transaction is rejected. +// +// If the new incoming transaction fails CheckTx or postCheck fails, we reject +// the new incoming transaction. +// +// NOTE: +// - An explicit lock is NOT required. 
+func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) { + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if ok { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) + } + + if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { + sender := checkTxRes.CheckTx.Sender + priority := checkTxRes.CheckTx.Priority + + if len(sender) > 0 { + if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { + txmp.logger.Error( + "rejected incoming good transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "sender", sender, + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + } + + if err := txmp.canAddTx(wtx); err != nil { + evictTxs := txmp.priorityIndex.GetEvictableTxs( + priority, + int64(wtx.Size()), + txmp.SizeBytes(), + txmp.config.MaxTxsBytes, + ) + if len(evictTxs) == 0 { + // No room for the new incoming transaction so we just remove it from + // the cache. + txmp.cache.Remove(wtx.tx) + txmp.logger.Error( + "rejected incoming good transaction; mempool full", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err.Error(), + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + + // evict an existing transaction(s) + // + // NOTE: + // - The transaction, toEvict, can be removed while a concurrent + // reCheckTx callback is being executed for the same transaction. + for _, toEvict := range evictTxs { + txmp.removeTx(toEvict, true) + txmp.logger.Debug( + "evicted existing good transaction; mempool full", + "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), + "old_priority", toEvict.priority, + "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_priority", wtx.priority, + ) + txmp.metrics.EvictedTxs.Add(1) + } + } + + wtx.gasWanted = checkTxRes.CheckTx.GasWanted + wtx.height = txmp.height + wtx.priority = priority + wtx.sender = sender + wtx.peers = map[uint16]struct{}{ + txInfo.SenderID: {}, + } + + txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) + txmp.metrics.Size.Set(float64(txmp.Size())) + + txmp.insertTx(wtx) + txmp.logger.Debug( + "inserted good transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "height", txmp.height, + "num_txs", txmp.Size(), + ) + txmp.notifyTxsAvailable() + + } else { + // ignore bad transactions + txmp.logger.Info( + "rejected bad transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "peer_id", txInfo.SenderNodeID, + "code", checkTxRes.CheckTx.Code, + "post_check_err", err, + ) + + txmp.metrics.FailedTxs.Add(1) + + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) + } + } + } +} + +// defaultTxCallback performs the default CheckTx application callback. This is +// NOT executed when a transaction is first seen/received. Instead, this callback +// is executed during re-checking transactions (if enabled). A caller, i.e a +// block proposer, acquires a mempool write-lock via Lock() and when executing +// Update(), if the mempool is non-empty and Recheck is enabled, then all +// remaining transactions will be rechecked via CheckTxAsync. The order in which +// they are rechecked must be the same order in which this callback is called +// per transaction. 
+func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { + if txmp.recheckCursor == nil { + return + } + + txmp.metrics.RecheckTimes.Add(1) + + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if ok { + tx := req.GetCheckTx().Tx + wtx := txmp.recheckCursor.Value.(*WrappedTx) + if !bytes.Equal(tx, wtx.tx) { + panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), mempool.TxKey(tx))) + } + + // Only evaluate transactions that have not been removed. This can happen + // if an existing transaction is evicted during CheckTx and while this + // callback is being executed for the same evicted transaction. + if !txmp.txStore.IsTxRemoved(wtx.hash) { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, checkTxRes.CheckTx) + } + + if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { + wtx.priority = checkTxRes.CheckTx.Priority + } else { + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(wtx.tx)), + "err", err, + "code", checkTxRes.CheckTx.Code, + ) + + if wtx.gossipEl != txmp.recheckCursor { + panic("corrupted reCheckTx cursor") + } + + txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) + } + } + + // move reCheckTx cursor to next element + if txmp.recheckCursor == txmp.recheckEnd { + txmp.recheckCursor = nil + } else { + txmp.recheckCursor = txmp.recheckCursor.Next() + } + + if txmp.recheckCursor == nil { + txmp.logger.Debug("finished rechecking transactions") + + if txmp.Size() > 0 { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) + } +} + +// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For +// each transaction, it executes CheckTxAsync. The global callback defined on +// the proxyAppConn will be executed for each transaction after CheckTx is +// executed. +// +// NOTE: +// - The caller must have a write-lock when executing updateReCheckTxs. +func (txmp *TxMempool) updateReCheckTxs() { + if txmp.Size() == 0 { + panic("attempted to update re-CheckTx txs when mempool is empty") + } + + txmp.recheckCursor = txmp.gossipIndex.Front() + txmp.recheckEnd = txmp.gossipIndex.Back() + ctx := context.Background() + + for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { + wtx := e.Value.(*WrappedTx) + + // Only execute CheckTx if the transaction is not marked as removed which + // could happen if the transaction was evicted. + if !txmp.txStore.IsTxRemoved(wtx.hash) { + _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ + Tx: wtx.tx, + Type: abci.CheckTxType_Recheck, + }) + if err != nil { + // no need in retrying since the tx will be rechecked after the next block + txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) + } + } + } + + if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil { + txmp.logger.Error("failed to flush transactions during rechecking", "err", err) + } +} + +// canAddTx returns an error if we cannot insert the provided *WrappedTx into +// the mempool due to mempool configured constraints. Otherwise, nil is returned +// and the transaction can be inserted into the mempool. 
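+//
+// For example (numbers are made up): with config.Size = 5000 and
+// config.MaxTxsBytes = 1GB, a mempool already holding 5000 transactions, or
+// one where adding wtx would push the byte total past 1GB, yields
+// ErrMempoolIsFull.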
+func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { + var ( + numTxs = txmp.Size() + sizeBytes = txmp.SizeBytes() + ) + + if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { + return mempool.ErrMempoolIsFull{ + NumTxs: numTxs, + MaxTxs: txmp.config.Size, + TxsBytes: sizeBytes, + MaxTxsBytes: txmp.config.MaxTxsBytes, + } + } + + return nil +} + +func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + txmp.txStore.SetTx(wtx) + txmp.priorityIndex.PushTx(wtx) + + // Insert the transaction into the gossip index and mark the reference to the + // linked-list element, which will be needed at a later point when the + // transaction is removed. + gossipEl := txmp.gossipIndex.PushBack(wtx) + wtx.gossipEl = gossipEl + + atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) +} + +func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { + if txmp.txStore.IsTxRemoved(wtx.hash) { + return + } + + txmp.txStore.RemoveTx(wtx) + txmp.priorityIndex.RemoveTx(wtx) + + // Remove the transaction from the gossip index and cleanup the linked-list + // element so it can be garbage collected. + txmp.gossipIndex.Remove(wtx.gossipEl) + wtx.gossipEl.DetachPrev() + + atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) + + if removeFromCache { + txmp.cache.Remove(wtx.tx) + } +} + +func (txmp *TxMempool) notifyTxsAvailable() { + if txmp.Size() == 0 { + panic("attempt to notify txs available but mempool is empty!") + } + + if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { + // channel cap is 1, so this will send once + txmp.notifiedTxsAvailable = true + + select { + case txmp.txsAvailable <- struct{}{}: + default: + } + } +} diff --git a/mempool/v1/mempool_bench_test.go b/mempool/v1/mempool_bench_test.go new file mode 100644 index 000000000..bad8ec8ab --- /dev/null +++ b/mempool/v1/mempool_bench_test.go @@ -0,0 +1,31 @@ +package v1 + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/mempool" +) + +func BenchmarkTxMempool_CheckTx(b *testing.B) { + txmp := setup(b, 10000) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + b.StopTimer() + prefix := make([]byte, 20) + _, err := rng.Read(prefix) + require.NoError(b, err) + + priority := int64(rng.Intn(9999-1000) + 1000) + tx := []byte(fmt.Sprintf("%X=%d", prefix, priority)) + b.StartTimer() + + require.NoError(b, txmp.CheckTx(tx, nil, mempool.TxInfo{})) + } +} diff --git a/mempool/v1/mempool_test.go b/mempool/v1/mempool_test.go new file mode 100644 index 000000000..6f7b149c2 --- /dev/null +++ b/mempool/v1/mempool_test.go @@ -0,0 +1,432 @@ +package v1 + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "sort" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/code" + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +// application extends the KV store application by overriding CheckTx to provide +// transaction priority based on the value in the key/value pair. 
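+//
+// Test transactions are encoded as "sender=key=priority", e.g.
+// "sender-0=FF0102=50" (see checkTxs below), so CheckTx can parse both the
+// sender and the priority straight out of the raw bytes.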
+type application struct { + *kvstore.Application +} + +type testTx struct { + tx types.Tx + priority int64 +} + +func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { + var ( + priority int64 + sender string + ) + + // infer the priority from the raw transaction value (sender=key=value) + parts := bytes.Split(req.Tx, []byte("=")) + if len(parts) == 3 { + v, err := strconv.ParseInt(string(parts[2]), 10, 64) + if err != nil { + return abci.ResponseCheckTx{ + Priority: priority, + Code: 100, + GasWanted: 1, + } + } + + priority = v + sender = string(parts[0]) + } else { + return abci.ResponseCheckTx{ + Priority: priority, + Code: 101, + GasWanted: 1, + } + } + + return abci.ResponseCheckTx{ + Priority: priority, + Sender: sender, + Code: code.CodeTypeOK, + GasWanted: 1, + } +} + +func setup(t testing.TB, cacheSize int) *TxMempool { + t.Helper() + + app := &application{kvstore.NewApplication()} + cc := proxy.NewLocalClientCreator(app) + + cfg := config.ResetTestRoot(t.Name()) + cfg.Mempool.CacheSize = cacheSize + + appConnMem, err := cc.NewABCIClient() + require.NoError(t, err) + require.NoError(t, appConnMem.Start()) + + t.Cleanup(func() { + os.RemoveAll(cfg.RootDir) + require.NoError(t, appConnMem.Stop()) + }) + + return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0) +} + +func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { + txs := make([]testTx, numTxs) + txInfo := mempool.TxInfo{SenderID: peerID} + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + for i := 0; i < numTxs; i++ { + prefix := make([]byte, 20) + _, err := rng.Read(prefix) + require.NoError(t, err) + + // sender := make([]byte, 10) + // _, err = rng.Read(sender) + // require.NoError(t, err) + + priority := int64(rng.Intn(9999-1000) + 1000) + + txs[i] = testTx{ + tx: []byte(fmt.Sprintf("sender-%d=%X=%d", i, prefix, priority)), + priority: priority, + } + require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo)) + } + + return txs +} + +func TestTxMempool_TxsAvailable(t *testing.T) { + txmp := setup(t, 0) + txmp.EnableTxsAvailable() + + ensureNoTxFire := func() { + timer := time.NewTimer(500 * time.Millisecond) + select { + case <-txmp.TxsAvailable(): + require.Fail(t, "unexpected transactions event") + case <-timer.C: + } + } + + ensureTxFire := func() { + timer := time.NewTimer(500 * time.Millisecond) + select { + case <-txmp.TxsAvailable(): + case <-timer.C: + require.Fail(t, "expected transactions event") + } + } + + // ensure no event as we have not executed any transactions yet + ensureNoTxFire() + + // Execute CheckTx for some transactions and ensure TxsAvailable only fires + // once. + txs := checkTxs(t, txmp, 100, 0) + ensureTxFire() + ensureNoTxFire() + + rawTxs := make([]types.Tx, len(txs)) + for i, tx := range txs { + rawTxs[i] = tx.tx + } + + responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + for i := 0; i < len(responses); i++ { + responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + } + + // commit half the transactions and ensure we fire an event + txmp.Lock() + require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + txmp.Unlock() + ensureTxFire() + ensureNoTxFire() + + // Execute CheckTx for more transactions and ensure we do not fire another + // event as we're still on the same height (1). 
+ _ = checkTxs(t, txmp, 100, 0) + ensureNoTxFire() +} + +func TestTxMempool_Size(t *testing.T) { + txmp := setup(t, 0) + txs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(txs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + + rawTxs := make([]types.Tx, len(txs)) + for i, tx := range txs { + rawTxs[i] = tx.tx + } + + responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + for i := 0; i < len(responses); i++ { + responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + } + + txmp.Lock() + require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + txmp.Unlock() + + require.Equal(t, len(rawTxs)/2, txmp.Size()) + require.Equal(t, int64(2750), txmp.SizeBytes()) +} + +func TestTxMempool_Flush(t *testing.T) { + txmp := setup(t, 0) + txs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(txs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + + rawTxs := make([]types.Tx, len(txs)) + for i, tx := range txs { + rawTxs[i] = tx.tx + } + + responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + for i := 0; i < len(responses); i++ { + responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + } + + txmp.Lock() + require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + txmp.Unlock() + + txmp.Flush() + require.Zero(t, txmp.Size()) + require.Equal(t, int64(0), txmp.SizeBytes()) +} + +func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { + txmp := setup(t, 0) + tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + + txMap := make(map[[mempool.TxKeySize]byte]testTx) + priorities := make([]int64, len(tTxs)) + for i, tTx := range tTxs { + txMap[mempool.TxKey(tTx.tx)] = tTx + priorities[i] = tTx.priority + } + + sort.Slice(priorities, func(i, j int) bool { + // sort by priority, i.e. decreasing order + return priorities[i] > priorities[j] + }) + + ensurePrioritized := func(reapedTxs types.Txs) { + reapedPriorities := make([]int64, len(reapedTxs)) + for i, rTx := range reapedTxs { + reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + } + + require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) + } + + // reap by gas capacity only + reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, 50) + + // reap by transaction bytes only + reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, 17) + + // Reap by both transaction bytes and gas, where the size yields 31 reaped + // transactions and the gas limit reaps 26 transactions. + reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, 26) +} + +func TestTxMempool_ReapMaxTxs(t *testing.T) { + txmp := setup(t, 0) + tTxs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + + txMap := make(map[[mempool.TxKeySize]byte]testTx) + priorities := make([]int64, len(tTxs)) + for i, tTx := range tTxs { + txMap[mempool.TxKey(tTx.tx)] = tTx + priorities[i] = tTx.priority + } + + sort.Slice(priorities, func(i, j int) bool { + // sort by priority, i.e. 
decreasing order + return priorities[i] > priorities[j] + }) + + ensurePrioritized := func(reapedTxs types.Txs) { + reapedPriorities := make([]int64, len(reapedTxs)) + for i, rTx := range reapedTxs { + reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + } + + require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) + } + + // reap all transactions + reapedTxs := txmp.ReapMaxTxs(-1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, len(tTxs)) + + // reap a single transaction + reapedTxs = txmp.ReapMaxTxs(1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, 1) + + // reap half of the transactions + reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) + require.Equal(t, int64(5490), txmp.SizeBytes()) + require.Len(t, reapedTxs, len(tTxs)/2) +} + +func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { + txmp := setup(t, 0) + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + tx := make([]byte, txmp.config.MaxTxsBytes+1) + _, err := rng.Read(tx) + require.NoError(t, err) + + require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})) +} + +func TestTxMempool_CheckTxSamePeer(t *testing.T) { + txmp := setup(t, 100) + peerID := uint16(1) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + prefix := make([]byte, 20) + _, err := rng.Read(prefix) + require.NoError(t, err) + + tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) + + require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID})) + require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID})) +} + +func TestTxMempool_CheckTxSameSender(t *testing.T) { + txmp := setup(t, 100) + peerID := uint16(1) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + prefix1 := make([]byte, 20) + _, err := rng.Read(prefix1) + require.NoError(t, err) + + prefix2 := make([]byte, 20) + _, err = rng.Read(prefix2) + require.NoError(t, err) + + tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50)) + tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50)) + + require.NoError(t, txmp.CheckTx(tx1, nil, mempool.TxInfo{SenderID: peerID})) + require.Equal(t, 1, txmp.Size()) + require.NoError(t, txmp.CheckTx(tx2, nil, mempool.TxInfo{SenderID: peerID})) + require.Equal(t, 1, txmp.Size()) +} + +func TestTxMempool_ConcurrentTxs(t *testing.T) { + txmp := setup(t, 100) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + checkTxDone := make(chan struct{}) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + for i := 0; i < 20; i++ { + _ = checkTxs(t, txmp, 100, 0) + dur := rng.Intn(1000-500) + 500 + time.Sleep(time.Duration(dur) * time.Millisecond) + } + + wg.Done() + close(checkTxDone) + }() + + wg.Add(1) + go func() { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + defer wg.Done() + + var height int64 = 1 + + for range ticker.C { + reapedTxs := txmp.ReapMaxTxs(200) + if len(reapedTxs) > 0 { + responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) + for i := 0; i < len(responses); i++ { + var code uint32 + + if i%10 == 0 { + code = 100 + } else { + code = abci.CodeTypeOK + } + + responses[i] = &abci.ResponseDeliverTx{Code: code} + } + + txmp.Lock() + require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil)) + txmp.Unlock() + + height++ + } else { + // only return once we know 
we finished the CheckTx loop + select { + case <-checkTxDone: + return + default: + } + } + } + }() + + wg.Wait() + require.Zero(t, txmp.Size()) + require.Zero(t, txmp.SizeBytes()) +} diff --git a/mempool/v1/priority_queue.go b/mempool/v1/priority_queue.go new file mode 100644 index 000000000..df74a92d3 --- /dev/null +++ b/mempool/v1/priority_queue.go @@ -0,0 +1,159 @@ +package v1 + +import ( + "container/heap" + "sort" + + tmsync "github.com/tendermint/tendermint/internal/libs/sync" +) + +var _ heap.Interface = (*TxPriorityQueue)(nil) + +// TxPriorityQueue defines a thread-safe priority queue for valid transactions. +type TxPriorityQueue struct { + mtx tmsync.RWMutex + txs []*WrappedTx +} + +func NewTxPriorityQueue() *TxPriorityQueue { + pq := &TxPriorityQueue{ + txs: make([]*WrappedTx, 0), + } + + heap.Init(pq) + + return pq +} + +// GetEvictableTxs attempts to find and return a list of *WrappedTx than can be +// evicted to make room for another *WrappedTx with higher priority. If no such +// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx +// indicate that these transactions can be removed due to them being of lower +// priority and that their total sum in size allows room for the incoming +// transaction according to the mempool's configured limits. +func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx { + pq.mtx.RLock() + defer pq.mtx.RUnlock() + + txs := make([]*WrappedTx, len(pq.txs)) + copy(txs, pq.txs) + + sort.Slice(txs, func(i, j int) bool { + return txs[i].priority < txs[j].priority + }) + + var ( + toEvict []*WrappedTx + i int + ) + + currSize := totalSize + + // Loop over all transactions in ascending priority order evaluating those + // that are only of less priority than the provided argument. We continue + // evaluating transactions until there is sufficient capacity for the new + // transaction (size) as defined by txSize. + for i < len(txs) && txs[i].priority < priority { + toEvict = append(toEvict, txs[i]) + currSize -= int64(txs[i].Size()) + + if currSize+txSize <= cap { + return toEvict + } + + i++ + } + + return nil +} + +// NumTxs returns the number of transactions in the priority queue. It is +// thread safe. +func (pq *TxPriorityQueue) NumTxs() int { + pq.mtx.RLock() + defer pq.mtx.RUnlock() + + return len(pq.txs) +} + +// RemoveTx removes a specific transaction from the priority queue. +func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) { + pq.mtx.Lock() + defer pq.mtx.Unlock() + + if tx.heapIndex < len(pq.txs) { + heap.Remove(pq, tx.heapIndex) + } +} + +// PushTx adds a valid transaction to the priority queue. It is thread safe. +func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) { + pq.mtx.Lock() + defer pq.mtx.Unlock() + + heap.Push(pq, tx) +} + +// PopTx removes the top priority transaction from the queue. It is thread safe. +func (pq *TxPriorityQueue) PopTx() *WrappedTx { + pq.mtx.Lock() + defer pq.mtx.Unlock() + + x := heap.Pop(pq) + if x != nil { + return x.(*WrappedTx) + } + + return nil +} + +// Push implements the Heap interface. +// +// NOTE: A caller should never call Push. Use PushTx instead. +func (pq *TxPriorityQueue) Push(x interface{}) { + n := len(pq.txs) + item := x.(*WrappedTx) + item.heapIndex = n + pq.txs = append(pq.txs, item) +} + +// Pop implements the Heap interface. +// +// NOTE: A caller should never call Pop. Use PopTx instead. 
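+//
+// Heap ordering note (illustrative): with priorities {3, 1, 2} pushed via
+// PushTx, PopTx returns the priority-3 transaction first; equal priorities
+// fall back to the earlier timestamp, per Less below.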
+func (pq *TxPriorityQueue) Pop() interface{} { + old := pq.txs + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.heapIndex = -1 // for safety + pq.txs = old[0 : n-1] + return item +} + +// Len implements the Heap interface. +// +// NOTE: A caller should never call Len. Use NumTxs instead. +func (pq *TxPriorityQueue) Len() int { + return len(pq.txs) +} + +// Less implements the Heap interface. It returns true if the transaction at +// position i in the queue is of less priority than the transaction at position j. +func (pq *TxPriorityQueue) Less(i, j int) bool { + // If there exists two transactions with the same priority, consider the one + // that we saw the earliest as the higher priority transaction. + if pq.txs[i].priority == pq.txs[j].priority { + return pq.txs[i].timestamp.Before(pq.txs[j].timestamp) + } + + // We want Pop to give us the highest, not lowest, priority so we use greater + // than here. + return pq.txs[i].priority > pq.txs[j].priority +} + +// Swap implements the Heap interface. It swaps two transactions in the queue. +func (pq *TxPriorityQueue) Swap(i, j int) { + pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i] + pq.txs[i].heapIndex = i + pq.txs[j].heapIndex = j +} diff --git a/mempool/v1/priority_queue_test.go b/mempool/v1/priority_queue_test.go new file mode 100644 index 000000000..c0048f388 --- /dev/null +++ b/mempool/v1/priority_queue_test.go @@ -0,0 +1,176 @@ +package v1 + +import ( + "math/rand" + "sort" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestTxPriorityQueue(t *testing.T) { + pq := NewTxPriorityQueue() + numTxs := 1000 + + priorities := make([]int, numTxs) + + var wg sync.WaitGroup + for i := 1; i <= numTxs; i++ { + priorities[i-1] = i + wg.Add(1) + + go func(i int) { + pq.PushTx(&WrappedTx{ + priority: int64(i), + timestamp: time.Now(), + }) + + wg.Done() + }(i) + } + + sort.Sort(sort.Reverse(sort.IntSlice(priorities))) + + wg.Wait() + require.Equal(t, numTxs, pq.NumTxs()) + + // Wait a second and push a tx with a duplicate priority + time.Sleep(time.Second) + now := time.Now() + pq.PushTx(&WrappedTx{ + priority: 1000, + timestamp: now, + }) + require.Equal(t, 1001, pq.NumTxs()) + + tx := pq.PopTx() + require.Equal(t, 1000, pq.NumTxs()) + require.Equal(t, int64(1000), tx.priority) + require.NotEqual(t, now, tx.timestamp) + + gotPriorities := make([]int, 0) + for pq.NumTxs() > 0 { + gotPriorities = append(gotPriorities, int(pq.PopTx().priority)) + } + + require.Equal(t, priorities, gotPriorities) +} + +func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { + pq := NewTxPriorityQueue() + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + values := make([]int, 1000) + + for i := 0; i < 1000; i++ { + tx := make([]byte, 5) // each tx is 5 bytes + _, err := rng.Read(tx) + require.NoError(t, err) + + x := rng.Intn(100000) + pq.PushTx(&WrappedTx{ + tx: tx, + priority: int64(x), + }) + + values[i] = x + } + + sort.Ints(values) + + max := values[len(values)-1] + min := values[0] + totalSize := int64(len(values) * 5) + + testCases := []struct { + name string + priority, txSize, totalSize, cap int64 + expectedLen int + }{ + { + name: "larest priority; single tx", + priority: int64(max + 1), + txSize: 5, + totalSize: totalSize, + cap: totalSize, + expectedLen: 1, + }, + { + name: "larest priority; multi tx", + priority: int64(max + 1), + txSize: 17, + totalSize: totalSize, + cap: totalSize, + expectedLen: 4, + }, + { + name: "larest priority; out of capacity", + priority: int64(max + 1), + 
txSize: totalSize + 1, + totalSize: totalSize, + cap: totalSize, + expectedLen: 0, + }, + { + name: "smallest priority; no tx", + priority: int64(min - 1), + txSize: 5, + totalSize: totalSize, + cap: totalSize, + expectedLen: 0, + }, + { + name: "small priority; no tx", + priority: int64(min), + txSize: 5, + totalSize: totalSize, + cap: totalSize, + expectedLen: 0, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap) + require.Len(t, evictTxs, tc.expectedLen) + }) + } +} + +func TestTxPriorityQueue_RemoveTx(t *testing.T) { + pq := NewTxPriorityQueue() + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + numTxs := 1000 + + values := make([]int, numTxs) + + for i := 0; i < numTxs; i++ { + x := rng.Intn(100000) + pq.PushTx(&WrappedTx{ + priority: int64(x), + }) + + values[i] = x + } + + require.Equal(t, numTxs, pq.NumTxs()) + + sort.Ints(values) + max := values[len(values)-1] + + wtx := pq.txs[pq.NumTxs()/2] + pq.RemoveTx(wtx) + require.Equal(t, numTxs-1, pq.NumTxs()) + require.Equal(t, int64(max), pq.PopTx().priority) + require.Equal(t, numTxs-2, pq.NumTxs()) + + require.NotPanics(t, func() { + pq.RemoveTx(&WrappedTx{heapIndex: numTxs}) + pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1}) + }) + require.Equal(t, numTxs-2, pq.NumTxs()) +} diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go new file mode 100644 index 000000000..cb9df868d --- /dev/null +++ b/mempool/v1/reactor.go @@ -0,0 +1,394 @@ +package v1 + +import ( + "errors" + "fmt" + "sync" + "time" + + cfg "github.com/tendermint/tendermint/config" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +var ( + _ service.Service = (*Reactor)(nil) + _ p2p.Wrapper = (*protomem.Message)(nil) +) + +// PeerManager defines the interface contract required for getting necessary +// peer information. This should eventually be replaced with a message-oriented +// approach utilizing the p2p stack. +type PeerManager interface { + GetHeight(p2p.NodeID) int64 +} + +// Reactor implements a service that contains mempool of txs that are broadcasted +// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping +// txs to the peers you received it from. +type Reactor struct { + service.BaseService + + config *cfg.MempoolConfig + mempool *TxMempool + ids *mempool.MempoolIDs + + // XXX: Currently, this is the only way to get information about a peer. Ideally, + // we rely on message-oriented communication to get necessary peer data. + // ref: https://github.com/tendermint/tendermint/issues/5670 + peerMgr PeerManager + + mempoolCh *p2p.Channel + peerUpdates *p2p.PeerUpdates + closeCh chan struct{} + + // peerWG is used to coordinate graceful termination of all peer broadcasting + // goroutines. + peerWG sync.WaitGroup + + mtx tmsync.Mutex + peerRoutines map[p2p.NodeID]*tmsync.Closer +} + +// NewReactor returns a reference to a new reactor. 
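+//
+// The node layer is expected to construct the TxMempool first and wire it in
+// roughly as follows (sketch only; variable names are placeholders):
+//
+//	reactor := NewReactor(logger, mempoolCfg, peerMgr, txmp, mempoolCh, peerUpdates)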
+func NewReactor( + logger log.Logger, + config *cfg.MempoolConfig, + peerMgr PeerManager, + txmp *TxMempool, + mempoolCh *p2p.Channel, + peerUpdates *p2p.PeerUpdates, +) *Reactor { + + r := &Reactor{ + config: config, + peerMgr: peerMgr, + mempool: txmp, + ids: mempool.NewMempoolIDs(), + mempoolCh: mempoolCh, + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + peerRoutines: make(map[p2p.NodeID]*tmsync.Closer), + } + + r.BaseService = *service.NewBaseService(logger, "Mempool", r) + return r +} + +// GetChannelShims returns a map of ChannelDescriptorShim objects, where each +// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding +// p2p proto.Message the new p2p Channel is responsible for handling. +// +// +// TODO: Remove once p2p refactor is complete. +// ref: https://github.com/tendermint/tendermint/issues/5670 +func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { + largestTx := make([]byte, config.MaxTxBytes) + batchMsg := protomem.Message{ + Sum: &protomem.Message_Txs{ + Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, + }, + } + + return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ + mempool.MempoolChannel: { + MsgType: new(protomem.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: byte(mempool.MempoolChannel), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + + MaxSendBytes: 5000, + }, + }, + } +} + +// OnStart starts separate go routines for each p2p Channel and listens for +// envelopes on each. In addition, it also listens for peer updates and handles +// messages on that p2p channel accordingly. The caller must be sure to execute +// OnStop to ensure the outbound p2p Channels are closed. +func (r *Reactor) OnStart() error { + if !r.config.Broadcast { + r.Logger.Info("tx broadcasting is disabled") + } + + go r.processMempoolCh() + go r.processPeerUpdates() + + return nil +} + +// OnStop stops the reactor by signaling to all spawned goroutines to exit and +// blocking until they all exit. +func (r *Reactor) OnStop() { + r.mtx.Lock() + for _, c := range r.peerRoutines { + c.Close() + } + r.mtx.Unlock() + + // wait for all spawned peer tx broadcasting goroutines to gracefully exit + r.peerWG.Wait() + + // Close closeCh to signal to all spawned goroutines to gracefully exit. All + // p2p Channels should execute Close(). + close(r.closeCh) + + // Wait for all p2p Channels to be closed before returning. This ensures we + // can easily reason about synchronization of all p2p Channels and ensure no + // panics will occur. + <-r.mempoolCh.Done() + <-r.peerUpdates.Done() +} + +// handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. +// For every tx in the message, we execute CheckTx. It returns an error if an +// empty set of txs are sent in an envelope or if we receive an unexpected +// message type. 
+func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { + logger := r.Logger.With("peer", envelope.From) + + switch msg := envelope.Message.(type) { + case *protomem.Txs: + protoTxs := msg.GetTxs() + if len(protoTxs) == 0 { + return errors.New("empty txs received from peer") + } + + txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + if len(envelope.From) != 0 { + txInfo.SenderNodeID = envelope.From + } + + for _, tx := range protoTxs { + if err := r.mempool.CheckTx(types.Tx(tx), nil, txInfo); err != nil { + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + } + } + + default: + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + +// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. +// It will handle errors and any possible panics gracefully. A caller can handle +// any error returned by sending a PeerError on the respective channel. +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic in processing message: %v", e) + } + }() + + r.Logger.Debug("received message", "peer", envelope.From) + + switch chID { + case mempool.MempoolChannel: + err = r.handleMempoolMessage(envelope) + + default: + err = fmt.Errorf("unknown channel ID (%d) for envelope (%T)", chID, envelope.Message) + } + + return err +} + +// processMempoolCh implements a blocking event loop where we listen for p2p +// Envelope messages from the mempoolCh. +func (r *Reactor) processMempoolCh() { + defer r.mempoolCh.Close() + + for { + select { + case envelope := <-r.mempoolCh.In: + if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { + r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) + r.mempoolCh.Error <- p2p.PeerError{ + NodeID: envelope.From, + Err: err, + } + } + + case <-r.closeCh: + r.Logger.Debug("stopped listening on mempool channel; closing...") + return + } + } +} + +// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we +// check if the reactor is running and if we've already started a tx broadcasting +// goroutine or not. If not, we start one for the newly added peer. For down or +// removed peers, we remove the peer from the mempool peer ID set and signal to +// stop the tx broadcasting goroutine. +func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { + r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) + + r.mtx.Lock() + defer r.mtx.Unlock() + + switch peerUpdate.Status { + case p2p.PeerStatusUp: + // Do not allow starting new tx broadcast loops after reactor shutdown + // has been initiated. This can happen after we've manually closed all + // peer broadcast loops and closed r.closeCh, but the router still sends + // in-flight peer updates. + if !r.IsRunning() { + return + } + + if r.config.Broadcast { + // Check if we've already started a goroutine for this peer, if not we create + // a new done channel so we can explicitly close the goroutine if the peer + // is later removed, we increment the waitgroup so the reactor can stop + // safely, and finally start the goroutine to broadcast txs to that peer. 
+ _, ok := r.peerRoutines[peerUpdate.NodeID] + if !ok { + closer := tmsync.NewCloser() + + r.peerRoutines[peerUpdate.NodeID] = closer + r.peerWG.Add(1) + + r.ids.ReserveForPeer(peerUpdate.NodeID) + + // start a broadcast routine ensuring all txs are forwarded to the peer + go r.broadcastTxRoutine(peerUpdate.NodeID, closer) + } + } + + case p2p.PeerStatusDown: + r.ids.Reclaim(peerUpdate.NodeID) + + // Check if we've started a tx broadcasting goroutine for this peer. + // If we have, we signal to terminate the goroutine via the channel's closure. + // This will internally decrement the peer waitgroup and remove the peer + // from the map of peer tx broadcasting goroutines. + closer, ok := r.peerRoutines[peerUpdate.NodeID] + if ok { + closer.Close() + } + } +} + +// processPeerUpdates initiates a blocking process where we listen for and handle +// PeerUpdate messages. When the reactor is stopped, we will catch the signal and +// close the p2p PeerUpdatesCh gracefully. +func (r *Reactor) processPeerUpdates() { + defer r.peerUpdates.Close() + + for { + select { + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(peerUpdate) + + case <-r.closeCh: + r.Logger.Debug("stopped listening on peer updates channel; closing...") + return + } + } +} + +func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) { + peerMempoolID := r.ids.GetForPeer(peerID) + var memTx *WrappedTx + + // remove the peer ID from the map of routines and mark the waitgroup as done + defer func() { + r.mtx.Lock() + delete(r.peerRoutines, peerID) + r.mtx.Unlock() + + r.peerWG.Done() + + if e := recover(); e != nil { + r.Logger.Error("recovering from broadcasting mempool loop", "err", e) + } + }() + + for { + if !r.IsRunning() { + return + } + + // This happens because the CElement we were looking at got garbage + // collected (removed). That is, .NextWait() returned nil. Go ahead and + // start from the beginning. + if memTx == nil { + select { + case <-r.mempool.WaitForNextTx(): // wait until a tx is available + if memTx = r.mempool.NextGossipTx(); memTx == nil { + continue + } + + case <-closer.Done(): + // The peer is marked for removal via a PeerUpdate as the doneCh was + // explicitly closed to signal we should exit. + return + + case <-r.closeCh: + // The reactor has signaled that we are stopped and thus we should + // implicitly exit this peer's goroutine. + return + } + } + + if r.peerMgr != nil { + height := r.peerMgr.GetHeight(peerID) + if height > 0 && height < memTx.height-1 { + // allow for a lag of one block + time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + continue + } + } + + // NOTE: Transaction batching was disabled due to: + // https://github.com/tendermint/tendermint/issues/5796 + if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { + // Send the mempool tx to the corresponding peer. Note, the peer may be + // behind and thus would not be able to process the mempool tx correctly. + r.mempoolCh.Out <- p2p.Envelope{ + To: peerID, + Message: &protomem.Txs{ + Txs: [][]byte{memTx.tx}, + }, + } + r.Logger.Debug( + "gossiped tx to peer", + "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "peer", peerID, + ) + } + + select { + case <-memTx.gossipEl.NextWaitChan(): + // If there is a next element in gossip index, we point memTx to that node's + // value, otherwise we reset memTx to nil which will be checked at the + // parent for loop. 
+ next := memTx.gossipEl.Next() + if next != nil { + memTx = next.Value.(*WrappedTx) + } else { + memTx = nil + } + + case <-closer.Done(): + // The peer is marked for removal via a PeerUpdate as the doneCh was + // explicitly closed to signal we should exit. + return + + case <-r.closeCh: + // The reactor has signaled that we are stopped and thus we should + // implicitly exit this peer's goroutine. + return + } + } +} diff --git a/mempool/v1/tx.go b/mempool/v1/tx.go new file mode 100644 index 000000000..6680a724a --- /dev/null +++ b/mempool/v1/tx.go @@ -0,0 +1,200 @@ +package v1 + +import ( + "time" + + "github.com/tendermint/tendermint/internal/libs/clist" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +// WrappedTx defines a wrapper around a raw transaction with additional metadata +// that is used for indexing. +type WrappedTx struct { + // tx represents the raw binary transaction data + tx types.Tx + + // hash defines the transaction hash and the primary key used in the mempool + hash [mempool.TxKeySize]byte + + // height defines the height at which the transaction was validated at + height int64 + + // gasWanted defines the amount of gas the transaction sender requires + gasWanted int64 + + // priority defines the transaction's priority as specified by the application + // in the ResponseCheckTx response. + priority int64 + + // sender defines the transaction's sender as specified by the application in + // the ResponseCheckTx response. + sender string + + // timestamp is the time at which the node first received the transaction from + // a peer. It is used as a second dimension is prioritizing transactions when + // two transactions have the same priority. + timestamp time.Time + + // peers records a mapping of all peers that sent a given transaction + peers map[uint16]struct{} + + // heapIndex defines the index of the item in the heap + heapIndex int + + // gossipEl references the linked-list element in the gossip index + gossipEl *clist.CElement + + // removed marks the transaction as removed from the mempool. This is set + // during RemoveTx and is needed due to the fact that a given existing + // transaction in the mempool can be evicted when it is simultaneously having + // a reCheckTx callback executed. + removed bool +} + +func (wtx *WrappedTx) Size() int { + return len(wtx.tx) +} + +// TxStore implements a thread-safe mapping of valid transaction(s). +// +// NOTE: +// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative +// access is not allowed. Regardless, it is not expected for the mempool to +// need mutative access. +type TxStore struct { + mtx tmsync.RWMutex + hashTxs map[[mempool.TxKeySize]byte]*WrappedTx // primary index + senderTxs map[string]*WrappedTx // sender is defined by the ABCI application +} + +func NewTxStore() *TxStore { + return &TxStore{ + senderTxs: make(map[string]*WrappedTx), + hashTxs: make(map[[mempool.TxKeySize]byte]*WrappedTx), + } +} + +// Size returns the total number of transactions in the store. +func (txs *TxStore) Size() int { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return len(txs.hashTxs) +} + +// GetAllTxs returns all the transactions currently in the store. 
+func (txs *TxStore) GetAllTxs() []*WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wTxs := make([]*WrappedTx, len(txs.hashTxs)) + i := 0 + for _, wtx := range txs.hashTxs { + wTxs[i] = wtx + i++ + } + + return wTxs +} + +// GetTxBySender returns a *WrappedTx by the transaction's sender property +// defined by the ABCI application. +func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.senderTxs[sender] +} + +// GetTxByHash returns a *WrappedTx by the transaction's hash. +func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.hashTxs[hash] +} + +// IsTxRemoved returns true if a transaction by hash is marked as removed and +// false otherwise. +func (txs *TxStore) IsTxRemoved(hash [mempool.TxKeySize]byte) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx, ok := txs.hashTxs[hash] + if ok { + return wtx.removed + } + + return false +} + +// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a +// non-empty sender, we additionally store the transaction by the sender as +// defined by the ABCI application. +func (txs *TxStore) SetTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + txs.senderTxs[wtx.sender] = wtx + } + + txs.hashTxs[mempool.TxKey(wtx.tx)] = wtx +} + +// RemoveTx removes a *WrappedTx from the transaction store. It deletes all +// indexes of the transaction. +func (txs *TxStore) RemoveTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + delete(txs.senderTxs, wtx.sender) + } + + delete(txs.hashTxs, mempool.TxKey(wtx.tx)) + wtx.removed = true +} + +// TxHasPeer returns true if a transaction by hash has a given peer ID and false +// otherwise. If the transaction does not exist, false is returned. +func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return false + } + + _, ok := wtx.peers[peerID] + return ok +} + +// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the +// given peerID to the WrappedTx's set of peers that sent us this transaction. +// We return true if we've already recorded the given peer for this transaction +// and false otherwise. If the transaction does not exist by hash, we return +// (nil, false). 
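+//
+// For example, for a stored transaction first relayed by peer 15:
+//
+//	_, seen := txs.GetOrSetPeerByTxHash(hash, 15) // seen == false, peer recorded
+//	_, seen = txs.GetOrSetPeerByTxHash(hash, 15)  // seen == true on the repeat call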
+func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID uint16) (*WrappedTx, bool) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return nil, false + } + + if wtx.peers == nil { + wtx.peers = make(map[uint16]struct{}) + } + + if _, ok := wtx.peers[peerID]; ok { + return wtx, true + } + + wtx.peers[peerID] = struct{}{} + return wtx, false +} diff --git a/mempool/v1/tx_test.go b/mempool/v1/tx_test.go new file mode 100644 index 000000000..d4a0b26bf --- /dev/null +++ b/mempool/v1/tx_test.go @@ -0,0 +1,134 @@ +package v1 + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/mempool" +) + +func TestTxStore_GetTxBySender(t *testing.T) { + txs := NewTxStore() + wtx := &WrappedTx{ + tx: []byte("test_tx"), + sender: "foo", + priority: 1, + timestamp: time.Now(), + } + + res := txs.GetTxBySender(wtx.sender) + require.Nil(t, res) + + txs.SetTx(wtx) + + res = txs.GetTxBySender(wtx.sender) + require.NotNil(t, res) + require.Equal(t, wtx, res) +} + +func TestTxStore_GetTxByHash(t *testing.T) { + txs := NewTxStore() + wtx := &WrappedTx{ + tx: []byte("test_tx"), + sender: "foo", + priority: 1, + timestamp: time.Now(), + } + + key := mempool.TxKey(wtx.tx) + res := txs.GetTxByHash(key) + require.Nil(t, res) + + txs.SetTx(wtx) + + res = txs.GetTxByHash(key) + require.NotNil(t, res) + require.Equal(t, wtx, res) +} + +func TestTxStore_SetTx(t *testing.T) { + txs := NewTxStore() + wtx := &WrappedTx{ + tx: []byte("test_tx"), + priority: 1, + timestamp: time.Now(), + } + + key := mempool.TxKey(wtx.tx) + txs.SetTx(wtx) + + res := txs.GetTxByHash(key) + require.NotNil(t, res) + require.Equal(t, wtx, res) + + wtx.sender = "foo" + txs.SetTx(wtx) + + res = txs.GetTxByHash(key) + require.NotNil(t, res) + require.Equal(t, wtx, res) +} + +func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { + txs := NewTxStore() + wtx := &WrappedTx{ + tx: []byte("test_tx"), + priority: 1, + timestamp: time.Now(), + } + + key := mempool.TxKey(wtx.tx) + txs.SetTx(wtx) + + res, ok := txs.GetOrSetPeerByTxHash(mempool.TxKey([]byte("test_tx_2")), 15) + require.Nil(t, res) + require.False(t, ok) + + res, ok = txs.GetOrSetPeerByTxHash(key, 15) + require.NotNil(t, res) + require.False(t, ok) + + res, ok = txs.GetOrSetPeerByTxHash(key, 15) + require.NotNil(t, res) + require.True(t, ok) + + require.True(t, txs.TxHasPeer(key, 15)) + require.False(t, txs.TxHasPeer(key, 16)) +} + +func TestTxStore_RemoveTx(t *testing.T) { + txs := NewTxStore() + wtx := &WrappedTx{ + tx: []byte("test_tx"), + priority: 1, + timestamp: time.Now(), + } + + txs.SetTx(wtx) + + key := mempool.TxKey(wtx.tx) + res := txs.GetTxByHash(key) + require.NotNil(t, res) + + txs.RemoveTx(res) + + res = txs.GetTxByHash(key) + require.Nil(t, res) +} + +func TestTxStore_Size(t *testing.T) { + txStore := NewTxStore() + numTxs := 1000 + + for i := 0; i < numTxs; i++ { + txStore.SetTx(&WrappedTx{ + tx: []byte(fmt.Sprintf("test_tx_%d", i)), + priority: int64(i), + timestamp: time.Now(), + }) + } + + require.Equal(t, numTxs, txStore.Size()) +} diff --git a/node/node.go b/node/node.go index 8e152e8a1..a4c427504 100644 --- a/node/node.go +++ b/node/node.go @@ -28,7 +28,7 @@ import ( "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/light" - mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" 
"github.com/tendermint/tendermint/p2p/pex" "github.com/tendermint/tendermint/privval" @@ -70,8 +70,8 @@ type Node struct { stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor service.Service // for fast-syncing - mempoolReactor *mempl.Reactor // for gossipping transactions - mempool mempl.Mempool + mempoolReactor service.Service // for gossipping transactions + mempool mempool.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node @@ -253,9 +253,12 @@ func NewNode(config *cfg.Config, return nil, fmt.Errorf("failed to create router: %w", err) } - mpReactorShim, mpReactor, mempool := createMempoolReactor( + mpReactorShim, mpReactor, mp, err := createMempoolReactor( config, proxyApp, state, memplMetrics, peerManager, router, logger, ) + if err != nil { + return nil, err + } evReactorShim, evReactor, evPool, err := createEvidenceReactor( config, dbProvider, stateDB, blockStore, peerManager, router, logger, @@ -269,13 +272,13 @@ func NewNode(config *cfg.Config, stateStore, logger.With("module", "state"), proxyApp.Consensus(), - mempool, + mp, evPool, sm.BlockExecutorWithMetrics(smMetrics), ) csReactorShim, csReactor, csState := createConsensusReactor( - config, state, blockExec, blockStore, mempool, evPool, + config, state, blockExec, blockStore, mp, evPool, privValidator, csMetrics, stateSync || fastSync, eventBus, peerManager, router, consensusLogger, ) @@ -426,7 +429,7 @@ func NewNode(config *cfg.Config, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mpReactor, - mempool: mempool, + mempool: mp, consensusState: csState, consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, @@ -993,12 +996,12 @@ func (n *Node) ConsensusReactor() *cs.Reactor { } // MempoolReactor returns the Node's mempool reactor. -func (n *Node) MempoolReactor() *mempl.Reactor { +func (n *Node) MempoolReactor() service.Service { return n.mempoolReactor } // Mempool returns the Node's mempool. -func (n *Node) Mempool() mempl.Mempool { +func (n *Node) Mempool() mempool.Mempool { return n.mempool } @@ -1149,19 +1152,19 @@ func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { type Provider func(*cfg.Config, log.Logger) (*Node, error) // MetricsProvider returns a consensus, p2p and mempool Metrics. -type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) +type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) // DefaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. 
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { + return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) { if config.Prometheus { return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), - mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), + mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID), sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) } - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() + return cs.NopMetrics(), p2p.NopMetrics(), mempool.NopMetrics(), sm.NopMetrics() } } diff --git a/node/node_test.go b/node/node_test.go index 81e3c11e4..3679fbb17 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -24,7 +24,8 @@ import ( "github.com/tendermint/tendermint/evidence" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" - mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" "github.com/tendermint/tendermint/p2p" p2pmock "github.com/tendermint/tendermint/p2p/mock" "github.com/tendermint/tendermint/privval" @@ -226,16 +227,15 @@ func TestCreateProposalBlock(t *testing.T) { state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) - // Make Mempool - mempool := mempl.NewCListMempool( + mp := mempoolv0.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempl.WithMetrics(mempl.NopMetrics()), - mempl.WithPreCheck(sm.TxPreCheck(state)), - mempl.WithPostCheck(sm.TxPostCheck(state)), + mempoolv0.WithMetrics(mempool.NopMetrics()), + mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mempool.SetLogger(logger) + mp.SetLogger(logger) // Make EvidencePool evidenceDB := dbm.NewMemDB() @@ -262,7 +262,7 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) - err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err := mp.CheckTx(tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -270,7 +270,7 @@ func TestCreateProposalBlock(t *testing.T) { stateStore, logger, proxyApp.Consensus(), - mempool, + mp, evidencePool, ) @@ -317,27 +317,27 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mempool := mempl.NewCListMempool( + mp := mempoolv0.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempl.WithMetrics(mempl.NopMetrics()), - mempl.WithPreCheck(sm.TxPreCheck(state)), - mempl.WithPostCheck(sm.TxPostCheck(state)), + mempoolv0.WithMetrics(mempool.NopMetrics()), + mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mempool.SetLogger(logger) + mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) tx := tmrand.Bytes(txLength - 4) // to account for the varint - err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err = mp.CheckTx(tx, nil, mempool.TxInfo{}) assert.NoError(t, err) blockExec := sm.NewBlockExecutor( stateStore, logger, proxyApp.Consensus(), - mempool, + mp, sm.EmptyEvidencePool{}, ) @@ -375,26 +375,26 @@ func TestMaxProposalBlockSize(t 
*testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mempool := mempl.NewCListMempool( + mp := mempoolv0.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempl.WithMetrics(mempl.NopMetrics()), - mempl.WithPreCheck(sm.TxPreCheck(state)), - mempl.WithPostCheck(sm.TxPostCheck(state)), + mempoolv0.WithMetrics(mempool.NopMetrics()), + mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mempool.SetLogger(logger) + mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) tx := tmrand.Bytes(txLength - 6) // to account for the varint - err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err = mp.CheckTx(tx, nil, mempool.TxInfo{}) assert.NoError(t, err) // now produce more txs than what a normal block can hold with 10 smaller txs // At the end of the test, only the single big tx should be added for i := 0; i < 10; i++ { tx := tmrand.Bytes(10) - err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err = mp.CheckTx(tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -402,7 +402,7 @@ func TestMaxProposalBlockSize(t *testing.T) { stateStore, logger, proxyApp.Consensus(), - mempool, + mp, sm.EmptyEvidencePool{}, ) diff --git a/node/setup.go b/node/setup.go index 0d21b9171..51a35f7db 100644 --- a/node/setup.go +++ b/node/setup.go @@ -22,8 +22,10 @@ import ( "github.com/tendermint/tendermint/evidence" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmStrings "github.com/tendermint/tendermint/libs/strings" - mempl "github.com/tendermint/tendermint/mempool" + tmstrings "github.com/tendermint/tendermint/libs/strings" + "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" @@ -192,25 +194,14 @@ func createMempoolReactor( config *cfg.Config, proxyApp proxy.AppConns, state sm.State, - memplMetrics *mempl.Metrics, + memplMetrics *mempool.Metrics, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *mempl.Reactor, *mempl.CListMempool) { - - logger = logger.With("module", "mempool") - mempool := mempl.NewCListMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempl.WithMetrics(memplMetrics), - mempl.WithPreCheck(sm.TxPreCheck(state)), - mempl.WithPostCheck(sm.TxPostCheck(state)), - ) - - mempool.SetLogger(logger) +) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { - channelShims := mempl.GetChannelShims(config.Mempool) + logger = logger.With("module", "mempool", "version", config.Mempool.Version) + channelShims := mempoolv0.GetChannelShims(config.Mempool) reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) var ( @@ -226,20 +217,63 @@ func createMempoolReactor( peerUpdates = reactorShim.PeerUpdates } - reactor := mempl.NewReactor( - logger, - config.Mempool, - peerManager, - mempool, - channels[mempl.MempoolChannel], - peerUpdates, - ) + switch config.Mempool.Version { + case cfg.MempoolV0: + mp := mempoolv0.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv0.WithMetrics(memplMetrics), + mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + ) - 
if config.Consensus.WaitForTxs() { - mempool.EnableTxsAvailable() - } + mp.SetLogger(logger) - return reactorShim, reactor, mempool + reactor := mempoolv0.NewReactor( + logger, + config.Mempool, + peerManager, + mp, + channels[mempool.MempoolChannel], + peerUpdates, + ) + + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + + return reactorShim, reactor, mp, nil + + case cfg.MempoolV1: + mp := mempoolv1.NewTxMempool( + logger, + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv1.WithMetrics(memplMetrics), + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) + + reactor := mempoolv1.NewReactor( + logger, + config.Mempool, + peerManager, + mp, + channels[mempool.MempoolChannel], + peerUpdates, + ) + + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + + return reactorShim, reactor, mp, nil + + default: + return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version) + } } func createEvidenceReactor( @@ -344,7 +378,7 @@ func createConsensusReactor( state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, - mempool *mempl.CListMempool, + mp mempool.Mempool, evidencePool *evidence.Pool, privValidator types.PrivValidator, csMetrics *cs.Metrics, @@ -360,7 +394,7 @@ func createConsensusReactor( state.Copy(), blockExec, blockStore, - mempool, + mp, evidencePool, cs.StateMetrics(csMetrics), ) @@ -408,7 +442,7 @@ func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{}, p2p.MConnTransportOptions{ MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers + - len(tmStrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")), + len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")), ), }, ) @@ -445,7 +479,7 @@ func createPeerManager( } privatePeerIDs := make(map[p2p.NodeID]struct{}) - for _, id := range tmStrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") { + for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") { privatePeerIDs[p2p.NodeID(id)] = struct{}{} } @@ -461,7 +495,7 @@ func createPeerManager( } peers := []p2p.NodeAddress{} - for _, p := range tmStrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { return nil, fmt.Errorf("invalid peer address %q: %w", p, err) @@ -471,7 +505,7 @@ func createPeerManager( options.PersistentPeers = append(options.PersistentPeers, address.NodeID) } - for _, p := range tmStrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { return nil, fmt.Errorf("invalid peer address %q: %w", p, err) @@ -638,7 +672,7 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, sw *p2p.Switch, logger log.Logger) *pex.Reactor { reactorConfig := &pex.ReactorConfig{ - Seeds: tmStrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "), + Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "), SeedMode: config.Mode == cfg.ModeSeed, // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 // blocks assuming 10s blocks ~ 28 hours. 
@@ -711,7 +745,7 @@ func makeNodeInfo( byte(cs.DataChannel), byte(cs.VoteChannel), byte(cs.VoteSetBitsChannel), - byte(mempl.MempoolChannel), + byte(mempool.MempoolChannel), byte(evidence.EvidenceChannel), byte(statesync.SnapshotChannel), byte(statesync.ChunkChannel), diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index b34b3d192..2bb08a714 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -194,6 +194,8 @@ message ResponseCheckTx { int64 gas_used = 6 [json_name = "gas_used"]; repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; string codespace = 8; + string sender = 9; + int64 priority = 10; } message ResponseDeliverTx { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 54ddafd61..72ef54b32 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -434,7 +434,7 @@ func TestUnconfirmedTxs(t *testing.T) { assert.Equal(t, 1, res.Count) assert.Equal(t, 1, res.Total) - assert.Equal(t, mempool.TxsBytes(), res.TotalBytes) + assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) } @@ -466,7 +466,7 @@ func TestNumUnconfirmedTxs(t *testing.T) { assert.Equal(t, mempoolSize, res.Count) assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, mempool.TxsBytes(), res.TotalBytes) + assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) } mempool.Flush() diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 6bd138824..9e40429f8 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -142,7 +142,7 @@ func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*c return &ctypes.ResultUnconfirmedTxs{ Count: len(txs), Total: env.Mempool.Size(), - TotalBytes: env.Mempool.TxsBytes(), + TotalBytes: env.Mempool.SizeBytes(), Txs: txs}, nil } @@ -152,7 +152,7 @@ func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.Result return &ctypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), - TotalBytes: env.Mempool.TxsBytes()}, nil + TotalBytes: env.Mempool.SizeBytes()}, nil } // CheckTx checks the transaction without executing it. 
The transaction won't diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index d53cf09fb..08710f168 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -87,11 +87,12 @@ func TestApp_Tx(t *testing.T) { hash := tx.Hash() waitTime := 20 * time.Second + require.Eventuallyf(t, func() bool { txResp, err := client.Tx(ctx, hash, false) return err == nil && bytes.Equal(txResp.Tx, tx) }, waitTime, time.Second, - "submitted tx wasn't committed after %v", waitTime, + "submitted tx %X wasn't committed after %v", hash, waitTime, ) // NOTE: we don't test abci query of the light client diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index 3193b169d..e72b077c0 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -3,11 +3,12 @@ package checktx import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" - mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" "github.com/tendermint/tendermint/proxy" ) -var mempool mempl.Mempool +var mp mempool.Mempool func init() { app := kvstore.NewApplication() @@ -21,11 +22,11 @@ func init() { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mempool = mempl.NewCListMempool(cfg, appConnMem, 0) + mp = mempoolv0.NewCListMempool(cfg, appConnMem, 0) } func Fuzz(data []byte) int { - err := mempool.CheckTx(data, nil, mempl.TxInfo{}) + err := mp.CheckTx(data, nil, mempool.TxInfo{}) if err != nil { return 0 }
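Note on the new ResponseCheckTx fields: the proto hunk above adds `sender` (field 9) and `priority` (field 10) to `ResponseCheckTx`, and the `TxStore` in `mempool/v1/tx.go` indexes a transaction by its non-empty sender in addition to its hash. The sketch below shows how an ABCI application might populate these fields. It is illustrative only and not part of this change set: `PrioritizedApp` and `parseSenderAndFee` are hypothetical names, and the transaction length merely stands in for a fee-derived priority.

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// PrioritizedApp is a hypothetical ABCI application that derives a priority
// from the transaction and reports its sender so the v1 mempool can index it.
type PrioritizedApp struct {
	abci.BaseApplication
}

// CheckTx validates a transaction and fills in the new Sender and Priority fields.
func (app *PrioritizedApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	sender, fee, err := parseSenderAndFee(req.Tx)
	if err != nil {
		return abci.ResponseCheckTx{Code: 1, Log: err.Error()}
	}

	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Sender:   sender, // stored in TxStore.senderTxs when non-empty
		Priority: fee,    // carried on WrappedTx.priority for ordering in the v1 mempool
	}
}

// parseSenderAndFee is a stand-in decoder for illustration; a real application
// would decode its own wire format here.
func parseSenderAndFee(tx []byte) (string, int64, error) {
	return fmt.Sprintf("sender-%x", tx[:min(4, len(tx))]), int64(len(tx)), nil
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	app := &PrioritizedApp{}
	res := app.CheckTx(abci.RequestCheckTx{Tx: []byte("example_tx")})
	fmt.Printf("code=%d sender=%q priority=%d\n", res.Code, res.Sender, res.Priority)
}

In this sketch the only requirements are that a non-empty Sender lets TxStore.SetTx populate the senderTxs index (queried via GetTxBySender), and that Priority is carried on the WrappedTx, where the timestamp serves as the tie-breaking second dimension when priorities are equal.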