package psql

import (
	"context"
	"database/sql"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/signal"
	"testing"
	"time"

	"github.com/adlio/schema"
	"github.com/gogo/protobuf/proto"
	"github.com/ory/dockertest"
	"github.com/ory/dockertest/docker"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/state/indexer"
	"github.com/tendermint/tendermint/types"

	// Register the Postgres database driver.
	_ "github.com/lib/pq"
)

// Verify that the type satisfies the EventSink interface.
var _ indexer.EventSink = (*EventSink)(nil)

var (
	doPauseAtExit = flag.Bool("pause-at-exit", false,
		"If true, pause the test until interrupted at shutdown, to allow debugging")

	// A hook that test cases can call to obtain the shared database instance
	// used for testing the sink. This is initialized in TestMain (see below).
	testDB func() *sql.DB
)

const (
	user     = "postgres"
	password = "secret"
	port     = "5432"
	dsn      = "postgres://%s:%s@localhost:%s/%s?sslmode=disable"
	dbName   = "postgres"
	chainID  = "test-chainID"

	viewBlockEvents = "block_events"
	viewTxEvents    = "tx_events"
)

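// TestMain starts a PostgreSQL container via dockertest, installs the
// indexing schema, and exposes the shared database handle to the tests
// before running them. The container is purged during teardown; if
// -pause-at-exit is set, teardown waits for an interrupt first.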
func TestMain(m *testing.M) {
	flag.Parse()

	// Set up docker and start a container running PostgreSQL.
	pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
	if err != nil {
		log.Fatalf("Creating docker pool: %v", err)
	}

	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        "13",
		Env: []string{
			"POSTGRES_USER=" + user,
			"POSTGRES_PASSWORD=" + password,
			"POSTGRES_DB=" + dbName,
			"listen_addresses = '*'",
		},
		ExposedPorts: []string{port},
	}, func(config *docker.HostConfig) {
		// Set AutoRemove to true so that the stopped container is removed automatically.
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{
			Name: "no",
		}
	})
	if err != nil {
		log.Fatalf("Starting docker pool: %v", err)
	}

	if *doPauseAtExit {
		log.Print("Pause at exit is enabled, containers will not expire")
	} else {
		const expireSeconds = 60
		_ = resource.Expire(expireSeconds)
		log.Printf("Container expiration set to %d seconds", expireSeconds)
	}

	// Connect to the database, clear any leftover data, and install the
	// indexing schema.
	conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName)
	var db *sql.DB

	if err := pool.Retry(func() error {
		sink, err := NewEventSink(conn, chainID)
		if err != nil {
			return err
		}
		db = sink.DB() // set global for test use
		return db.Ping()
	}); err != nil {
		log.Fatalf("Connecting to database: %v", err)
	}

	if err := resetDatabase(db); err != nil {
		log.Fatalf("Flushing database: %v", err)
	}

	sm, err := readSchema()
	if err != nil {
		log.Fatalf("Reading schema: %v", err)
	}
	migrator := schema.NewMigrator()
	if err := migrator.Apply(db, sm); err != nil {
		log.Fatalf("Applying schema: %v", err)
	}

	// Set up the hook for tests to get the shared database handle.
	testDB = func() *sql.DB { return db }

	// Run the selected test cases.
	code := m.Run()

	// Clean up and shut down the database container.
	if *doPauseAtExit {
		log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown")
		waitForInterrupt()
		log.Print("(resuming)")
	}
	log.Print("Shutting down database")
	if err := pool.Purge(resource); err != nil {
		log.Printf("WARNING: Purging pool failed: %v", err)
	}
	if err := db.Close(); err != nil {
		log.Printf("WARNING: Closing database failed: %v", err)
	}
	os.Exit(code)
}

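// TestType checks that the sink reports the PSQL indexer type.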
func TestType(t *testing.T) {
	psqlSink := &EventSink{store: testDB(), chainID: chainID}
	assert.Equal(t, indexer.PSQL, psqlSink.Type())
}

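// TestIndexing exercises block and transaction event indexing against the
// live database, and confirms that the query methods the sink does not
// support report errors.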
func TestIndexing(t *testing.T) {
	t.Run("IndexBlockEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))

		verifyBlock(t, 1)
		verifyBlock(t, 2)

		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) })
		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) })
		verifyNotImplemented(t, "block search", func() (bool, error) {
			v, err := indexer.SearchBlockEvents(context.Background(), nil)
			return v != nil, err
		})

		require.NoError(t, verifyTimeStamp(tableBlocks))

		// Attempting to reindex the same events should gracefully succeed.
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
	})

	t.Run("IndexTxEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}

		txResult := txResultWithEvents([]abci.Event{
			makeIndexedEvent("account.number", "1"),
			makeIndexedEvent("account.owner", "Ivan"),
			makeIndexedEvent("account.owner", "Yulieta"),

			{Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}},
		})
		require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult}))

		txr, err := loadTxResult(types.Tx(txResult.Tx).Hash())
		require.NoError(t, err)
		assert.Equal(t, txResult, txr)

		require.NoError(t, verifyTimeStamp(tableTxResults))
		require.NoError(t, verifyTimeStamp(viewTxEvents))

		verifyNotImplemented(t, "getTxByHash", func() (bool, error) {
			txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash())
			return txr != nil, err
		})
		verifyNotImplemented(t, "tx search", func() (bool, error) {
			txr, err := indexer.SearchTxEvents(context.Background(), nil)
			return txr != nil, err
		})

		// Attempting to reindex the same transaction events should gracefully succeed.
		err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
		require.NoError(t, err)
	})
}

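// TestStop verifies that the sink can be stopped without error.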
func TestStop(t *testing.T) {
	indexer := &EventSink{store: testDB()}
	require.NoError(t, indexer.Stop())
}

// newTestBlockHeader constructs a fresh copy of a block header containing
// known test values to exercise the indexer.
func newTestBlockHeader() types.EventDataNewBlockHeader {
	return types.EventDataNewBlockHeader{
		Header: types.Header{Height: 1},
		ResultBeginBlock: abci.ResponseBeginBlock{
			Events: []abci.Event{
				makeIndexedEvent("begin_event.proposer", "FCAA001"),
				makeIndexedEvent("thingy.whatzit", "O.O"),
			},
		},
		ResultEndBlock: abci.ResponseEndBlock{
			Events: []abci.Event{
				makeIndexedEvent("end_event.foo", "100"),
				makeIndexedEvent("thingy.whatzit", "-.O"),
			},
		},
	}
}

// readSchema loads the indexing database schema file.
func readSchema() ([]*schema.Migration, error) {
	const filename = "schema.sql"
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
	}

	return []*schema.Migration{{
		ID:     time.Now().Local().String() + " db schema",
		Script: string(contents),
	}}, nil
}

// resetDatabase drops all the data from the test database.
func resetDatabase(db *sql.DB) error {
	_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
	if err != nil {
		return fmt.Errorf("dropping tables: %v", err)
	}
	_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
	if err != nil {
		return fmt.Errorf("dropping views: %v", err)
	}
	return nil
}

// txResultWithEvents constructs a fresh transaction result with fixed values
// for testing, including the specified events.
func txResultWithEvents(events []abci.Event) *abci.TxResult {
	return &abci.TxResult{
		Height: 1,
		Index:  0,
		Tx:     types.Tx("HELLO WORLD"),
		Result: abci.ResponseDeliverTx{
			Data:   []byte{0},
			Code:   abci.CodeTypeOK,
			Log:    "",
			Events: events,
		},
	}
}

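// loadTxResult fetches the stored result for the given transaction hash from
// the transaction results table and decodes it from its protobuf encoding.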
func loadTxResult(hash []byte) (*abci.TxResult, error) {
	hashString := fmt.Sprintf("%X", hash)
	var resultData []byte
	if err := testDB().QueryRow(`
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
`, hashString).Scan(&resultData); err != nil {
		return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
	}

	txr := new(abci.TxResult)
	if err := proto.Unmarshal(resultData, txr); err != nil {
		return nil, fmt.Errorf("unmarshaling txr: %v", err)
	}

	return txr, nil
}

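// verifyTimeStamp queries the named table or view for rows whose created_at
// timestamp falls within the last two seconds, and reports any error from
// executing the query.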
func verifyTimeStamp(tableName string) error {
	return testDB().QueryRow(fmt.Sprintf(`
SELECT DISTINCT %[1]s.created_at
  FROM %[1]s
  WHERE %[1]s.created_at >= $1;
`, tableName), time.Now().Add(-2*time.Second)).Err()
}

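// verifyBlock checks that the blocks table contains an entry for the given
// height, and that begin_block and end_block events for that height are
// visible through the block events view.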
func verifyBlock(t *testing.T, height int64) {
	// Check that the blocks table contains an entry for this height.
	if err := testDB().QueryRow(`
SELECT height FROM `+tableBlocks+` WHERE height = $1;
`, height).Err(); err == sql.ErrNoRows {
		t.Errorf("No block found for height=%d", height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}

	// Verify the presence of begin_block and end_block events.
	if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
  WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
		t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}

	if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
  WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
		t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}
}

// verifyNotImplemented calls f and verifies that it returns both a
// false-valued flag and a non-nil error whose string matches the expected
// "not supported" message, prefixed with the given label.
func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
	t.Helper()
	t.Logf("Verifying that %q reports it is not implemented", label)

	want := label + " is not supported via the postgres event sink"
	ok, err := f()
	assert.False(t, ok)
	require.NotNil(t, err)
	assert.Equal(t, want, err.Error())
}

// waitForInterrupt blocks until a SIGINT is received by the process.
func waitForInterrupt() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch
}