From 52ed9944162012a5ad44fa611f263545757d6021 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 12 Oct 2021 16:49:45 -0400 Subject: [PATCH] test: cleanup rpc/client and node test fixtures (#7112) --- node/node.go | 14 +- node/node_test.go | 94 +- rpc/client/event_test.go | 200 +--- rpc/client/examples_test.go | 112 +- rpc/client/http/http.go | 11 +- rpc/client/main_test.go | 7 +- rpc/client/rpc_test.go | 1319 ++++++++++++------------ rpc/jsonrpc/client/http_json_client.go | 3 +- 8 files changed, 828 insertions(+), 932 deletions(-) diff --git a/node/node.go b/node/node.go index 87e9b0dbd..6092f4c43 100644 --- a/node/node.go +++ b/node/node.go @@ -641,12 +641,16 @@ func (n *nodeImpl) OnStop() { n.Logger.Info("Stopping Node") - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) + if n.eventBus != nil { + // first stop the non-reactor services + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) + if n.indexerService != nil { + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } } if n.config.Mode != config.ModeSeed { diff --git a/node/node_test.go b/node/node_test.go index 61fd3fa21..d5ea39aa6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -31,6 +31,7 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -46,11 +47,21 @@ func TestNodeStartStop(t *testing.T) { require.NoError(t, err) require.NoError(t, ns.Start()) + t.Cleanup(func() { + if ns.IsRunning() { + assert.NoError(t, ns.Stop()) + ns.Wait() + } + }) + n, ok := ns.(*nodeImpl) require.True(t, ok) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // wait for the node to produce a block - blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) + blocksSub, err := n.EventBus().Subscribe(ctx, "node_test", types.EventQueryNewBlock) require.NoError(t, err) select { case <-blocksSub.Out(): @@ -87,6 +98,14 @@ func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl n, ok := ns.(*nodeImpl) require.True(t, ok) + + t.Cleanup(func() { + if ns.IsRunning() { + assert.NoError(t, ns.Stop()) + ns.Wait() + } + }) + return n } @@ -100,7 +119,6 @@ func TestNodeDelayedStart(t *testing.T) { n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) require.NoError(t, n.Start()) - defer n.Stop() //nolint:errcheck // ignore for tests startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) @@ -165,8 +183,13 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix - _, err := newDefaultNode(cfg, log.TestingLogger()) + n, err := newDefaultNode(cfg, log.TestingLogger()) assert.Error(t, err) + + if n != nil && n.IsRunning() { + assert.NoError(t, n.Stop()) + n.Wait() + } } func TestNodeSetPrivValIPC(t *testing.T) { @@ -211,6 +234,9 @@ func testFreeAddr(t *testing.T) string { // create a proposal block using real and full // mempool and evidence 
pool and validate it. func TestCreateProposalBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -222,7 +248,7 @@ func TestCreateProposalBlock(t *testing.T) { logger := log.TestingLogger() const height int64 = 1 - state, stateDB, privVals := state(1, height) + state, stateDB, privVals := state(t, 1, height) stateStore := sm.NewStore(stateDB) maxBytes := 16384 const partSize uint32 = 256 @@ -266,7 +292,7 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -303,6 +329,9 @@ func TestCreateProposalBlock(t *testing.T) { } func TestMaxTxsProposalBlockSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -314,7 +343,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { logger := log.TestingLogger() const height int64 = 1 - state, stateDB, _ := state(1, height) + state, stateDB, _ := state(t, 1, height) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 16384 @@ -336,7 +365,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) tx := tmrand.Bytes(txLength - 4) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) blockExec := sm.NewBlockExecutor( @@ -365,6 +394,9 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { } func TestMaxProposalBlockSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -375,7 +407,7 @@ func TestMaxProposalBlockSize(t *testing.T) { logger := log.TestingLogger() - state, stateDB, _ := state(types.MaxVotesCount, int64(1)) + state, stateDB, _ := state(t, types.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 @@ -402,7 +434,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // At the end of the test, only the single big tx should be added for i := 0; i < 10; i++ { tx := tmrand.Bytes(10) - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -493,14 +525,17 @@ func TestNodeNewSeedNode(t *testing.T) { defaultGenesisDocProviderFunc(cfg), log.TestingLogger(), ) + require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) err = n.Start() require.NoError(t, err) - assert.True(t, n.pexReactor.IsRunning()) + + require.NoError(t, n.Stop()) + } func TestNodeSetEventSink(t *testing.T) { @@ -511,7 +546,7 @@ func TestNodeSetEventSink(t *testing.T) { setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { eventBus, err := createAndStartEventBus(logger) require.NoError(t, err) - + t.Cleanup(func() { 
require.NoError(t, eventBus.Stop()) }) genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) @@ -521,6 +556,22 @@ func TestNodeSetEventSink(t *testing.T) { t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) return eventSinks } + cleanup := func(ns service.Service) func() { + return func() { + n, ok := ns.(*nodeImpl) + if !ok { + return + } + if n == nil { + return + } + if !n.IsRunning() { + return + } + assert.NoError(t, n.Stop()) + n.Wait() + } + } eventSinks := setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) @@ -542,6 +593,7 @@ func TestNodeSetEventSink(t *testing.T) { ns, err := newDefaultNode(cfg, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("unsupported event sink type"), err) + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{} eventSinks = setupTest(t, cfg) @@ -553,6 +605,7 @@ func TestNodeSetEventSink(t *testing.T) { ns, err = newDefaultNode(cfg, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) + t.Cleanup(cleanup(ns)) var psqlConn = "test" @@ -591,18 +644,21 @@ func TestNodeSetEventSink(t *testing.T) { var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"} cfg.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} cfg.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) + t.Cleanup(cleanup(ns)) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { +func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { + t.Helper() privVals := make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { @@ -623,17 +679,15 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() + t.Cleanup(func() { require.NoError(t, stateDB.Close()) }) + stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privVals } diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 54d19a201..3b91de107 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "reflect" "testing" "time" @@ -17,7 +16,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var waitForEventTimeout = 8 * time.Second +const waitForEventTimeout = 2 * time.Second // MakeTxKV returns a text transaction, allong with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { @@ -26,164 +25,41 @@ func MakeTxKV() ([]byte, []byte, []byte) { return k, v, append(k, append([]byte("="), v...)...) 
} -func TestHeaderEvents(t *testing.T) { - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - i, c := i, c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - _, ok := evt.(types.EventDataNewBlockHeader) - require.True(t, ok, "%d: %#v", i, evt) - // TODO: more checks... - }) - } -} - -// subscribe to new blocks and make sure height increments by 1 -func TestBlockEvents(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - const subscriber = "TestBlockEvents" - - eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) - require.NoError(t, err) - t.Cleanup(func() { - if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil { - t.Error(err) - } - }) - - var firstBlockHeight int64 - for i := int64(0); i < 3; i++ { - event := <-eventCh - blockEvent, ok := event.Data.(types.EventDataNewBlock) - require.True(t, ok) - - block := blockEvent.Block - - if firstBlockHeight == 0 { - firstBlockHeight = block.Header.Height - } - - require.Equal(t, firstBlockHeight+i, block.Header.Height) - } - }) - } -} - -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } - -func testTxEventsSent(t *testing.T, broadcastMethod string) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - // make the tx - _, _, tx := MakeTxKV() - - // send - go func() { - var ( - txres *coretypes.ResultBroadcastTx - err error - ctx = context.Background() - ) - switch broadcastMethod { - case "async": - txres, err = c.BroadcastTxAsync(ctx, tx) - case "sync": - txres, err = c.BroadcastTxSync(ctx, tx) - default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) - } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) - } - }() - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) - - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok) - - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -// Test HTTPClient resubscribes upon disconnect && subscription error. -// Test Local client resubscribes upon subscription error. 
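The standalone per-client event tests removed above are replaced by the testTxEventsSent helper added below, which is driven from subtests in rpc_test.go. A minimal sketch of that driving pattern, assuming the GetClients and testTxEventsSent helpers from this package (the runBroadcastEventSubtests wrapper is hypothetical, for illustration only):

func runBroadcastEventSubtests(ctx context.Context, t *testing.T, clients []client.Client) {
	// hypothetical driver; the patch wires equivalent calls into the
	// "Events" subtests of TestClientMethodCalls in rpc_test.go
	for _, c := range clients {
		c := c // capture range variable for the subtest closures
		t.Run(fmt.Sprintf("%T", c), func(t *testing.T) {
			// exercise both broadcast paths through the shared helper
			t.Run("BroadcastTxAsync", func(t *testing.T) { testTxEventsSent(ctx, t, "async", c) })
			t.Run("BroadcastTxSync", func(t *testing.T) { testTxEventsSent(ctx, t, "sync", c) })
		})
	}
}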
-func TestClientsResubscribe(t *testing.T) { - // TODO(melekes) -} - -func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // on Subscribe - _, err := c.Subscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on Unsubscribe - err = c.Unsubscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on UnsubscribeAll - err = c.UnsubscribeAll(ctx, "TestHeaderEvents") - assert.Error(t, err) +func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) { + // make the tx + _, _, tx := MakeTxKV() + + // send + done := make(chan struct{}) + go func() { + defer close(done) + var ( + txres *coretypes.ResultBroadcastTx + err error + ) + switch broadcastMethod { + case "async": + txres, err = c.BroadcastTxAsync(ctx, tx) + case "sync": + txres, err = c.BroadcastTxSync(ctx, tx) + default: + panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + } + if assert.NoError(t, err) { + assert.Equal(t, txres.Code, abci.CodeTypeOK) + } + }() + + // and wait for confirmation + evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) + require.Nil(t, err) + + // and make sure it has the proper info + txe, ok := evt.(types.EventDataTx) + require.True(t, ok) + + // make sure this is the proper tx + require.EqualValues(t, tx, txe.Tx) + require.True(t, txe.Result.IsOK()) + <-done } diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 1acc29c11..38fb4fcf7 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,16 +3,20 @@ package client_test import ( "bytes" "context" - "fmt" "log" + "net/http" + "testing" + "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func ExampleHTTP_simple() { +func TestHTTPSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -29,9 +33,7 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Create a transaction k := []byte("name") @@ -41,6 +43,7 @@ func ExampleHTTP_simple() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). 
bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(t, err) if err != nil { log.Fatal(err) } @@ -50,30 +53,19 @@ func ExampleHTTP_simple() { // Now try to fetch the value for the key qres, err := c.ABCIQuery(context.Background(), "/key", k) - if err != nil { - log.Fatal(err) - } - if qres.Response.IsErr() { - log.Fatal("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - log.Fatal("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - log.Fatal("returned value does not match sent value") - } - - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi + require.NoError(t, err) + require.False(t, qres.Response.IsErr(), "ABCIQuery failed") + require.True(t, bytes.Equal(qres.Response.Key, k), + "returned key does not match queried key") + require.True(t, bytes.Equal(qres.Response.Value, v), + "returned value does not match sent value [%s]", string(v)) + + assert.Equal(t, "name=satoshi", string(tx), "sent tx") + assert.Equal(t, "name", string(qres.Response.Key), "queried for") + assert.Equal(t, "satoshi", string(qres.Response.Value), "got value") } -func ExampleHTTP_batching() { +func TestHTTPBatching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -88,10 +80,8 @@ func ExampleHTTP_batching() { defer func() { _ = closer(ctx) }() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) + require.NoError(t, err) // Create our two transactions k1 := []byte("firstName") @@ -111,41 +101,51 @@ func ExampleHTTP_batching() { for _, tx := range txs { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { - log.Fatal(err) - } + _, err := batch.BroadcastTxSync(ctx, tx) + require.NoError(t, err) } // Send the batch of 2 transactions - if _, err := batch.Send(context.Background()); err != nil { - log.Fatal(err) - } - - // Now let's query for the original results as a batch - keys := [][]byte{k1, k2} - for _, key := range keys { - if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil { - log.Fatal(err) - } - } + _, err = batch.Send(ctx) + require.NoError(t, err) + + // wait for the transaction to land, we could poll more for + // the transactions to land definitively. 
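// Descriptive note on the polling helper used below: testify's
// require.Eventually takes a condition func() bool, a total wait time, and
// a tick interval; it re-runs the condition every tick until it returns
// true or the wait time elapses, failing the test on timeout. Here the
// condition queues ABCIQuery requests for both keys and succeeds once both
// calls return without error.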
+ require.Eventually(t, + func() bool { + // Now let's query for the original results as a batch + exists := 0 + for _, key := range [][]byte{k1, k2} { + _, err := batch.ABCIQuery(context.Background(), "/key", key) + if err == nil { + exists++ + + } + } + return exists == 2 + }, + 10*time.Second, + time.Second, + ) // Send the 2 queries and keep the results - results, err := batch.Send(context.Background()) - if err != nil { - log.Fatal(err) - } + results, err := batch.Send(ctx) + require.NoError(t, err) + require.Len(t, results, 2) // Each result in the returned list is the deserialized result of each // respective ABCIQuery response for _, result := range results { qr, ok := result.(*coretypes.ResultABCIQuery) - if !ok { - log.Fatal("invalid result type from ABCIQuery request") + require.True(t, ok, "invalid result type from ABCIQuery request") + + switch string(qr.Response.Key) { + case "firstName": + require.Equal(t, "satoshi", string(qr.Response.Value)) + case "lastName": + require.Equal(t, "nakamoto", string(qr.Response.Value)) + default: + t.Fatalf("encountered unknown key %q", string(qr.Response.Key)) } - fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) } - - // Output: - // firstName = satoshi - // lastName = nakamoto } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index d0c1d5621..5bd7b398a 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -2,6 +2,7 @@ package http import ( "context" + "errors" "net/http" "time" @@ -120,20 +121,20 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { } // NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function panics when client is nil. +// on invalid remote. The function returns an error when client is nil +// or an invalid remote. func NewWithClient(remote string, c *http.Client) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) } // NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote. The function -// panics when client is nil. +// WebSocket options. An error is returned on invalid remote or nil client. 
func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } rpc, err := jsonrpcclient.NewWithHTTPClient(remote, c) if err != nil { diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index 4e2c0405c..c2e0dc3cd 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -7,6 +7,7 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" @@ -30,9 +31,11 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { - _ = closer(ctx) cancel() - app.Close() + assert.NoError(t, node.Stop()) + assert.NoError(t, closer(ctx)) + assert.NoError(t, app.Close()) + node.Wait() _ = os.RemoveAll(dir) }) return node, conf diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 8a6845831..d7554d174 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -33,10 +33,16 @@ func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) + return c } @@ -44,10 +50,18 @@ func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Du t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.NewWithTimeout(rpcAddr, timeout) + + http.DefaultClient.Timeout = timeout + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + http.DefaultClient.Timeout = 0 + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) return c } @@ -63,382 +77,433 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. 
require.NoError(t, err) return []client.Client{ - getHTTPClient(t, conf), ncl, + getHTTPClient(t, conf), } } -func TestNilCustomHTTPClient(t *testing.T) { - require.Panics(t, func() { - _, _ = rpchttp.NewWithClient("http://example.com", nil) - }) - require.Panics(t, func() { - _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) - }) -} - -func TestParseInvalidAddress(t *testing.T) { - _, conf := NodeSuite(t) - // should remove trailing / - invalidRemote := conf.RPC.ListenAddress + "/" - _, err := rpchttp.New(invalidRemote) - require.NoError(t, err) -} - -func TestCustomHTTPClient(t *testing.T) { - _, conf := NodeSuite(t) - remote := conf.RPC.ListenAddress - c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) - status, err := c.Status(context.Background()) - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - _, conf := NodeSuite(t) - origin := conf.RPC.CORSAllowedOrigins[0] - remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - moniker := conf.Moniker - status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } -} - -// Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) - } -} - -func TestNetInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} - -func TestConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { +func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // make sure this is the right genesis file - gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - h := int64(1) - vals, err := c.Validators(ctx, &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - require.Equal(t, 1, vals.Count) - require.Equal(t, 1, vals.Total) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestGenesisChunked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + _, conf := NodeSuite(t) - n, conf := NodeSuite(t) + t.Run("NilCustomHTTPClient", func(t *testing.T) { + _, err := rpchttp.NewWithClient("http://example.com", nil) + require.Error(t, err) - for _, c := range GetClients(t, n, conf) { - first, err := c.GenesisChunked(ctx, 0) + _, err = rpcclient.NewWithHTTPClient("http://example.com", nil) + require.Error(t, err) + }) + t.Run("ParseInvalidAddress", func(t *testing.T) { + // should remove trailing / + invalidRemote := conf.RPC.ListenAddress + "/" + _, err := rpchttp.New(invalidRemote) require.NoError(t, err) - - decoded := make([]string, 0, first.TotalChunks) - for i := 0; i < first.TotalChunks; i++ { - chunk, err := c.GenesisChunked(ctx, uint(i)) + 
}) + t.Run("CustomHTTPClient", func(t *testing.T) { + remote := conf.RPC.ListenAddress + c, err := rpchttp.NewWithClient(remote, http.DefaultClient) + require.Nil(t, err) + status, err := c.Status(ctx) + require.NoError(t, err) + require.NotNil(t, status) + }) + t.Run("CorsEnabled", func(t *testing.T) { + origin := conf.RPC.CORSAllowedOrigins[0] + remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") + + req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) + require.Nil(t, err, "%+v", err) + req.Header.Set("Origin", origin) + resp, err := http.DefaultClient.Do(req) + require.Nil(t, err, "%+v", err) + defer resp.Body.Close() + + assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) + }) + t.Run("Batching", func(t *testing.T) { + t.Run("JSONRPCCalls", func(t *testing.T) { + c := getHTTPClient(t, conf) + testBatchedJSONRPCCalls(ctx, t, c) + }) + t.Run("JSONRPCCallsCancellation", func(t *testing.T) { + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(ctx, tx1) require.NoError(t, err) - data, err := base64.StdEncoding.DecodeString(chunk.Data) + _, err = batch.BroadcastTxCommit(ctx, tx2) require.NoError(t, err) - decoded = append(decoded, string(data)) - - } - doc := []byte(strings.Join(decoded, "")) - - var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), - "first: %+v, doc: %s", first, string(doc)) - } -} - -func TestABCIQuery(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - res, err := c.ABCIQuery(ctx, "/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) + }) + t.Run("SendingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.Send(ctx) + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") + }) + t.Run("ClearingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") + }) + t.Run("ConcurrentJSONRPC", func(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient(t, conf) + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(ctx, t, c) + }() + } + wg.Wait() + }) + }) + t.Run("HTTPReturnsErrorIfClientIsNotRunning", func(t *testing.T) { + c := getHTTPClientWithTimeout(t, conf, 100*time.Millisecond) + + // on Subscribe + _, err := c.Subscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // 
on UnsubscribeAll + err = c.UnsubscribeAll(ctx, "TestHeaderEvents") + assert.Error(t, err) + }) } -// Make some app checks -func TestAppCalls(t *testing.T) { +// Make sure info is correct (we connect properly) +func TestClientMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // get an offset of height to avoid racing and guessing - s, err := c.Status(ctx) - require.NoError(t, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 20 - _, err = c.Block(ctx, &h) - require.Error(t, err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - - _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) - require.NoError(t, err) - qres := _qres.Response - if assert.True(t, qres.IsOK()) { - assert.Equal(t, k, qres.Key) - assert.EqualValues(t, v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(ctx, bres.Hash, true) - require.NoError(t, err) - assert.EqualValues(t, txh, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(ctx, &apph) - require.NoError(t, err) - appHash := block.Block.Header.AppHash - assert.True(t, len(appHash) > 0) - assert.EqualValues(t, apph, block.Block.Header.Height) - - blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, block, blockByHash) - - // now check the results - blockResults, err := c.BlockResults(ctx, &txh) - require.NoError(t, err, "%d: %+v", i, err) - assert.Equal(t, txh, blockResults.Height) - if assert.Equal(t, 1, len(blockResults.TxsResults)) { - // check success code - assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(ctx, apph, apph) - require.NoError(t, err) - assert.True(t, info.LastHeight >= apph) - if assert.Equal(t, 1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(t, apph, lastMeta.Header.Height) - blockData := block.Block - assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(t, block.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(ctx, &apph) - require.NoError(t, err) - cappHash := commit.Header.AppHash - assert.Equal(t, appHash, cappHash) - assert.NotNil(t, commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(ctx, &h) - require.NoError(t, err) - assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) - - // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) - require.NoError(t, err) - pres := _pres.Response - assert.True(t, pres.IsOK()) - - // XXX Test proof - } -} - -func TestBlockchainInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) + pool := getMempool(t, n) for i, c := range GetClients(t, n, conf) { - err := client.WaitForHeight(c, 10, nil) - require.NoError(t, err) + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + t.Run("Status", func(t *testing.T) { + status, err := c.Status(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) + }) + t.Run("Info", func(t *testing.T) { + info, err := c.ABCIInfo(ctx) + require.NoError(t, err) + + status, err := c.Status(ctx) + require.NoError(t, err) + + assert.GreaterOrEqual(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + }) + t.Run("NetInfo", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + }) + t.Run("DumpConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + }) + t.Run("ConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + }) + t.Run("Health", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health(ctx) + require.Nil(t, err, "%d: %+v", i, err) + }) + t.Run("GenesisAndValidators", func(t *testing.T) { + // make sure this is the right genesis file + gen, err := c.Genesis(ctx) + require.Nil(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := c.Validators(ctx, &h, nil, nil) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + t.Run("GenesisChunked", func(t *testing.T) { + first, err := c.GenesisChunked(ctx, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := c.GenesisChunked(ctx, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) - res, err := c.BlockchainInfo(ctx, 0, 0) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) > 0) - - res, err = c.BlockchainInfo(ctx, 1, 1) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) == 
1) - - res, err = c.BlockchainInfo(ctx, 1, 10000) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) < 100) - for _, m := range res.BlockMetas { - assert.NotNil(t, m) - } + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) + t.Run("ABCIQuery", func(t *testing.T) { + // write something + k, v, tx := MakeTxKV() + status, err := c.Status(ctx) + require.NoError(t, err) + _, err = c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(ctx, "/key", k) + qres := res.Response + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + }) + t.Run("AppCalls", func(t *testing.T) { + // get an offset of height to avoid racing and guessing + s, err := c.Status(ctx) + require.NoError(t, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = c.Block(ctx, &h) + require.Error(t, err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + require.True(t, bres.DeliverTx.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + + _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(t, err) + qres := _qres.Response + if assert.True(t, qres.IsOK()) { + assert.Equal(t, k, qres.Key) + assert.EqualValues(t, v, qres.Value) + } - res, err = c.BlockchainInfo(ctx, 10000, 1) - require.NotNil(t, err) - assert.Nil(t, res) - assert.Contains(t, err.Error(), "can't be greater than max") - } -} + // make sure we can lookup the tx with proof + ptx, err := c.Tx(ctx, bres.Hash, true) + require.NoError(t, err) + assert.EqualValues(t, txh, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) -func TestBroadcastTxSync(t *testing.T) { - n, conf := NodeSuite(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // and we can even check the block is added + block, err := c.Block(ctx, &apph) + require.NoError(t, err) + appHash := block.Block.Header.AppHash + assert.True(t, len(appHash) > 0) + assert.EqualValues(t, apph, block.Block.Header.Height) + + blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, block, blockByHash) + + // now check the results + blockResults, err := c.BlockResults(ctx, &txh) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, txh, blockResults.Height) + if assert.Equal(t, 1, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) + } - // TODO (melekes): use mempool which is set on RPC rather than getting it from node - pool := getMempool(t, n) - initMempoolSize := pool.Size() + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(ctx, apph, apph) + require.NoError(t, err) + assert.True(t, info.LastHeight >= apph) + if assert.Equal(t, 1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(t, 
apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(t, block.BlockID, lastMeta.BlockID) + } - for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + // and get the corresponding commit with the same apphash + commit, err := c.Commit(ctx, &apph) + require.NoError(t, err) + cappHash := commit.Header.AppHash + assert.Equal(t, appHash, cappHash) + assert.NotNil(t, commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(ctx, &h) + require.NoError(t, err) + assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! + _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) + require.NoError(t, err) + pres := _pres.Response + assert.True(t, pres.IsOK()) + + // XXX Test proof + }) + t.Run("BlockchainInfo", func(t *testing.T) { + err := client.WaitForHeight(c, 10, nil) + require.NoError(t, err) + + res, err := c.BlockchainInfo(ctx, 0, 0) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) > 0) + + res, err = c.BlockchainInfo(ctx, 1, 1) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) == 1) + + res, err = c.BlockchainInfo(ctx, 1, 10000) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) < 100) + for _, m := range res.BlockMetas { + assert.NotNil(t, m) + } - require.Equal(t, initMempoolSize+1, pool.Size()) + res, err = c.BlockchainInfo(ctx, 10000, 1) + require.NotNil(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "can't be greater than max") + }) + t.Run("BroadcastTxCommit", func(t *testing.T) { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.True(t, bres.CheckTx.IsOK()) + require.True(t, bres.DeliverTx.IsOK()) + + require.Equal(t, 0, pool.Size()) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + _, _, tx := MakeTxKV() + initMempoolSize := pool.Size() + bres, err := c.BroadcastTxSync(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + + require.Equal(t, initMempoolSize+1, pool.Size()) + + txs := pool.ReapMaxTxs(len(tx)) + require.EqualValues(t, tx, txs[0]) + pool.Flush() + }) + t.Run("CheckTx", func(t *testing.T) { + _, _, tx := MakeTxKV() + + res, err := c.CheckTx(ctx, tx) + require.NoError(t, err) + assert.Equal(t, abci.CodeTypeOK, res.Code) + + assert.Equal(t, 0, pool.Size(), "mempool must be empty") + }) + t.Run("Events", func(t *testing.T) { + // start for this test it if it wasn't already running + if !c.IsRunning() { + // if so, then we start it, listen, and stop it. + err := c.Start() + require.Nil(t, err) + t.Cleanup(func() { + if err := c.Stop(); err != nil { + t.Error(err) + } + }) + } - txs := pool.ReapMaxTxs(len(tx)) - require.EqualValues(t, tx, txs[0]) - pool.Flush() + t.Run("Header", func(t *testing.T) { + evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) + require.Nil(t, err, "%d: %+v", i, err) + _, ok := evt.(types.EventDataNewBlockHeader) + require.True(t, ok, "%d: %#v", i, evt) + // TODO: more checks... 
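// client.WaitForOneEvent blocks until a single event of the requested type
// (here a new-block-header event) arrives or waitForEventTimeout elapses,
// so the type assertion above is the only check needed to confirm that a
// header event was emitted.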
+ }) + t.Run("Block", func(t *testing.T) { + const subscriber = "TestBlockEvents" + + eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) + require.NoError(t, err) + t.Cleanup(func() { + if err := c.UnsubscribeAll(ctx, subscriber); err != nil { + t.Error(err) + } + }) + + var firstBlockHeight int64 + for i := int64(0); i < 3; i++ { + event := <-eventCh + blockEvent, ok := event.Data.(types.EventDataNewBlock) + require.True(t, ok) + + block := blockEvent.Block + + if firstBlockHeight == 0 { + firstBlockHeight = block.Header.Height + } + + require.Equal(t, firstBlockHeight+i, block.Header.Height) + } + }) + t.Run("BroadcastTxAsync", func(t *testing.T) { + testTxEventsSent(ctx, t, "async", c) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + testTxEventsSent(ctx, t, "sync", c) + }) + }) + }) } } @@ -451,319 +516,268 @@ func getMempool(t *testing.T, srv service.Service) mempool.Mempool { return n.Mempool() } -func TestBroadcastTxCommit(t *testing.T) { +// these cases are roughly the same as the TestClientMethodCalls, but +// they have to loop over their clients in the individual test cases, +// so making a separate suite makes more sense, though isn't strictly +// speaking desirable. +func TestClientMethodCallsAdvanced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) - pool := getMempool(t, n) - for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.True(t, bres.CheckTx.IsOK()) - require.True(t, bres.DeliverTx.IsOK()) - - require.Equal(t, 0, pool.Size()) - } -} -func TestUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - ch := make(chan *abci.Response, 1) - - n, conf := NodeSuite(t) - pool := getMempool(t, n) - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) - - require.NoError(t, err) - - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } + t.Run("UnconfirmedTxs", func(t *testing.T) { + _, _, tx := MakeTxKV() + ch := make(chan struct{}) - for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) require.NoError(t, err) - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - pool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - - n, conf := NodeSuite(t) - ch := make(chan *abci.Response, 1) - pool := getMempool(t, n) - - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) - require.NoError(t, err) - - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } + // wait for tx to arrive in mempoool. 
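// The CheckTx callback above closes ch once the mempool has processed the
// transaction; the select below waits up to five seconds for that signal
// before reporting a timeout error.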
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } - mempoolSize := pool.Size() - for i, c := range GetClients(t, n, conf) { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - } + for _, c := range GetClients(t, n, conf) { + mc := c.(client.MempoolClient) + limit := 1 + res, err := mc.UnconfirmedTxs(ctx, &limit) + require.NoError(t, err) - pool.Flush() -} + assert.Equal(t, 1, res.Count) + assert.Equal(t, 1, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) + } -func TestCheckTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + pool.Flush() + }) + t.Run("NumUnconfirmedTxs", func(t *testing.T) { + ch := make(chan struct{}) - n, conf := NodeSuite(t) - pool := getMempool(t, n) + pool := getMempool(t, n) - for _, c := range GetClients(t, n, conf) { _, _, tx := MakeTxKV() - res, err := c.CheckTx(ctx, tx) + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) require.NoError(t, err) - assert.Equal(t, abci.CodeTypeOK, res.Code) - assert.Equal(t, 0, pool.Size(), "mempool must be empty") - } -} - -func TestTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) + // wait for tx to arrive in mempoool. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } - c := getHTTPClient(t, conf) - - // first we broadcast a tx - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - prove bool - hash []byte - }{ - // only valid if correct hash provided - {true, false, txHash}, - {true, true, txHash}, - {false, false, anotherTxHash}, - {false, true, anotherTxHash}, - {false, false, nil}, - {false, true, nil}, - } + mempoolSize := pool.Size() + for i, c := range GetClients(t, n, conf) { + mc, ok := c.(client.MempoolClient) + require.True(t, ok, "%d", i) + res, err := mc.NumUnconfirmedTxs(ctx) + require.Nil(t, err, "%d: %+v", i, err) - for i, c := range GetClients(t, n, conf) { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) + assert.Equal(t, mempoolSize, res.Count) + assert.Equal(t, mempoolSize, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + } - // now we query for the tx. - // since there's only one tx, we know index=0. 
- ptx, err := c.Tx(ctx, tc.hash, tc.prove) + pool.Flush() + }) + t.Run("Tx", func(t *testing.T) { + c := getHTTPClient(t, conf) - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) + // first we broadcast a tx + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.Nil(t, err, "%+v", err) + + txHeight := bres.Height + txHash := bres.Hash + + anotherTxHash := types.Tx("a different tx").Hash() + + cases := []struct { + valid bool + prove bool + hash []byte + }{ + // only valid if correct hash provided + {true, false, txHash}, + {true, true, txHash}, + {false, false, anotherTxHash}, + {false, true, anotherTxHash}, + {false, false, nil}, + {false, true, nil}, + } - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + for j, tc := range cases { + t.Run(fmt.Sprintf("Case%d", j), func(t *testing.T) { + // now we query for the tx. + // since there's only one tx, we know index=0. + ptx, err := c.Tx(ctx, tc.hash, tc.prove) + + if !tc.valid { + require.NotNil(t, err) + } else { + require.Nil(t, err, "%+v", err) + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) + + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + } + } + }) } - } + }) } - } -} - -func TestTxSearchWithTimeout(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) - - _, _, tx := MakeTxKV() - _, err := timeoutClient.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - - // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") -} - -func TestTxSearch(t *testing.T) { - n, conf := NodeSuite(t) - c := getHTTPClient(t, conf) + }) + t.Run("TxSearchWithTimeout", func(t *testing.T) { + timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) - // first we broadcast a few txs - for i := 0; i < 10; i++ { _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(context.Background(), tx) + _, err := timeoutClient.BroadcastTxCommit(ctx, tx) require.NoError(t, err) - } - - // since we're not using an isolated test server, we'll have lingering transactions - // from other tests as well - result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") - require.NoError(t, err) - txCount := len(result.Txs) - - // pick out the last tx to have something to search for in tests - find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() - - for i, c := range GetClients(t, n, conf) { - t.Logf("client %d", i) - - // now we query for the tx. 
- result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - require.Equal(t, find.Hash, result.Txs[0].Hash) - - ptx := result.Txs[0] - assert.EqualValues(t, find.Height, ptx.Height) - assert.EqualValues(t, find.Tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, find.Hash, ptx.Hash) - - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) - } - - // query by height - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - - // query for non existing tx - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + }) + t.Run("TxSearch", func(t *testing.T) { + t.Skip("Test Asserts Non-Deterministic Results") + c := getHTTPClient(t, conf) + + // first we broadcast a few txs + for i := 0; i < 10; i++ { + _, _, tx := MakeTxKV() + _, err := c.BroadcastTxSync(ctx, tx) + require.NoError(t, err) + } - // query using an index key - result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + // since we're not using an isolated test server, we'll have lingering transactions + // from other tests as well + result, err := c.TxSearch(ctx, "tx.height >= 0", true, nil, nil, "asc") + require.NoError(t, err) + txCount := len(result.Txs) - // query using an noindex key - result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") + // pick out the last tx to have something to search for in tests + find := result.Txs[len(result.Txs)-1] + anotherTxHash := types.Tx("a different tx").Hash() - // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch(context.Background(), - "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { - // query a non existing tx with page 1 and txsPerPage 1 - perPage := 1 - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) + // now we query for the tx. 
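// TxSearch takes the query string, a prove flag (include Merkle proofs in
// the results), optional *page and *perPage pagination arguments (left nil
// here), and an "asc"/"desc" ordering; the queries below exercise hash,
// height, composite-key, and index-key lookups against the kvstore app.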
+ result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + require.Equal(t, find.Hash, result.Txs[0].Hash) - // check sorting - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } + ptx := result.Txs[0] + assert.EqualValues(t, find.Height, ptx.Height) + assert.EqualValues(t, find.Tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, find.Hash, ptx.Hash) - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - // check pagination - perPage = 3 - var ( - seen = map[int64]bool{} - maxHeight int64 - pages = int(math.Ceil(float64(txCount) / float64(perPage))) - ) - - for page := 1; page <= pages; page++ { - page := page - result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") - require.NoError(t, err) - if page < pages { - require.Len(t, result.Txs, perPage) - } else { - require.LessOrEqual(t, len(result.Txs), perPage) - } - require.Equal(t, txCount, result.TotalCount) - for _, tx := range result.Txs { - require.False(t, seen[tx.Height], - "Found duplicate height %v in page %v", tx.Height, page) - require.Greater(t, tx.Height, maxHeight, - "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) - seen[tx.Height] = true - maxHeight = tx.Height - } - } - require.Len(t, seen, txCount) - } -} + // time to verify the proof + if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { + assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + } -func TestBatchedJSONRPCCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // query by height + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + + // query for non existing tx + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // query using a compositeKey (see kvstore application) + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an index key + result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an noindex key + result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using a compositeKey (see kvstore application) and height + result, err = c.TxSearch(ctx, + "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query a non existing 
tx with page 1 and txsPerPage 1 + perPage := 1 + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // check sorting + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "asc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - testBatchedJSONRPCCalls(ctx, t, c) + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + // check pagination + perPage = 3 + var ( + seen = map[int64]bool{} + maxHeight int64 + pages = int(math.Ceil(float64(txCount) / float64(perPage))) + ) + + for page := 1; page <= pages; page++ { + page := page + result, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc") + require.NoError(t, err) + if page < pages { + require.Len(t, result.Txs, perPage) + } else { + require.LessOrEqual(t, len(result.Txs), perPage) + } + require.Equal(t, txCount, result.TotalCount) + for _, tx := range result.Txs { + require.False(t, seen[tx.Height], + "Found duplicate height %v in page %v", tx.Height, page) + require.Greater(t, tx.Height, maxHeight, + "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) + seen[tx.Height] = true + maxHeight = tx.Height + } + } + require.Len(t, seen, txCount) + }) + } + }) } func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) { @@ -814,60 +828,3 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, qresult1.Response.Value, v1) require.Equal(t, qresult2.Response.Value, v2) } - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(ctx, tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(ctx, tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyRequestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - _, err := batch.Send(ctx) - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyRequestBatch(t *testing.T) { - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func TestConcurrentJSONRPCBatching(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - var wg sync.WaitGroup - c := getHTTPClient(t, conf) - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - 
testBatchedJSONRPCCalls(ctx, t, c)
-		}()
-	}
-	wg.Wait()
-}
diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go
index 9c73b8a8c..7733eb00c 100644
--- a/rpc/jsonrpc/client/http_json_client.go
+++ b/rpc/jsonrpc/client/http_json_client.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -155,7 +156,7 @@ func New(remote string) (*Client, error) {
-// panics when client is nil.
+// returns an error when client is nil.
 func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) {
 	if c == nil {
-		panic("nil http.Client")
+		return nil, errors.New("nil client")
 	}
 
 	parsedURL, err := newParsedURL(remote)
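
Note for reviewers: the rpc_test.go hunks above fold the former top-level
Test* functions into t.Run subtests that share one node, one context, and one
cleanup path. The following is only a minimal sketch of that shape, not part
of the patch; it assumes it lives in the same test package (so it can reuse
the NodeSuite and GetClients helpers used in the hunks) and uses the client's
Status method merely as a stand-in assertion.

package client_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestClientMethodsSketch(t *testing.T) {
	// One context and one node serve every subtest below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	t.Run("Status", func(t *testing.T) {
		// Exercise each client flavor (HTTP, local, ...) as its own subtest.
		for _, c := range GetClients(t, n, conf) {
			t.Run(fmt.Sprintf("%T", c), func(t *testing.T) {
				_, err := c.Status(ctx)
				require.NoError(t, err)
			})
		}
	})
}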
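
The standalone batch tests (cancellation, empty batch, concurrent batching)
are dropped above in favor of the shared testBatchedJSONRPCCalls helper. As a
rough illustration of the batch lifecycle those tests exercised, here is a
sketch that assumes the same test package and its MakeTxKV helper rather than
code from the patch:

package client_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func batchLifecycleSketch(ctx context.Context, t *testing.T, c *rpchttp.HTTP) {
	t.Helper()

	_, _, tx1 := MakeTxKV()
	_, _, tx2 := MakeTxKV()

	// Queue two requests; nothing goes over the wire until Send is called.
	batch := c.NewBatch()
	_, err := batch.BroadcastTxCommit(ctx, tx1)
	require.NoError(t, err)
	_, err = batch.BroadcastTxCommit(ctx, tx2)
	require.NoError(t, err)
	require.Equal(t, 2, batch.Count())

	// Clear discards the queued requests without sending them and reports
	// how many were dropped.
	require.Equal(t, 2, batch.Clear())
	require.Equal(t, 0, batch.Count())
}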
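
Since NewWithHTTPClient now reports a nil http.Client as an error instead of
panicking, callers can treat construction as an ordinary fallible call. A
small caller-side sketch follows; the remote address is only a placeholder
for a locally running node's RPC endpoint.

package main

import (
	"log"
	"net/http"
	"time"

	jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
)

func main() {
	httpClient := &http.Client{Timeout: 10 * time.Second}

	// A nil *http.Client (or an invalid remote) now surfaces here as an
	// error rather than a panic inside the constructor.
	c, err := jsonrpcclient.NewWithHTTPClient("http://localhost:26657", httpClient)
	if err != nil {
		log.Fatalf("constructing JSON-RPC client: %v", err)
	}
	_ = c // use the client from here on
}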