
rpc: clean up client global state in tests (#6438)

pull/6454/head
Sam Kleinman, 3 years ago (committed by GitHub)
commit fdd506a126
9 changed files with 243 additions and 209 deletions:

  1. light/example_test.go             +12  -15
  2. light/provider/http/http_test.go  +16  -13
  3. rpc/client/event_test.go          +10  -4
  4. rpc/client/evidence_test.go       +10  -4
  5. rpc/client/examples_test.go       +5   -5
  6. rpc/client/main_test.go           +16  -15
  7. rpc/client/rpc_test.go            +148 -118
  8. rpc/grpc/grpc_test.go             +11  -9
  9. rpc/test/helpers.go               +15  -26
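
In outline, the commit replaces each package's TestMain (which started a single shared node, stored it in a package-level variable, and called os.Exit) with a per-test NodeSuite helper that starts a node and registers its shutdown via t.Cleanup. A minimal sketch of that pattern, condensed from the hunks below (the real helpers vary per package, e.g. rpc/client uses a persistent kvstore application in a temp directory):

package client_test

import (
    "testing"

    "github.com/tendermint/tendermint/abci/example/kvstore"
    "github.com/tendermint/tendermint/node"
    rpctest "github.com/tendermint/tendermint/rpc/test"
)

// NodeSuite starts an in-process Tendermint node backed by the kvstore app
// for the calling test and registers its shutdown with t.Cleanup, so no
// package-level node variable (or os.Exit in TestMain) is needed.
func NodeSuite(t *testing.T) *node.Node {
    t.Helper()

    app := kvstore.NewApplication()
    n := rpctest.StartTendermint(app, rpctest.SuppressStdout)

    t.Cleanup(func() {
        rpctest.StopTendermint(n)
    })

    return n
}

Call sites then read settings from n.Config() instead of the old rpctest.GetConfig() singleton, which the final hunk removes.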

light/example_test.go  +12 -15

@@ -6,7 +6,6 @@ import (
"io/ioutil"
stdlog "log"
"os"
"testing"
"time"
dbm "github.com/tendermint/tm-db"
@@ -22,6 +21,11 @@ import (
// Automatically getting new headers and verifying them.
func ExampleClient_Update() {
// Start a test application
app := kvstore.NewApplication()
n := rpctest.StartTendermint(app, rpctest.SuppressStdout)
defer func() { rpctest.StopTendermint(n) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
@@ -32,7 +36,7 @@ func ExampleClient_Update() {
defer os.RemoveAll(dbDir)
var (
config = rpctest.GetConfig()
config = n.Config()
chainID = config.ChainID()
)
@@ -90,6 +94,11 @@
// Manually getting light blocks and verifying them.
func ExampleClient_VerifyLightBlockAtHeight() {
// Start a test application
app := kvstore.NewApplication()
n := rpctest.StartTendermint(app, rpctest.SuppressStdout)
defer func() { rpctest.StopTendermint(n) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
@@ -100,7 +109,7 @@ func ExampleClient_VerifyLightBlockAtHeight() {
defer os.RemoveAll(dbDir)
var (
config = rpctest.GetConfig()
config = n.Config()
chainID = config.ChainID()
)
@@ -154,15 +163,3 @@ func ExampleClient_VerifyLightBlockAtHeight() {
fmt.Println("got header", h.Height)
// Output: got header 3
}
func TestMain(m *testing.M) {
// start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout)
code := m.Run()
// and shut down proper at the end
rpctest.StopTendermint(node)
os.Exit(code)
}

light/provider/http/http_test.go  +16 -13

@@ -12,6 +12,7 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/light/provider"
lighthttp "github.com/tendermint/tendermint/light/provider/http"
"github.com/tendermint/tendermint/node"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpctest "github.com/tendermint/tendermint/rpc/test"
@@ -32,25 +33,27 @@ func TestNewProvider(t *testing.T) {
require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}")
}
func TestMain(m *testing.M) {
func NodeSuite(t *testing.T) *node.Node {
t.Helper()
// start a tendermint node in the background to test against
app := kvstore.NewApplication()
app.RetainBlocks = 9
node := rpctest.StartTendermint(app)
code := m.Run()
rpctest.StopTendermint(node)
os.Exit(code)
t.Cleanup(func() {
rpctest.StopTendermint(node)
os.RemoveAll(node.Config().RootDir)
})
return node
}
func TestProvider(t *testing.T) {
cfg := rpctest.GetConfig()
defer os.RemoveAll(cfg.RootDir)
n := NodeSuite(t)
cfg := n.Config()
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
panic(err)
}
require.NoError(t, err)
chainID := genDoc.ChainID
t.Log("chainID:", chainID)
@@ -68,7 +71,7 @@ func TestProvider(t *testing.T) {
// let's get the highest block
lb, err := p.LightBlock(context.Background(), 0)
require.NoError(t, err)
assert.True(t, lb.Height < 1000)
assert.True(t, lb.Height < 9001, "height=%d", lb.Height)
// let's check this is valid somehow
assert.Nil(t, lb.ValidateBasic(chainID))
@@ -80,7 +83,7 @@ func TestProvider(t *testing.T) {
assert.Equal(t, lower, lb.Height)
// fetching missing heights (both future and pruned) should return appropriate errors
lb, err = p.LightBlock(context.Background(), 1000)
lb, err = p.LightBlock(context.Background(), 9001)
require.Error(t, err)
require.Nil(t, lb)
assert.Equal(t, provider.ErrHeightTooHigh, err)


rpc/client/event_test.go  +10 -4

@@ -27,7 +27,9 @@ func MakeTxKV() ([]byte, []byte, []byte) {
}
func TestHeaderEvents(t *testing.T) {
for i, c := range GetClients() {
n := NodeSuite(t)
for i, c := range GetClients(t, n) {
i, c := i, c
t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
// start for this test it if it wasn't already running
@@ -54,7 +56,8 @@ func TestHeaderEvents(t *testing.T) {
// subscribe to new blocks and make sure height increments by 1
func TestBlockEvents(t *testing.T) {
for _, c := range GetClients() {
n := NodeSuite(t)
for _, c := range GetClients(t, n) {
c := c
t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
@@ -102,7 +105,8 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "a
func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") }
func testTxEventsSent(t *testing.T, broadcastMethod string) {
for _, c := range GetClients() {
n := NodeSuite(t)
for _, c := range GetClients(t, n) {
c := c
t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
@@ -163,7 +167,9 @@ func TestClientsResubscribe(t *testing.T) {
}
func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) {
c := getHTTPClient()
n := NodeSuite(t)
c := getHTTPClient(t, n)
// on Subscribe
_, err := c.Subscribe(context.Background(), "TestHeaderEvents",


rpc/client/evidence_test.go  +10 -4

@@ -17,7 +17,6 @@ import (
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
@@ -113,14 +112,21 @@ func makeEvidences(
}
func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
n := NodeSuite(t)
// previous versions of this test used a shared fixture with
// other tests, and in this version we give it a little time
// for the node to make progress before running the test
time.Sleep(10 * time.Millisecond)
var (
config = rpctest.GetConfig()
config = n.Config()
chainID = config.ChainID()
)
pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
require.NoError(t, err)
for i, c := range GetClients() {
for i, c := range GetClients(t, n) {
correct, fakes := makeEvidences(t, pv, chainID)
t.Logf("client %d", i)
@@ -158,7 +164,7 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
}
func TestBroadcastEmptyEvidence(t *testing.T) {
for _, c := range GetClients() {
for _, c := range GetClients(t, NodeSuite(t)) {
_, err := c.BroadcastEvidence(context.Background(), nil)
assert.Error(t, err)
}


rpc/client/examples_test.go  +5 -5

@@ -15,11 +15,11 @@ import (
func ExampleHTTP_simple() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
node := rpctest.StartTendermint(app, rpctest.SuppressStdout)
defer rpctest.StopTendermint(node)
// Create our RPC client
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
rpcAddr := node.Config().RPC.ListenAddress
c, err := rpchttp.New(rpcAddr)
if err != nil {
log.Fatal(err) //nolint:gocritic
@@ -68,10 +68,10 @@ func ExampleHTTP_simple() {
func ExampleHTTP_batching() {
// Start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig)
node := rpctest.StartTendermint(app, rpctest.SuppressStdout)
// Create our RPC client
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
rpcAddr := node.Config().RPC.ListenAddress
c, err := rpchttp.New(rpcAddr)
if err != nil {
log.Fatal(err)
@@ -98,7 +98,7 @@ func ExampleHTTP_batching() {
// Broadcast the transaction and wait for it to commit (rather use
// c.BroadcastTxSync though in production).
if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil {
log.Fatal(err) //nolint:gocritic
log.Fatal(err) // nolint:gocritic
}
}


rpc/client/main_test.go  +16 -15

@@ -1,32 +1,33 @@
package client_test
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/node"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
var node *nm.Node
func NodeSuite(t *testing.T) *node.Node {
t.Helper()
func TestMain(m *testing.M) {
// start a tendermint node (and kvstore) in the background to test against
dir, err := ioutil.TempDir("/tmp", "rpc-client-test")
if err != nil {
panic(err)
}
dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name()))
require.NoError(t, err)
app := kvstore.NewPersistentKVStoreApplication(dir)
node = rpctest.StartTendermint(app)
n := rpctest.StartTendermint(app)
code := m.Run()
t.Cleanup(func() {
// and shut down proper at the end
rpctest.StopTendermint(n)
app.Close()
// and shut down proper at the end
rpctest.StopTendermint(node)
app.Close()
_ = os.RemoveAll(dir)
os.Exit(code)
_ = os.RemoveAll(dir)
})
return n
}

rpc/client/rpc_test.go  +148 -118

@@ -17,12 +17,12 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/rpc/client"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpclocal "github.com/tendermint/tendermint/rpc/client/local"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
@@ -30,35 +30,33 @@ var (
ctx = context.Background()
)
func getHTTPClient() *rpchttp.HTTP {
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
func getHTTPClient(t *testing.T, n *node.Node) *rpchttp.HTTP {
t.Helper()
rpcAddr := n.Config().RPC.ListenAddress
c, err := rpchttp.New(rpcAddr)
if err != nil {
panic(err)
}
require.NoError(t, err)
c.SetLogger(log.TestingLogger())
return c
}
func getHTTPClientWithTimeout(timeout time.Duration) *rpchttp.HTTP {
rpcAddr := rpctest.GetConfig().RPC.ListenAddress
func getHTTPClientWithTimeout(t *testing.T, n *node.Node, timeout time.Duration) *rpchttp.HTTP {
t.Helper()
rpcAddr := n.Config().RPC.ListenAddress
c, err := rpchttp.NewWithTimeout(rpcAddr, timeout)
if err != nil {
panic(err)
}
require.NoError(t, err)
c.SetLogger(log.TestingLogger())
return c
}
func getLocalClient() *rpclocal.Local {
return rpclocal.New(node)
}
// GetClients returns a slice of clients for table-driven tests
func GetClients() []client.Client {
func GetClients(t *testing.T, n *node.Node) []client.Client {
t.Helper()
return []client.Client{
getHTTPClient(),
getLocalClient(),
getHTTPClient(t, n),
rpclocal.New(n),
}
}
@@ -72,7 +70,8 @@ func TestNilCustomHTTPClient(t *testing.T) {
}
func TestCustomHTTPClient(t *testing.T) {
remote := rpctest.GetConfig().RPC.ListenAddress
conf := NodeSuite(t).Config()
remote := conf.RPC.ListenAddress
c, err := rpchttp.NewWithClient(remote, http.DefaultClient)
require.Nil(t, err)
status, err := c.Status(context.Background())
@@ -81,8 +80,9 @@ func TestCustomHTTPClient(t *testing.T) {
}
func TestCorsEnabled(t *testing.T) {
origin := rpctest.GetConfig().RPC.CORSAllowedOrigins[0]
remote := strings.ReplaceAll(rpctest.GetConfig().RPC.ListenAddress, "tcp", "http")
conf := NodeSuite(t).Config()
origin := conf.RPC.CORSAllowedOrigins[0]
remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http")
req, err := http.NewRequest("GET", remote, nil)
require.Nil(t, err, "%+v", err)
@@ -97,9 +97,13 @@ func TestCorsEnabled(t *testing.T) {
// Make sure status is correct (we connect properly)
func TestStatus(t *testing.T) {
for i, c := range GetClients() {
moniker := rpctest.GetConfig().Moniker
status, err := c.Status(context.Background())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n := NodeSuite(t)
for i, c := range GetClients(t, n) {
moniker := n.Config().Moniker
status, err := c.Status(ctx)
require.Nil(t, err, "%d: %+v", i, err)
assert.Equal(t, moniker, status.NodeInfo.Moniker)
}
@@ -107,10 +111,13 @@ func TestStatus(t *testing.T) {
// Make sure info is correct (we connect properly)
func TestInfo(t *testing.T) {
for i, c := range GetClients() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i, c := range GetClients(t, NodeSuite(t)) {
// status, err := c.Status()
// require.Nil(t, err, "%+v", err)
info, err := c.ABCIInfo(context.Background())
info, err := c.ABCIInfo(ctx)
require.Nil(t, err, "%d: %+v", i, err)
// TODO: this is not correct - fix merkleeyes!
// assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight)
@@ -119,7 +126,7 @@ func TestInfo(t *testing.T) {
}
func TestNetInfo(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
netinfo, err := nc.NetInfo(context.Background())
@@ -130,7 +137,7 @@ func TestNetInfo(t *testing.T) {
}
func TestDumpConsensusState(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
// FIXME: fix server so it doesn't panic on invalid input
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
@@ -142,7 +149,7 @@ func TestDumpConsensusState(t *testing.T) {
}
func TestConsensusState(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
// FIXME: fix server so it doesn't panic on invalid input
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
@@ -153,7 +160,7 @@ func TestConsensusState(t *testing.T) {
}
func TestHealth(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
_, err := nc.Health(context.Background())
@@ -162,7 +169,7 @@ func TestHealth(t *testing.T) {
}
func TestGenesisAndValidators(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
// make sure this is the right genesis file
gen, err := c.Genesis(context.Background())
@@ -187,7 +194,7 @@ func TestGenesisAndValidators(t *testing.T) {
}
func TestABCIQuery(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
// write something
k, v, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(context.Background(), tx)
@@ -207,103 +214,105 @@ func TestABCIQuery(t *testing.T) {
// Make some app checks
func TestAppCalls(t *testing.T) {
assert, require := assert.New(t), require.New(t)
for i, c := range GetClients() {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for i, c := range GetClients(t, NodeSuite(t)) {
// get an offset of height to avoid racing and guessing
s, err := c.Status(context.Background())
require.NoError(err)
s, err := c.Status(ctx)
require.NoError(t, err)
// sh is start height or status height
sh := s.SyncInfo.LatestBlockHeight
// look for the future
h := sh + 20
_, err = c.Block(context.Background(), &h)
require.Error(err) // no block yet
_, err = c.Block(ctx, &h)
require.Error(t, err) // no block yet
// write something
k, v, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(context.Background(), tx)
require.NoError(err)
require.True(bres.DeliverTx.IsOK())
bres, err := c.BroadcastTxCommit(ctx, tx)
require.NoError(t, err)
require.True(t, bres.DeliverTx.IsOK())
txh := bres.Height
apph := txh + 1 // this is where the tx will be applied to the state
// wait before querying
err = client.WaitForHeight(c, apph, nil)
require.NoError(err)
require.NoError(t, err)
_qres, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: false})
require.NoError(err)
_qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false})
require.NoError(t, err)
qres := _qres.Response
if assert.True(qres.IsOK()) {
assert.Equal(k, qres.Key)
assert.EqualValues(v, qres.Value)
if assert.True(t, qres.IsOK()) {
assert.Equal(t, k, qres.Key)
assert.EqualValues(t, v, qres.Value)
}
// make sure we can lookup the tx with proof
ptx, err := c.Tx(context.Background(), bres.Hash, true)
require.NoError(err)
assert.EqualValues(txh, ptx.Height)
assert.EqualValues(tx, ptx.Tx)
ptx, err := c.Tx(ctx, bres.Hash, true)
require.NoError(t, err)
assert.EqualValues(t, txh, ptx.Height)
assert.EqualValues(t, tx, ptx.Tx)
// and we can even check the block is added
block, err := c.Block(context.Background(), &apph)
require.NoError(err)
block, err := c.Block(ctx, &apph)
require.NoError(t, err)
appHash := block.Block.Header.AppHash
assert.True(len(appHash) > 0)
assert.EqualValues(apph, block.Block.Header.Height)
assert.True(t, len(appHash) > 0)
assert.EqualValues(t, apph, block.Block.Header.Height)
blockByHash, err := c.BlockByHash(context.Background(), block.BlockID.Hash)
require.NoError(err)
require.Equal(block, blockByHash)
blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash)
require.NoError(t, err)
require.Equal(t, block, blockByHash)
// now check the results
blockResults, err := c.BlockResults(context.Background(), &txh)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(txh, blockResults.Height)
if assert.Equal(1, len(blockResults.TxsResults)) {
blockResults, err := c.BlockResults(ctx, &txh)
require.NoError(t, err, "%d: %+v", i, err)
assert.Equal(t, txh, blockResults.Height)
if assert.Equal(t, 1, len(blockResults.TxsResults)) {
// check success code
assert.EqualValues(0, blockResults.TxsResults[0].Code)
assert.EqualValues(t, 0, blockResults.TxsResults[0].Code)
}
// check blockchain info, now that we know there is info
info, err := c.BlockchainInfo(context.Background(), apph, apph)
require.NoError(err)
assert.True(info.LastHeight >= apph)
if assert.Equal(1, len(info.BlockMetas)) {
info, err := c.BlockchainInfo(ctx, apph, apph)
require.NoError(t, err)
assert.True(t, info.LastHeight >= apph)
if assert.Equal(t, 1, len(info.BlockMetas)) {
lastMeta := info.BlockMetas[0]
assert.EqualValues(apph, lastMeta.Header.Height)
assert.EqualValues(t, apph, lastMeta.Header.Height)
blockData := block.Block
assert.Equal(blockData.Header.AppHash, lastMeta.Header.AppHash)
assert.Equal(block.BlockID, lastMeta.BlockID)
assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash)
assert.Equal(t, block.BlockID, lastMeta.BlockID)
}
// and get the corresponding commit with the same apphash
commit, err := c.Commit(context.Background(), &apph)
require.NoError(err)
commit, err := c.Commit(ctx, &apph)
require.NoError(t, err)
cappHash := commit.Header.AppHash
assert.Equal(appHash, cappHash)
assert.NotNil(commit.Commit)
assert.Equal(t, appHash, cappHash)
assert.NotNil(t, commit.Commit)
// compare the commits (note Commit(2) has commit from Block(3))
h = apph - 1
commit2, err := c.Commit(context.Background(), &h)
require.NoError(err)
assert.Equal(block.Block.LastCommitHash, commit2.Commit.Hash())
commit2, err := c.Commit(ctx, &h)
require.NoError(t, err)
assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash())
// and we got a proof that works!
_pres, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: true})
require.NoError(err)
_pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true})
require.NoError(t, err)
pres := _pres.Response
assert.True(pres.IsOK())
assert.True(t, pres.IsOK())
// XXX Test proof
}
}
func TestBlockchainInfo(t *testing.T) {
for i, c := range GetClients() {
for i, c := range GetClients(t, NodeSuite(t)) {
err := client.WaitForHeight(c, 10, nil)
require.NoError(t, err)
@@ -333,38 +342,43 @@ func TestBlockchainInfo(t *testing.T) {
}
func TestBroadcastTxSync(t *testing.T) {
require := require.New(t)
n := NodeSuite(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// TODO (melekes): use mempool which is set on RPC rather than getting it from node
mempool := node.Mempool()
mempool := n.Mempool()
initMempoolSize := mempool.Size()
for i, c := range GetClients() {
for i, c := range GetClients(t, n) {
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxSync(context.Background(), tx)
require.Nil(err, "%d: %+v", i, err)
require.Equal(bres.Code, abci.CodeTypeOK) // FIXME
bres, err := c.BroadcastTxSync(ctx, tx)
require.Nil(t, err, "%d: %+v", i, err)
require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME
require.Equal(initMempoolSize+1, mempool.Size())
require.Equal(t, initMempoolSize+1, mempool.Size())
txs := mempool.ReapMaxTxs(len(tx))
require.EqualValues(tx, txs[0])
require.EqualValues(t, tx, txs[0])
mempool.Flush()
}
}
func TestBroadcastTxCommit(t *testing.T) {
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mempool := node.Mempool()
for i, c := range GetClients() {
n := NodeSuite(t)
mempool := n.Mempool()
for i, c := range GetClients(t, n) {
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(context.Background(), tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.CheckTx.IsOK())
require.True(bres.DeliverTx.IsOK())
bres, err := c.BroadcastTxCommit(ctx, tx)
require.Nil(t, err, "%d: %+v", i, err)
require.True(t, bres.CheckTx.IsOK())
require.True(t, bres.DeliverTx.IsOK())
require.Equal(0, mempool.Size())
require.Equal(t, 0, mempool.Size())
}
}
@@ -372,7 +386,9 @@ func TestUnconfirmedTxs(t *testing.T) {
_, _, tx := MakeTxKV()
ch := make(chan *abci.Response, 1)
mempool := node.Mempool()
n := NodeSuite(t)
mempool := n.Mempool()
err := mempool.CheckTx(tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
require.NoError(t, err)
@@ -383,7 +399,7 @@ func TestUnconfirmedTxs(t *testing.T) {
t.Error("Timed out waiting for CheckTx callback")
}
for _, c := range GetClients() {
for _, c := range GetClients(t, n) {
mc := c.(client.MempoolClient)
limit := 1
res, err := mc.UnconfirmedTxs(context.Background(), &limit)
@@ -401,8 +417,9 @@ func TestUnconfirmedTxs(t *testing.T) {
func TestNumUnconfirmedTxs(t *testing.T) {
_, _, tx := MakeTxKV()
n := NodeSuite(t)
ch := make(chan *abci.Response, 1)
mempool := node.Mempool()
mempool := n.Mempool()
err := mempool.CheckTx(tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
require.NoError(t, err)
@@ -414,7 +431,7 @@ func TestNumUnconfirmedTxs(t *testing.T) {
}
mempoolSize := mempool.Size()
for i, c := range GetClients() {
for i, c := range GetClients(t, n) {
mc, ok := c.(client.MempoolClient)
require.True(t, ok, "%d", i)
res, err := mc.NumUnconfirmedTxs(context.Background())
@@ -429,12 +446,15 @@ func TestNumUnconfirmedTxs(t *testing.T) {
}
func TestCheckTx(t *testing.T) {
mempool := node.Mempool()
n := NodeSuite(t)
mempool := n.Mempool()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for _, c := range GetClients() {
for _, c := range GetClients(t, n) {
_, _, tx := MakeTxKV()
res, err := c.CheckTx(context.Background(), tx)
res, err := c.CheckTx(ctx, tx)
require.NoError(t, err)
assert.Equal(t, abci.CodeTypeOK, res.Code)
@@ -444,9 +464,13 @@ func TestCheckTx(t *testing.T) {
func TestTx(t *testing.T) {
// first we broadcast a tx
c := getHTTPClient()
n := NodeSuite(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := getHTTPClient(t, n)
_, _, tx := MakeTxKV()
bres, err := c.BroadcastTxCommit(context.Background(), tx)
bres, err := c.BroadcastTxCommit(ctx, tx)
require.Nil(t, err, "%+v", err)
txHeight := bres.Height
@@ -468,13 +492,13 @@ func TestTx(t *testing.T) {
{false, true, nil},
}
for i, c := range GetClients() {
for i, c := range GetClients(t, n) {
for j, tc := range cases {
t.Logf("client %d, case %d", i, j)
// now we query for the tx.
// since there's only one tx, we know index=0.
ptx, err := c.Tx(context.Background(), tc.hash, tc.prove)
ptx, err := c.Tx(ctx, tc.hash, tc.prove)
if !tc.valid {
require.NotNil(t, err)
@@ -497,20 +521,25 @@ func TestTx(t *testing.T) {
}
func TestTxSearchWithTimeout(t *testing.T) {
timeoutClient := getHTTPClientWithTimeout(10 * time.Second)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n := NodeSuite(t)
timeoutClient := getHTTPClientWithTimeout(t, n, 10*time.Second)
_, _, tx := MakeTxKV()
_, err := timeoutClient.BroadcastTxCommit(context.Background(), tx)
_, err := timeoutClient.BroadcastTxCommit(ctx, tx)
require.NoError(t, err)
// query using a compositeKey (see kvstore application)
result, err := timeoutClient.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc")
result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc")
require.Nil(t, err)
require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
}
func TestTxSearch(t *testing.T) {
c := getHTTPClient()
n := NodeSuite(t)
c := getHTTPClient(t, n)
// first we broadcast a few txs
for i := 0; i < 10; i++ {
@@ -529,7 +558,7 @@ func TestTxSearch(t *testing.T) {
find := result.Txs[len(result.Txs)-1]
anotherTxHash := types.Tx("a different tx").Hash()
for i, c := range GetClients() {
for i, c := range GetClients(t, n) {
t.Logf("client %d", i)
// now we query for the tx.
@@ -633,7 +662,7 @@ func TestTxSearch(t *testing.T) {
}
func TestBatchedJSONRPCCalls(t *testing.T) {
c := getHTTPClient()
c := getHTTPClient(t, NodeSuite(t))
testBatchedJSONRPCCalls(t, c)
}
@@ -687,7 +716,7 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
}
func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
c := getHTTPClient()
c := getHTTPClient(t, NodeSuite(t))
_, _, tx1 := MakeTxKV()
_, _, tx2 := MakeTxKV()
@@ -705,21 +734,22 @@ func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
}
func TestSendingEmptyRequestBatch(t *testing.T) {
c := getHTTPClient()
c := getHTTPClient(t, NodeSuite(t))
batch := c.NewBatch()
_, err := batch.Send(ctx)
require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
}
func TestClearingEmptyRequestBatch(t *testing.T) {
c := getHTTPClient()
c := getHTTPClient(t, NodeSuite(t))
batch := c.NewBatch()
require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
}
func TestConcurrentJSONRPCBatching(t *testing.T) {
n := NodeSuite(t)
var wg sync.WaitGroup
c := getHTTPClient()
c := getHTTPClient(t, n)
for i := 0; i < 50; i++ {
wg.Add(1)
go func() {


rpc/grpc/grpc_test.go  +11 -9

@@ -2,30 +2,32 @@ package coregrpc_test
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/node"
core_grpc "github.com/tendermint/tendermint/rpc/grpc"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
func TestMain(m *testing.M) {
func NodeSuite(t *testing.T) *node.Node {
t.Helper()
// start a tendermint node in the background to test against
app := kvstore.NewApplication()
node := rpctest.StartTendermint(app)
code := m.Run()
// and shut down proper at the end
rpctest.StopTendermint(node)
os.Exit(code)
t.Cleanup(func() {
rpctest.StopTendermint(node)
})
return node
}
func TestBroadcastTx(t *testing.T) {
res, err := rpctest.GetGRPCClient().BroadcastTx(
n := NodeSuite(t)
res, err := rpctest.GetGRPCClient(n.Config()).BroadcastTx(
context.Background(),
&core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")},
)


rpc/test/helpers.go  +15 -26

@@ -29,21 +29,20 @@ type Options struct {
recreateConfig bool
}
var globalConfig *cfg.Config
var defaultOptions = Options{
suppressStdout: false,
recreateConfig: false,
}
func waitForRPC() {
laddr := GetConfig().RPC.ListenAddress
func waitForRPC(ctx context.Context, conf *cfg.Config) {
laddr := conf.RPC.ListenAddress
client, err := rpcclient.New(laddr)
if err != nil {
panic(err)
}
result := new(ctypes.ResultStatus)
for {
_, err := client.Call(context.Background(), "status", map[string]interface{}{}, result)
_, err := client.Call(ctx, "status", map[string]interface{}{}, result)
if err == nil {
return
}
@@ -53,10 +52,10 @@ func waitForRPC() {
}
}
func waitForGRPC() {
client := GetGRPCClient()
func waitForGRPC(ctx context.Context, conf *cfg.Config) {
client := GetGRPCClient(conf)
for {
_, err := client.Ping(context.Background(), &core_grpc.RequestPing{})
_, err := client.Ping(ctx, &core_grpc.RequestPing{})
if err == nil {
return
}
@@ -102,21 +101,16 @@ func createConfig() *cfg.Config {
return c
}
// GetConfig returns a config for the test cases as a singleton
func GetConfig(forceCreate ...bool) *cfg.Config {
if globalConfig == nil || (len(forceCreate) > 0 && forceCreate[0]) {
globalConfig = createConfig()
}
return globalConfig
}
func GetGRPCClient() core_grpc.BroadcastAPIClient {
grpcAddr := globalConfig.RPC.GRPCListenAddress
func GetGRPCClient(conf *cfg.Config) core_grpc.BroadcastAPIClient {
grpcAddr := conf.RPC.GRPCListenAddress
return core_grpc.StartGRPCClient(grpcAddr)
}
// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeOpts := defaultOptions
for _, opt := range opts {
opt(&nodeOpts)
@@ -127,9 +121,10 @@ func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node {
panic(err)
}
cfg := node.Config()
// wait for rpc
waitForRPC()
waitForGRPC()
waitForRPC(ctx, cfg)
waitForGRPC(ctx, cfg)
if !nodeOpts.suppressStdout {
fmt.Println("Tendermint running!")
@@ -151,7 +146,7 @@ func StopTendermint(node *nm.Node) {
// NewTendermint creates a new tendermint server and sleeps forever
func NewTendermint(app abci.Application, opts *Options) *nm.Node {
// Create & start node
config := GetConfig(opts.recreateConfig)
config := createConfig()
var logger log.Logger
if opts.suppressStdout {
logger = log.NewNopLogger()
@@ -186,9 +181,3 @@ func NewTendermint(app abci.Application, opts *Options) *nm.Node {
func SuppressStdout(o *Options) {
o.suppressStdout = true
}
// RecreateConfig instructs the RPC test to recreate the configuration each
// time, instead of treating it as a global singleton.
func RecreateConfig(o *Options) {
o.recreateConfig = true
}
