@@ -15,11 +15,12 @@ import (
 	"github.com/stretchr/testify/require"

 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/config"
 	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
 	tmmath "github.com/tendermint/tendermint/libs/math"
+	"github.com/tendermint/tendermint/libs/service"
 	mempl "github.com/tendermint/tendermint/mempool"
-	"github.com/tendermint/tendermint/node"
 	"github.com/tendermint/tendermint/rpc/client"
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 	rpclocal "github.com/tendermint/tendermint/rpc/client/local"
@@ -28,14 +29,10 @@ import (
 	"github.com/tendermint/tendermint/types"
 )

-var (
-	ctx = context.Background()
-)
-
-func getHTTPClient(t *testing.T, n *node.Node) *rpchttp.HTTP {
+func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP {
 	t.Helper()

-	rpcAddr := n.Config().RPC.ListenAddress
+	rpcAddr := conf.RPC.ListenAddress
 	c, err := rpchttp.New(rpcAddr)
 	require.NoError(t, err)
@@ -43,22 +40,31 @@ func getHTTPClient(t *testing.T, n *node.Node) *rpchttp.HTTP {
 	return c
 }

-func getHTTPClientWithTimeout(t *testing.T, n *node.Node, timeout time.Duration) *rpchttp.HTTP {
+func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Duration) *rpchttp.HTTP {
 	t.Helper()
-	rpcAddr := n.Config().RPC.ListenAddress
+
+	rpcAddr := conf.RPC.ListenAddress
 	c, err := rpchttp.NewWithTimeout(rpcAddr, timeout)
 	require.NoError(t, err)

 	c.SetLogger(log.TestingLogger())

 	return c
 }

 // GetClients returns a slice of clients for table-driven tests
-func GetClients(t *testing.T, n *node.Node) []client.Client {
+func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client.Client {
 	t.Helper()

+	node, ok := ns.(rpclocal.NodeService)
+	require.True(t, ok)
+
+	ncl, err := rpclocal.New(node)
+	require.NoError(t, err)
+
 	return []client.Client{
-		getHTTPClient(t, n),
-		rpclocal.New(n),
+		getHTTPClient(t, conf),
+		ncl,
 	}
 }
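Note: after this hunk, the suite helpers take the (service.Service, *config.Config) pair returned by NodeSuite instead of a concrete *node.Node, and the local client comes from an rpclocal.NodeService assertion plus rpclocal.New, whose error is now checked. A minimal sketch of the resulting table-driven pattern, modelled on the TestStatus change below (TestExampleStatus is a hypothetical name; NodeSuite and GetClients are the helpers above):

func TestExampleStatus(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t) // n is a service.Service, conf its *config.Config
	for i, c := range GetClients(t, n, conf) {
		// each client.Client (HTTP and local) must answer Status the same way
		status, err := c.Status(ctx)
		require.NoError(t, err, "client %d", i)
		require.Equal(t, conf.Moniker, status.NodeInfo.Moniker)
	}
}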
@@ -72,7 +78,7 @@ func TestNilCustomHTTPClient(t *testing.T) {
 }

 func TestCustomHTTPClient(t *testing.T) {
-	conf := NodeSuite(t).Config()
+	_, conf := NodeSuite(t)
 	remote := conf.RPC.ListenAddress
 	c, err := rpchttp.NewWithClient(remote, http.DefaultClient)
 	require.Nil(t, err)
@@ -82,7 +88,7 @@ func TestCustomHTTPClient(t *testing.T) {
 }

 func TestCorsEnabled(t *testing.T) {
-	conf := NodeSuite(t).Config()
+	_, conf := NodeSuite(t)
 	origin := conf.RPC.CORSAllowedOrigins[0]
 	remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http")

@@ -102,9 +108,9 @@ func TestStatus(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n := NodeSuite(t)
-	for i, c := range GetClients(t, n) {
-		moniker := n.Config().Moniker
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
+		moniker := conf.Moniker
 		status, err := c.Status(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.Equal(t, moniker, status.NodeInfo.Moniker)
@@ -115,8 +121,9 @@ func TestStatus(t *testing.T) {
 func TestInfo(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	n, conf := NodeSuite(t)

-	for i, c := range GetClients(t, NodeSuite(t)) {
+	for i, c := range GetClients(t, n, conf) {
 		// status, err := c.Status()
 		// require.Nil(t, err, "%+v", err)
 		info, err := c.ABCIInfo(ctx)
@@ -128,10 +135,14 @@ func TestInfo(t *testing.T) {
 }

 func TestNetInfo(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		netinfo, err := nc.NetInfo(context.Background())
+		netinfo, err := nc.NetInfo(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, netinfo.Listening)
 		assert.Equal(t, 0, len(netinfo.Peers))
@@ -139,11 +150,15 @@ func TestNetInfo(t *testing.T) {
 }

 func TestDumpConsensusState(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {
 		// FIXME: fix server so it doesn't panic on invalid input
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		cons, err := nc.DumpConsensusState(context.Background())
+		cons, err := nc.DumpConsensusState(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.NotEmpty(t, cons.RoundState)
 		assert.Empty(t, cons.Peers)
@@ -151,30 +166,44 @@ func TestDumpConsensusState(t *testing.T) {
 }

 func TestConsensusState(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		// FIXME: fix server so it doesn't panic on invalid input
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		cons, err := nc.ConsensusState(context.Background())
+		cons, err := nc.ConsensusState(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.NotEmpty(t, cons.RoundState)
 	}
 }

 func TestHealth(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		nc, ok := c.(client.NetworkClient)
 		require.True(t, ok, "%d", i)
-		_, err := nc.Health(context.Background())
+		_, err := nc.Health(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 	}
 }

 func TestGenesisAndValidators(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+	for i, c := range GetClients(t, n, conf) {

 		// make sure this is the right genesis file
-		gen, err := c.Genesis(context.Background())
+		gen, err := c.Genesis(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		// get the genesis validator
 		require.Equal(t, 1, len(gen.Genesis.Validators))
@@ -182,7 +211,7 @@ func TestGenesisAndValidators(t *testing.T) {

 		// get the current validators
 		h := int64(1)
-		vals, err := c.Validators(context.Background(), &h, nil, nil)
+		vals, err := c.Validators(ctx, &h, nil, nil)
 		require.Nil(t, err, "%d: %+v", i, err)
 		require.Equal(t, 1, len(vals.Validators))
 		require.Equal(t, 1, vals.Count)
@@ -199,7 +228,9 @@ func TestGenesisChunked(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	for _, c := range GetClients(t, NodeSuite(t)) {
+	n, conf := NodeSuite(t)
+
+	for _, c := range GetClients(t, n, conf) {
 		first, err := c.GenesisChunked(ctx, 0)
 		require.NoError(t, err)

@@ -221,17 +252,22 @@ func TestGenesisChunked(t *testing.T) {
 }

 func TestABCIQuery(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		// write something
 		k, v, tx := MakeTxKV()
-		bres, err := c.BroadcastTxCommit(context.Background(), tx)
+		bres, err := c.BroadcastTxCommit(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
 		apph := bres.Height + 1 // this is where the tx will be applied to the state

 		// wait before querying
 		err = client.WaitForHeight(c, apph, nil)
 		require.NoError(t, err)
-		res, err := c.ABCIQuery(context.Background(), "/key", k)
+		res, err := c.ABCIQuery(ctx, "/key", k)
 		qres := res.Response
 		if assert.Nil(t, err) && assert.True(t, qres.IsOK()) {
 			assert.EqualValues(t, v, qres.Value)
@@ -241,10 +277,12 @@ func TestABCIQuery(t *testing.T) {
 // Make some app checks
 func TestAppCalls(t *testing.T) {
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	for i, c := range GetClients(t, NodeSuite(t)) {
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {

 		// get an offset of height to avoid racing and guessing
 		s, err := c.Status(ctx)
@@ -339,21 +377,26 @@ func TestAppCalls(t *testing.T) {
 }

 func TestBlockchainInfo(t *testing.T) {
-	for i, c := range GetClients(t, NodeSuite(t)) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, conf := NodeSuite(t)
+
+	for i, c := range GetClients(t, n, conf) {
 		err := client.WaitForHeight(c, 10, nil)
 		require.NoError(t, err)

-		res, err := c.BlockchainInfo(context.Background(), 0, 0)
+		res, err := c.BlockchainInfo(ctx, 0, 0)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) > 0)

-		res, err = c.BlockchainInfo(context.Background(), 1, 1)
+		res, err = c.BlockchainInfo(ctx, 1, 1)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) == 1)

-		res, err = c.BlockchainInfo(context.Background(), 1, 10000)
+		res, err = c.BlockchainInfo(ctx, 1, 10000)
 		require.Nil(t, err, "%d: %+v", i, err)
 		assert.True(t, res.LastHeight > 0)
 		assert.True(t, len(res.BlockMetas) < 100)
@@ -361,7 +404,7 @@ func TestBlockchainInfo(t *testing.T) {
 			assert.NotNil(t, m)
 		}

-		res, err = c.BlockchainInfo(context.Background(), 10000, 1)
+		res, err = c.BlockchainInfo(ctx, 10000, 1)
 		require.NotNil(t, err)
 		assert.Nil(t, res)
 		assert.Contains(t, err.Error(), "can't be greater than max")
@@ -369,15 +412,15 @@ func TestBlockchainInfo(t *testing.T) {
 }

 func TestBroadcastTxSync(t *testing.T) {
-	n := NodeSuite(t)
+	n, conf := NodeSuite(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

 	// TODO (melekes): use mempool which is set on RPC rather than getting it from node
-	mempool := n.Mempool()
+	mempool := getMempool(t, n)
 	initMempoolSize := mempool.Size()

-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxSync(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
@@ -391,14 +434,23 @@ func TestBroadcastTxSync(t *testing.T) {
 	}
 }

+func getMempool(t *testing.T, srv service.Service) mempl.Mempool {
+	t.Helper()
+	n, ok := srv.(interface {
+		Mempool() mempl.Mempool
+	})
+	require.True(t, ok)
+	return n.Mempool()
+}
+
 func TestBroadcastTxCommit(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n := NodeSuite(t)
+	n, conf := NodeSuite(t)

-	mempool := n.Mempool()
-	for i, c := range GetClients(t, n) {
+	mempool := getMempool(t, n)
+	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxCommit(ctx, tx)
 		require.Nil(t, err, "%d: %+v", i, err)
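Note: the getMempool helper added above is what removes the tests' dependence on the node package. It recovers the mempool through a structural assertion on an anonymous interface, so any service.Service whose concrete type happens to expose Mempool() mempl.Mempool satisfies it. A self-contained sketch of the same technique, with hypothetical names standing in for the Tendermint types:

package main

import "fmt"

// sizer is the narrow capability we want to recover from an opaque value,
// analogous to interface{ Mempool() mempl.Mempool } in getMempool.
type sizer interface{ Size() int }

type fakeMempool struct{ txs []string }

func (m *fakeMempool) Size() int { return len(m.txs) }

// sizeOf mirrors getMempool: assert the opaque value against the narrow
// interface and report failure instead of panicking on a mismatch.
func sizeOf(v interface{}) (int, bool) {
	s, ok := v.(sizer)
	if !ok {
		return 0, false
	}
	return s.Size(), true
}

func main() {
	n, ok := sizeOf(&fakeMempool{txs: []string{"tx1", "tx2"}})
	fmt.Println(n, ok) // prints: 2 true
}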
@@ -410,12 +462,16 @@ func TestBroadcastTxCommit(t *testing.T) {
 }

 func TestUnconfirmedTxs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, _, tx := MakeTxKV()
 	ch := make(chan *abci.Response, 1)
-	n := NodeSuite(t)
-	mempool := n.Mempool()
-	err := mempool.CheckTx(context.Background(), tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+
+	n, conf := NodeSuite(t)
+	mempool := getMempool(t, n)
+	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})

 	require.NoError(t, err)

 	// wait for tx to arrive in mempoool.
@@ -425,10 +481,10 @@ func TestUnconfirmedTxs(t *testing.T) {
 		t.Error("Timed out waiting for CheckTx callback")
 	}

-	for _, c := range GetClients(t, n) {
+	for _, c := range GetClients(t, n, conf) {
 		mc := c.(client.MempoolClient)
 		limit := 1
-		res, err := mc.UnconfirmedTxs(context.Background(), &limit)
+		res, err := mc.UnconfirmedTxs(ctx, &limit)
 		require.NoError(t, err)

 		assert.Equal(t, 1, res.Count)
@@ -441,12 +497,16 @@ func TestUnconfirmedTxs(t *testing.T) {
 }

 func TestNumUnconfirmedTxs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, _, tx := MakeTxKV()
-	n := NodeSuite(t)
+
+	n, conf := NodeSuite(t)
 	ch := make(chan *abci.Response, 1)
-	mempool := n.Mempool()
+	mempool := getMempool(t, n)

-	err := mempool.CheckTx(context.Background(), tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
 	require.NoError(t, err)

 	// wait for tx to arrive in mempoool.
@@ -457,10 +517,10 @@ func TestNumUnconfirmedTxs(t *testing.T) {
 	}

 	mempoolSize := mempool.Size()
-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		mc, ok := c.(client.MempoolClient)
 		require.True(t, ok, "%d", i)
-		res, err := mc.NumUnconfirmedTxs(context.Background())
+		res, err := mc.NumUnconfirmedTxs(ctx)
 		require.Nil(t, err, "%d: %+v", i, err)

 		assert.Equal(t, mempoolSize, res.Count)
@@ -472,12 +532,13 @@ func TestNumUnconfirmedTxs(t *testing.T) {
 }

 func TestCheckTx(t *testing.T) {
-	n := NodeSuite(t)
-	mempool := n.Mempool()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	for _, c := range GetClients(t, n) {
+	n, conf := NodeSuite(t)
+	mempool := getMempool(t, n)
+
+	for _, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()

 		res, err := c.CheckTx(ctx, tx)
@@ -489,12 +550,13 @@ func TestCheckTx(t *testing.T) {
 }

 func TestTx(t *testing.T) {
-	// first we broadcast a tx
-	n := NodeSuite(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	n, conf := NodeSuite(t)

-	c := getHTTPClient(t, n)
+	c := getHTTPClient(t, conf)
+
+	// first we broadcast a tx
 	_, _, tx := MakeTxKV()
 	bres, err := c.BroadcastTxCommit(ctx, tx)
 	require.Nil(t, err, "%+v", err)
@@ -518,7 +580,7 @@ func TestTx(t *testing.T) {
 		{false, true, nil},
 	}

-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		for j, tc := range cases {
 			t.Logf("client %d, case %d", i, j)

@@ -550,8 +612,8 @@ func TestTxSearchWithTimeout(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n := NodeSuite(t)
-	timeoutClient := getHTTPClientWithTimeout(t, n, 10*time.Second)
+	_, conf := NodeSuite(t)
+	timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second)

 	_, _, tx := MakeTxKV()
 	_, err := timeoutClient.BroadcastTxCommit(ctx, tx)
@@ -564,8 +626,8 @@ func TestTxSearchWithTimeout(t *testing.T) {
 }

 func TestTxSearch(t *testing.T) {
-	n := NodeSuite(t)
-	c := getHTTPClient(t, n)
+	n, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)

 	// first we broadcast a few txs
 	for i := 0; i < 10; i++ {
@@ -584,7 +646,7 @@ func TestTxSearch(t *testing.T) {
 	find := result.Txs[len(result.Txs)-1]
 	anotherTxHash := types.Tx("a different tx").Hash()

-	for i, c := range GetClients(t, n) {
+	for i, c := range GetClients(t, n, conf) {
 		t.Logf("client %d", i)

 		// now we query for the tx.
@@ -688,18 +750,22 @@ func TestTxSearch(t *testing.T) {
 }

 func TestBatchedJSONRPCCalls(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
-	testBatchedJSONRPCCalls(t, c)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
+	testBatchedJSONRPCCalls(ctx, t, c)
 }

-func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
+func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) {
 	k1, v1, tx1 := MakeTxKV()
 	k2, v2, tx2 := MakeTxKV()

 	batch := c.NewBatch()
-	r1, err := batch.BroadcastTxCommit(context.Background(), tx1)
+	r1, err := batch.BroadcastTxCommit(ctx, tx1)
 	require.NoError(t, err)
-	r2, err := batch.BroadcastTxCommit(context.Background(), tx2)
+	r2, err := batch.BroadcastTxCommit(ctx, tx2)
 	require.NoError(t, err)
 	require.Equal(t, 2, batch.Count())
 	bresults, err := batch.Send(ctx)
@@ -718,9 +784,9 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 	err = client.WaitForHeight(c, apph, nil)
 	require.NoError(t, err)

-	q1, err := batch.ABCIQuery(context.Background(), "/key", k1)
+	q1, err := batch.ABCIQuery(ctx, "/key", k1)
 	require.NoError(t, err)
-	q2, err := batch.ABCIQuery(context.Background(), "/key", k2)
+	q2, err := batch.ABCIQuery(ctx, "/key", k2)
 	require.NoError(t, err)
 	require.Equal(t, 2, batch.Count())
 	qresults, err := batch.Send(ctx)
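Note: threading ctx through testBatchedJSONRPCCalls matters because the batch client performs no I/O until Send: calls such as BroadcastTxCommit and ABCIQuery only enqueue requests, and Send(ctx) ships them as a single JSON-RPC batch, so one context now bounds the whole round trip. A hedged sketch of that lifecycle, assuming the helpers above and that the batch client exposes Status like the plain client (TestExampleBatch is a hypothetical name):

func TestExampleBatch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)

	batch := c.NewBatch()
	_, err := batch.Status(ctx) // queued locally, no network traffic yet
	require.NoError(t, err)
	require.Equal(t, 1, batch.Count())

	results, err := batch.Send(ctx) // one HTTP request carries the batch
	require.NoError(t, err)
	require.Len(t, results, 1)
}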
@@ -742,14 +808,18 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 }

 func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	_, _, tx1 := MakeTxKV()
 	_, _, tx2 := MakeTxKV()

 	batch := c.NewBatch()
-	_, err := batch.BroadcastTxCommit(context.Background(), tx1)
+	_, err := batch.BroadcastTxCommit(ctx, tx1)
 	require.NoError(t, err)
-	_, err = batch.BroadcastTxCommit(context.Background(), tx2)
+	_, err = batch.BroadcastTxCommit(ctx, tx2)
 	require.NoError(t, err)
 	// we should have 2 requests waiting
 	require.Equal(t, 2, batch.Count())
@@ -760,27 +830,35 @@ func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
 }

 func TestSendingEmptyRequestBatch(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	batch := c.NewBatch()
 	_, err := batch.Send(ctx)
 	require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
 }

 func TestClearingEmptyRequestBatch(t *testing.T) {
-	c := getHTTPClient(t, NodeSuite(t))
+	_, conf := NodeSuite(t)
+	c := getHTTPClient(t, conf)
 	batch := c.NewBatch()
 	require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
 }

 func TestConcurrentJSONRPCBatching(t *testing.T) {
-	n := NodeSuite(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, conf := NodeSuite(t)
 	var wg sync.WaitGroup
-	c := getHTTPClient(t, n)
+	c := getHTTPClient(t, conf)
 	for i := 0; i < 50; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			testBatchedJSONRPCCalls(t, c)
+			testBatchedJSONRPCCalls(ctx, t, c)
 		}()
 	}
 	wg.Wait()