
correct spelling to US english (#6077)

pull/6098/head
Callum Waters, 3 years ago, committed by GitHub
commit 162f67cf26
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
61 changed files with 392 additions and 392 deletions
1. CHANGELOG.md (+2 -2)
2. UPGRADING.md (+1 -1)
3. abci/types/result.go (+1 -1)
4. behavior/doc.go (+4 -4)
5. behavior/peer_behaviour.go (+49 -0)
6. behavior/reporter.go (+19 -19)
7. behavior/reporter_test.go (+205 -0)
8. behaviour/peer_behaviour.go (+0 -49)
9. behaviour/reporter_test.go (+0 -205)
10. blockchain/v2/processor_test.go (+1 -1)
11. blockchain/v2/reactor.go (+10 -10)
12. blockchain/v2/reactor_test.go (+3 -3)
13. blockchain/v2/scheduler_test.go (+1 -1)
14. cmd/tendermint/commands/init.go (+1 -1)
15. cmd/tendermint/commands/light.go (+1 -1)
16. consensus/replay.go (+2 -2)
17. consensus/replay_test.go (+2 -2)
18. consensus/state.go (+1 -1)
19. crypto/secp256k1/secp256k1.go (+1 -1)
20. crypto/xchacha20poly1305/xchachapoly_test.go (+1 -1)
21. docs/architecture/adr-020-block-size.md (+2 -2)
22. docs/architecture/adr-033-pubsub.md (+5 -5)
23. docs/architecture/adr-062-p2p-architecture.md (+3 -3)
24. libs/bytes/bytes.go (+1 -1)
25. libs/events/event_cache_test.go (+1 -1)
26. libs/pubsub/pubsub.go (+1 -1)
27. libs/pubsub/pubsub_test.go (+9 -9)
28. libs/pubsub/subscription.go (+10 -10)
29. light/store/db/db.go (+1 -1)
30. node/node_test.go (+2 -2)
31. p2p/netaddress.go (+5 -5)
32. p2p/pex/addrbook_test.go (+1 -1)
33. p2p/pex/pex_reactor.go (+1 -1)
34. p2p/router.go (+2 -2)
35. p2p/transport_mconn.go (+1 -1)
36. privval/socket_listeners.go (+3 -3)
37. rpc/client/interface.go (+1 -1)
38. rpc/client/local/local.go (+2 -2)
39. rpc/core/events.go (+2 -2)
40. rpc/core/mempool.go (+2 -2)
41. rpc/jsonrpc/client/decode.go (+4 -4)
42. rpc/jsonrpc/server/http_json_handler.go (+1 -1)
43. rpc/jsonrpc/server/ws_handler.go (+1 -1)
44. rpc/jsonrpc/types/types.go (+3 -3)
45. state/execution_test.go (+2 -2)
46. state/state_test.go (+1 -1)
47. state/txindex/indexer_service.go (+1 -1)
48. statesync/syncer.go (+2 -2)
49. test/app/counter_test.sh (+1 -1)
50. test/e2e/README.md (+1 -1)
51. test/e2e/app/state.go (+1 -1)
52. test/e2e/runner/load.go (+2 -2)
53. test/e2e/runner/main.go (+1 -1)
54. test/maverick/consensus/replay.go (+2 -2)
55. test/maverick/consensus/state.go (+1 -1)
56. types/event_bus.go (+1 -1)
57. types/event_bus_test.go (+1 -1)
58. types/genesis_test.go (+3 -3)
59. types/signable.go (+1 -1)
60. types/vote_set.go (+1 -1)
61. version/version.go (+1 -1)

CHANGELOG.md (+2 -2)

@@ -1937,7 +1937,7 @@ See [UPGRADING.md](UPGRADING.md) for more details.
- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
- - [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
+ - [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshaling into `crypto.PubKey` interface
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty
(@gianfelipe93)
@@ -2388,7 +2388,7 @@ FEATURES:
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.
IMPROVEMENTS:
- - [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialised (@bradyjoestar)
+ - [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted


UPGRADING.md (+1 -1)

@@ -470,7 +470,7 @@ In this case, the WS client will receive an error with description:
"error": {
"code": -32000,
"msg": "Server error",
- "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
+ "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
}
}


abci/types/result.go (+1 -1)

@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
}
//---------------------------------------------------------------------------
- // override JSON marshalling so we emit defaults (ie. disable omitempty)
+ // override JSON marshaling so we emit defaults (ie. disable omitempty)
var (
jsonpbMarshaller = jsonpb.Marshaler{


behaviour/doc.go → behavior/doc.go (+4 -4)


behavior/peer_behaviour.go (+49 -0)

@@ -0,0 +1,49 @@
package behavior
import (
"github.com/tendermint/tendermint/p2p"
)
// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID p2p.NodeID
reason interface{}
}
type badMessage struct {
explanation string
}
// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}
type messageOutOfOrder struct {
explanation string
}
// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
type consensusVote struct {
explanation string
}
// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}
type blockPart struct {
explanation string
}
// BlockPart returns blockPart PeerBehavior.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}

behaviour/reporter.go → behavior/reporter.go (+19 -19)


behavior/reporter_test.go (+205 -0)

@@ -0,0 +1,205 @@
package behavior_test
import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/behavior"
"github.com/tendermint/tendermint/p2p"
)
// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()
behaviors := pr.GetBehaviors(peerID)
if len(behaviors) != 0 {
t.Error("Expected to have no behaviors reported")
}
badMessage := bh.BadMessage(peerID, "bad message")
if err := pr.Report(badMessage); err != nil {
t.Error(err)
}
behaviors = pr.GetBehaviors(peerID)
if len(behaviors) != 1 {
t.Error("Expected the peer have one reported behavior")
}
if behaviors[0] != badMessage {
t.Error("Expected Bad Message to have been reported")
}
}
type scriptItem struct {
peerID p2p.NodeID
behavior bh.PeerBehavior
}
// equalBehaviors returns true if a and b contain the same PeerBehaviors with
// the same freequencies and otherwise false.
func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
aHistogram := map[bh.PeerBehavior]int{}
bHistogram := map[bh.PeerBehavior]int{}
for _, behavior := range a {
aHistogram[behavior]++
}
for _, behavior := range b {
bHistogram[behavior]++
}
if len(aHistogram) != len(bHistogram) {
return false
}
for _, behavior := range a {
if aHistogram[behavior] != bHistogram[behavior] {
return false
}
}
for _, behavior := range b {
if bHistogram[behavior] != aHistogram[behavior] {
return false
}
}
return true
}
// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices
// of peer behaviors can be compared for the behaviors they contain and the
// freequencies that those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
// Empty sets
{[]bh.PeerBehavior{}, []bh.PeerBehavior{}},
// Single behaviors
{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}},
// Equal Frequencies
{[]bh.PeerBehavior{consensusVote, consensusVote},
[]bh.PeerBehavior{consensusVote, consensusVote}},
// Equal frequencies different orders
{[]bh.PeerBehavior{consensusVote, blockPart},
[]bh.PeerBehavior{blockPart, consensusVote}},
}
unequals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
// Comparing empty sets to non empty sets
{[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}},
// Different behaviors
{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}},
// Same behavior with different frequencies
{[]bh.PeerBehavior{consensusVote},
[]bh.PeerBehavior{consensusVote, consensusVote}},
}
)
for _, test := range equals {
if !equalBehaviors(test.left, test.right) {
t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
}
}
for _, test := range unequals {
if equalBehaviors(test.left, test.right) {
t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
}
}
}
// TestPeerBehaviorConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within a Reactor `Receive` method tests to ensure thread safety.
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID p2p.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},
{"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
{
"3",
[]bh.PeerBehavior{bh.BlockPart("3", ""),
bh.ConsensusVote("3", ""),
bh.BlockPart("3", ""),
bh.ConsensusVote("3", "")}},
{
"4",
[]bh.PeerBehavior{bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", "")}},
{
"5",
[]bh.PeerBehavior{bh.BlockPart("5", ""),
bh.ConsensusVote("5", ""),
bh.BlockPart("5", ""),
bh.ConsensusVote("5", "")}},
}
)
var receiveWg sync.WaitGroup
pr := bh.NewMockReporter()
scriptItems := make(chan scriptItem)
done := make(chan int)
numConsumers := 3
for i := 0; i < numConsumers; i++ {
receiveWg.Add(1)
go func() {
defer receiveWg.Done()
for {
select {
case pb := <-scriptItems:
if err := pr.Report(pb.behavior); err != nil {
t.Error(err)
}
case <-done:
return
}
}
}()
}
var sendingWg sync.WaitGroup
sendingWg.Add(1)
go func() {
defer sendingWg.Done()
for _, item := range behaviorScript {
for _, reason := range item.behaviors {
scriptItems <- scriptItem{item.peerID, reason}
}
}
}()
sendingWg.Wait()
for i := 0; i < numConsumers; i++ {
done <- 1
}
receiveWg.Wait()
for _, items := range behaviorScript {
reported := pr.GetBehaviors(items.peerID)
if !equalBehaviors(reported, items.behaviors) {
t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
items.peerID, items.behaviors, reported)
}
}
}

behaviour/peer_behaviour.go (+0 -49)

@@ -1,49 +0,0 @@
package behaviour
import (
"github.com/tendermint/tendermint/p2p"
)
// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
peerID p2p.NodeID
reason interface{}
}
type badMessage struct {
explanation string
}
// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}
type messageOutOfOrder struct {
explanation string
}
// MessageOutOfOrder returns a messagOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
type consensusVote struct {
explanation string
}
// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}
type blockPart struct {
explanation string
}
// BlockPart returns blockPart PeerBehaviour.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}

behaviour/reporter_test.go (+0 -205)

@@ -1,205 +0,0 @@
package behaviour_test
import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/behaviour"
"github.com/tendermint/tendermint/p2p"
)
// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()
behaviours := pr.GetBehaviours(peerID)
if len(behaviours) != 0 {
t.Error("Expected to have no behaviours reported")
}
badMessage := bh.BadMessage(peerID, "bad message")
if err := pr.Report(badMessage); err != nil {
t.Error(err)
}
behaviours = pr.GetBehaviours(peerID)
if len(behaviours) != 1 {
t.Error("Expected the peer have one reported behaviour")
}
if behaviours[0] != badMessage {
t.Error("Expected Bad Message to have been reported")
}
}
type scriptItem struct {
peerID p2p.NodeID
behaviour bh.PeerBehaviour
}
// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same freequencies and otherwise false.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
aHistogram := map[bh.PeerBehaviour]int{}
bHistogram := map[bh.PeerBehaviour]int{}
for _, behaviour := range a {
aHistogram[behaviour]++
}
for _, behaviour := range b {
bHistogram[behaviour]++
}
if len(aHistogram) != len(bHistogram) {
return false
}
for _, behaviour := range a {
if aHistogram[behaviour] != bHistogram[behaviour] {
return false
}
}
for _, behaviour := range b {
if bHistogram[behaviour] != aHistogram[behaviour] {
return false
}
}
return true
}
// TestEqualPeerBehaviours tests that equalBehaviours can tell that two slices
// of peer behaviours can be compared for the behaviours they contain and the
// freequencies that those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
var (
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehaviour
right []bh.PeerBehaviour
}{
// Empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
// Single behaviours
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
// Equal Frequencies
{[]bh.PeerBehaviour{consensusVote, consensusVote},
[]bh.PeerBehaviour{consensusVote, consensusVote}},
// Equal frequencies different orders
{[]bh.PeerBehaviour{consensusVote, blockPart},
[]bh.PeerBehaviour{blockPart, consensusVote}},
}
unequals = []struct {
left []bh.PeerBehaviour
right []bh.PeerBehaviour
}{
// Comparing empty sets to non empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
// Different behaviours
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
// Same behaviour with different frequencies
{[]bh.PeerBehaviour{consensusVote},
[]bh.PeerBehaviour{consensusVote, consensusVote}},
}
)
for _, test := range equals {
if !equalBehaviours(test.left, test.right) {
t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
}
}
for _, test := range unequals {
if equalBehaviours(test.left, test.right) {
t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
}
}
}
// TestPeerBehaviourConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within a Reactor `Receive` method tests to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
var (
behaviourScript = []struct {
peerID p2p.NodeID
behaviours []bh.PeerBehaviour
}{
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
{
"3",
[]bh.PeerBehaviour{bh.BlockPart("3", ""),
bh.ConsensusVote("3", ""),
bh.BlockPart("3", ""),
bh.ConsensusVote("3", "")}},
{
"4",
[]bh.PeerBehaviour{bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", "")}},
{
"5",
[]bh.PeerBehaviour{bh.BlockPart("5", ""),
bh.ConsensusVote("5", ""),
bh.BlockPart("5", ""),
bh.ConsensusVote("5", "")}},
}
)
var receiveWg sync.WaitGroup
pr := bh.NewMockReporter()
scriptItems := make(chan scriptItem)
done := make(chan int)
numConsumers := 3
for i := 0; i < numConsumers; i++ {
receiveWg.Add(1)
go func() {
defer receiveWg.Done()
for {
select {
case pb := <-scriptItems:
if err := pr.Report(pb.behaviour); err != nil {
t.Error(err)
}
case <-done:
return
}
}
}()
}
var sendingWg sync.WaitGroup
sendingWg.Add(1)
go func() {
defer sendingWg.Done()
for _, item := range behaviourScript {
for _, reason := range item.behaviours {
scriptItems <- scriptItem{item.peerID, reason}
}
}
}()
sendingWg.Wait()
for i := 0; i < numConsumers; i++ {
done <- 1
}
receiveWg.Wait()
for _, items := range behaviourScript {
reported := pr.GetBehaviours(items.peerID)
if !equalBehaviours(reported, items.behaviours) {
t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
items.peerID, items.behaviours, reported)
}
}
}

blockchain/v2/processor_test.go (+1 -1)

@@ -82,7 +82,7 @@ func executeProcessorTests(t *testing.T, tests []testFields) {
}
}()
- // First step must always initialise the currentState as state.
+ // First step must always initialize the currentState as state.
if step.currentState != nil {
state = makeState(step.currentState)
}


blockchain/v2/reactor.go (+10 -10)

@@ -7,7 +7,7 @@ import (
proto "github.com/gogo/protobuf/proto"
- "github.com/tendermint/tendermint/behaviour"
+ "github.com/tendermint/tendermint/behavior"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
@@ -44,7 +44,7 @@ type BlockchainReactor struct {
syncHeight int64
events chan Event // non-nil during a fast sync
- reporter behaviour.Reporter
+ reporter behavior.Reporter
io iIO
store blockStore
}
@@ -54,7 +54,7 @@ type blockApplier interface {
}
// XXX: unify naming in this package around tmState
- func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
+ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, fastSync bool) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
@@ -82,7 +82,7 @@ func NewBlockchainReactor(
blockApplier blockApplier,
store blockStore,
fastSync bool) *BlockchainReactor {
- reporter := behaviour.NewMockReporter()
+ reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, fastSync)
}
@@ -126,7 +126,7 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
- r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
+ r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.fastSync {
err := r.startSync(nil)
if err != nil {
@@ -136,7 +136,7 @@ func (r *BlockchainReactor) Start() error {
return nil
}
- // startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
+ // startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor is updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
r.mtx.Lock()
@@ -376,7 +376,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
r.processor.send(event)
case scPeerError:
r.processor.send(event)
- if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
+ if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil {
r.logger.Error("Error reporting peer", "err", err)
}
case scBlockRequest:
@@ -472,13 +472,13 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
logger.Error("error decoding message", "err", err)
- _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+ _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
return
}
if err := msgProto.Validate(); err != nil {
logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
- _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+ _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
return
}
@@ -518,7 +518,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
bi, err := types.BlockFromProto(msg.BlockResponse.Block)
if err != nil {
logger.Error("error transitioning block from protobuf", "err", err)
- _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+ _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
return
}
r.mtx.RLock()


blockchain/v2/reactor_test.go (+3 -3)

@@ -15,7 +15,7 @@ import (
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
- "github.com/tendermint/tendermint/behaviour"
+ "github.com/tendermint/tendermint/behavior"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
@@ -151,7 +151,7 @@ type testReactorParams struct {
func newTestReactor(p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
- reporter := behaviour.NewMockReporter()
+ reporter := behavior.NewMockReporter()
var appl blockApplier
@@ -308,7 +308,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
// t.Run(tt.name, func(t *testing.T) {
// reactor := newTestReactor(params)
// reactor.Start()
- // reactor.reporter = behaviour.NewMockReporter()
+ // reactor.reporter = behavior.NewMockReporter()
// mockSwitch := &mockSwitchIo{switchedToConsensus: false}
// reactor.io = mockSwitch
// // time for go routines to start


blockchain/v2/scheduler_test.go (+1 -1)

@@ -2230,7 +2230,7 @@ func TestScHandle(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
var sc *scheduler
for i, step := range tt.steps {
- // First step must always initialise the currentState as state.
+ // First step must always initialize the currentState as state.
if step.currentSc != nil {
sc = newTestScheduler(*step.currentSc)
}


cmd/tendermint/commands/init.go (+1 -1)

@@ -14,7 +14,7 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)
- // InitFilesCmd initialises a fresh Tendermint Core instance.
+ // InitFilesCmd initializes a fresh Tendermint Core instance.
var InitFilesCmd = &cobra.Command{
Use: "init",
Short: "Initialize Tendermint",


cmd/tendermint/commands/light.go (+1 -1)

@@ -104,7 +104,7 @@ func init() {
}
func runProxy(cmd *cobra.Command, args []string) error {
- // Initialise logger.
+ // Initialize logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {


consensus/replay.go (+2 -2)

@@ -55,8 +55,8 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
}
- case <-newStepSub.Cancelled():
- return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was cancelled")
+ case <-newStepSub.Canceled():
+ return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled")
case <-ticker:
return fmt.Errorf("failed to read off newStepSub.Out()")
}


consensus/replay_test.go (+2 -2)

@@ -97,8 +97,8 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
require.NoError(t, err)
select {
case <-newBlockSub.Out():
- case <-newBlockSub.Cancelled():
- t.Fatal("newBlockSub was cancelled")
+ case <-newBlockSub.Canceled():
+ t.Fatal("newBlockSub was canceled")
case <-time.After(120 * time.Second):
t.Fatal("Timed out waiting for new block (see trace above)")
}


consensus/state.go (+1 -1)

@@ -1934,7 +1934,7 @@ func (cs *State) addVote(
}
// Height mismatch is ignored.
- // Not necessarily a bad peer, but not favourable behaviour.
+ // Not necessarily a bad peer, but not favorable behavior.
if vote.Height != cs.Height {
cs.Logger.Debug("vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID)
return


crypto/secp256k1/secp256k1.go (+1 -1)

@@ -152,7 +152,7 @@ func (pubKey PubKey) Address() crypto.Address {
return crypto.Address(hasherRIPEMD160.Sum(nil))
}
- // Bytes returns the pubkey marshalled with amino encoding.
+ // Bytes returns the pubkey marshaled with amino encoding.
func (pubKey PubKey) Bytes() []byte {
return []byte(pubKey)
}


crypto/xchacha20poly1305/xchachapoly_test.go (+1 -1)

@@ -83,7 +83,7 @@ func TestRandom(t *testing.T) {
}
}
- // AFOREMENTIONED LICENCE
+ // AFOREMENTIONED LICENSE
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without


docs/architecture/adr-020-block-size.md (+2 -2)

@@ -12,7 +12,7 @@
## Context
We currently use MaxTxs to reap txs from the mempool when proposing a block,
- but enforce MaxBytes when unmarshalling a block, so we could easily propose a
+ but enforce MaxBytes when unmarshaling a block, so we could easily propose a
block thats too large to be valid.
We should just remove MaxTxs all together and stick with MaxBytes, and have a
@@ -33,7 +33,7 @@ MaxBytes provides a clear limit on the total size of a block that requires no
additional calculation if you want to use it to bound resource usage, and there
has been considerable discussions about optimizing tendermint around 1MB blocks.
Regardless, we need some maximum on the size of a block so we can avoid
- unmarshalling blocks that are too big during the consensus, and it seems more
+ unmarshaling blocks that are too big during the consensus, and it seems more
straightforward to provide a single fixed number for this rather than a
computation of "MaxDataBytes + everything else you need to make room for
(signatures, evidence, header)". MaxBytes provides a simple bound so we can


docs/architecture/adr-033-pubsub.md (+5 -5)

@@ -121,7 +121,7 @@ type Subscription struct {
}
func (s *Subscription) Out() <-chan MsgAndTags
- func (s *Subscription) Cancelled() <-chan struct{}
+ func (s *Subscription) Canceled() <-chan struct{}
func (s *Subscription) Err() error
```
@@ -129,10 +129,10 @@ func (s *Subscription) Err() error
`Unsubscribe`/`UnsubscribeAll` does not close the channel to avoid clients from
receiving a nil message.
- `Cancelled()` returns a channel that's closed when the subscription is terminated
+ `Canceled()` returns a channel that's closed when the subscription is terminated
and supposed to be used in a select statement.
- If the channel returned by `Cancelled()` is not closed yet, `Err()` returns nil.
+ If the channel returned by `Canceled()` is not closed yet, `Err()` returns nil.
If the channel is closed, `Err()` returns a non-nil error explaining why:
`ErrUnsubscribed` if the subscriber choose to unsubscribe,
`ErrOutOfCapacity` if the subscriber is not pulling messages fast enough and the channel returned by `Out()` became full.
@@ -147,7 +147,7 @@ for {
select {
case msgAndTags <- subscription.Out():
// ...
- case <-subscription.Cancelled():
+ case <-subscription.Canceled():
return subscription.Err()
}
```
@@ -232,7 +232,7 @@ In review
- more idiomatic interface
- subscribers know what tags msg was published with
- - subscribers aware of the reason their subscription was cancelled
+ - subscribers aware of the reason their subscription was canceled
### Negative


docs/architecture/adr-062-p2p-architecture.md (+3 -3)

@@ -314,7 +314,7 @@ type Channel struct {
In <-chan Envelope // Inbound messages (peers to reactors).
Out chan<- Envelope // outbound messages (reactors to peers)
Error chan<- PeerError // Peer error reporting.
- messageType proto.Message // Channel's message type, for e.g. unmarshalling.
+ messageType proto.Message // Channel's message type, for e.g. unmarshaling.
}
// Close closes the channel, also closing Out and Error.
@@ -529,7 +529,7 @@ func RunEchoReactor(router *p2p.Router, peerManager *p2p.PeerManager) error {
}
// EchoReactor provides an echo service, pinging all known peers until the given
- // context is cancelled.
+ // context is canceled.
func EchoReactor(ctx context.Context, channel *p2p.Channel, peerUpdates *p2p.PeerUpdates) error {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
@@ -567,7 +567,7 @@ func EchoReactor(ctx context.Context, channel *p2p.Channel, peerUpdates *p2p.Pee
case peerUpdate := <-peerUpdates:
fmt.Printf("Peer %q changed status to %q", peerUpdate.PeerID, peerUpdate.Status)
- // Exit when context is cancelled.
+ // Exit when context is canceled.
case <-ctx.Done():
return nil
}


libs/bytes/bytes.go (+1 -1)

@@ -58,7 +58,7 @@ func (bz *HexBytes) UnmarshalJSON(data []byte) error {
return nil
}
- // Bytes fulfils various interfaces in light-client, etc...
+ // Bytes fulfills various interfaces in light-client, etc...
func (bz HexBytes) Bytes() []byte {
return bz
}


libs/events/event_cache_test.go (+1 -1)

@@ -13,7 +13,7 @@ func TestEventCache_Flush(t *testing.T) {
require.NoError(t, err)
err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) {
- // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache
+ // Check we are not initializing an empty buffer full of zeroed eventInfos in the EventCache
require.FailNow(t, "We should never receive a message on this switch since none are fired")
})
require.NoError(t, err)


libs/pubsub/pubsub.go (+1 -1)

@@ -27,7 +27,7 @@
// select {
// case msg <- subscription.Out():
// // handle msg.Data() and msg.Events()
- // case <-subscription.Cancelled():
+ // case <-subscription.Canceled():
// return subscription.Err()
// }
// }


libs/pubsub/pubsub_test.go (+9 -9)

@@ -59,7 +59,7 @@ func TestSubscribe(t *testing.T) {
select {
case <-published:
assertReceive(t, "Quicksilver", subscription.Out())
- assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
+ assertCanceled(t, subscription, pubsub.ErrOutOfCapacity)
case <-time.After(3 * time.Second):
t.Fatal("Expected Publish(Asylum) not to block")
}
@@ -146,7 +146,7 @@ func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) {
err = s.Publish(ctx, "Viper")
require.NoError(t, err)
- assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
+ assertCanceled(t, subscription, pubsub.ErrOutOfCapacity)
}
func TestDifferentClients(t *testing.T) {
@@ -298,7 +298,7 @@ func TestUnsubscribe(t *testing.T) {
require.NoError(t, err)
assert.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe")
- assertCancelled(t, subscription, pubsub.ErrUnsubscribed)
+ assertCanceled(t, subscription, pubsub.ErrUnsubscribed)
}
func TestClientUnsubscribesTwice(t *testing.T) {
@@ -373,8 +373,8 @@ func TestUnsubscribeAll(t *testing.T) {
assert.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll")
assert.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll")
- assertCancelled(t, subscription1, pubsub.ErrUnsubscribed)
- assertCancelled(t, subscription2, pubsub.ErrUnsubscribed)
+ assertCanceled(t, subscription1, pubsub.ErrUnsubscribed)
+ assertCanceled(t, subscription2, pubsub.ErrUnsubscribed)
}
func TestBufferCapacity(t *testing.T) {
@@ -431,7 +431,7 @@ func benchmarkNClients(n int, b *testing.B) {
select {
case <-subscription.Out():
continue
- case <-subscription.Cancelled():
+ case <-subscription.Canceled():
return
}
}
@@ -472,7 +472,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
select {
case <-subscription.Out():
continue
- case <-subscription.Cancelled():
+ case <-subscription.Canceled():
return
}
}
@@ -500,8 +500,8 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message,
}
}
- func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) {
- _, ok := <-subscription.Cancelled()
+ func assertCanceled(t *testing.T, subscription *pubsub.Subscription, err error) {
+ _, ok := <-subscription.Canceled()
assert.False(t, ok)
assert.Equal(t, err, subscription.Err())
}

libs/pubsub/subscription.go (+10 -10)

@@ -23,16 +23,16 @@ var (
type Subscription struct {
out chan Message
- cancelled chan struct{}
- mtx tmsync.RWMutex
- err error
+ canceled chan struct{}
+ mtx tmsync.RWMutex
+ err error
}
// NewSubscription returns a new subscription with the given outCapacity.
func NewSubscription(outCapacity int) *Subscription {
return &Subscription{
- out: make(chan Message, outCapacity),
- cancelled: make(chan struct{}),
+ out: make(chan Message, outCapacity),
+ canceled: make(chan struct{}),
}
}
@@ -43,13 +43,13 @@ func (s *Subscription) Out() <-chan Message {
return s.out
}
- // Cancelled returns a channel that's closed when the subscription is
+ // Canceled returns a channel that's closed when the subscription is
// terminated and supposed to be used in a select statement.
- func (s *Subscription) Cancelled() <-chan struct{} {
- return s.cancelled
+ func (s *Subscription) Canceled() <-chan struct{} {
+ return s.canceled
}
- // Err returns nil if the channel returned by Cancelled is not yet closed.
+ // Err returns nil if the channel returned by Canceled is not yet closed.
// If the channel is closed, Err returns a non-nil error explaining why:
// - ErrUnsubscribed if the subscriber choose to unsubscribe,
// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough
@@ -66,7 +66,7 @@ func (s *Subscription) cancel(err error) {
s.mtx.Lock()
s.err = err
s.mtx.Unlock()
- close(s.cancelled)
+ close(s.canceled)
}
// Message glues data and events together.


light/store/db/db.go (+1 -1)

@@ -57,7 +57,7 @@ func (s *dbs) SaveLightBlock(lb *types.LightBlock) error {
lbBz, err := lbpb.Marshal()
if err != nil {
- return fmt.Errorf("marshalling LightBlock: %w", err)
+ return fmt.Errorf("marshaling LightBlock: %w", err)
}
s.mtx.Lock()


node/node_test.go (+2 -2)

@@ -51,8 +51,8 @@ func TestNodeStartStop(t *testing.T) {
require.NoError(t, err)
select {
case <-blocksSub.Out():
- case <-blocksSub.Cancelled():
- t.Fatal("blocksSub was cancelled")
+ case <-blocksSub.Canceled():
+ t.Fatal("blocksSub was canceled")
case <-time.After(10 * time.Second):
t.Fatal("timed out waiting for the node to produce a block")
}


p2p/netaddress.go (+5 -5)

@@ -332,10 +332,10 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
}
return Default
default: /* ipv6 */
- var tunnelled bool
- // Is our v6 is tunnelled?
+ var tunneled bool
+ // Is our v6 is tunneled?
if o.RFC3964() || o.RFC6052() || o.RFC6145() {
- tunnelled = true
+ tunneled = true
}
switch {
case !o.Routable():
@@ -344,8 +344,8 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
return Teredo
case o.IP.To4() != nil:
return Ipv4
- case tunnelled:
- // only prioritise ipv6 if we aren't tunnelling it.
+ case tunneled:
+ // only prioritize ipv6 if we aren't tunneling it.
return Ipv6Weak
}
return Ipv6Strong


p2p/pex/addrbook_test.go (+1 -1)

@@ -741,7 +741,7 @@ func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nO
return
}
- // Analyse the layout of the selection specified by 'addrs'
+ // Analyze the layout of the selection specified by 'addrs'
// Returns:
// - seqLens - the lengths of the sequences of addresses of same type
// - seqTypes - the types of sequences in selection


p2p/pex/pex_reactor.go (+1 -1)

@@ -492,7 +492,7 @@ func (r *Reactor) ensurePeers() {
}
// TODO: consider moving some checks from toDial into here
// so we don't even consider dialing peers that we want to wait
- // before dialling again, or have dialed too many times already
+ // before dialing again, or have dialed too many times already
r.Logger.Info("Will dial address", "addr", try)
toDial[try.ID] = try
}


p2p/router.go (+2 -2)

@@ -58,7 +58,7 @@ type Channel struct {
Out chan<- Envelope // outbound messages (reactors to peers)
Error chan<- PeerError // peer error reporting
- messageType proto.Message // the channel's message type, used for unmarshalling
+ messageType proto.Message // the channel's message type, used for unmarshaling
closeCh chan struct{}
closeOnce sync.Once
}
@@ -767,7 +767,7 @@ func (r *Router) OnStop() {
}
}
- // stopCtx returns a new context that is cancelled when the router stops.
+ // stopCtx returns a new context that is canceled when the router stops.
func (r *Router) stopCtx() context.Context {
ctx, cancel := context.WithCancel(context.Background())
go func() {


p2p/transport_mconn.go (+1 -1)

@@ -254,7 +254,7 @@ func (c *mConnConnection) Handshake(
)
// To handle context cancellation, we need to do the handshake in a
// goroutine and abort the blocking network calls by closing the connection
- // when the context is cancelled.
+ // when the context is canceled.
go func() {
// FIXME: Since the MConnection code panics, we need to recover it and turn it
// into an error. We should remove panics instead.


privval/socket_listeners.go (+3 -3)

@@ -38,7 +38,7 @@ func TCPListenerTimeoutReadWrite(timeout time.Duration) TCPListenerOption {
// tcpListener implements net.Listener.
var _ net.Listener = (*TCPListener)(nil)
- // TCPListener wraps a *net.TCPListener to standardise protocol timeouts
+ // TCPListener wraps a *net.TCPListener to standardize protocol timeouts
// and potentially other tuning parameters. It also returns encrypted connections.
type TCPListener struct {
*net.TCPListener
@@ -103,7 +103,7 @@ func UnixListenerTimeoutReadWrite(timeout time.Duration) UnixListenerOption {
return func(ul *UnixListener) { ul.timeoutReadWrite = timeout }
}
- // UnixListener wraps a *net.UnixListener to standardise protocol timeouts
+ // UnixListener wraps a *net.UnixListener to standardize protocol timeouts
// and potentially other tuning parameters. It returns unencrypted connections.
type UnixListener struct {
*net.UnixListener
@@ -150,7 +150,7 @@ func (ln *UnixListener) Accept() (net.Conn, error) {
// timeoutConn implements net.Conn.
var _ net.Conn = (*timeoutConn)(nil)
- // timeoutConn wraps a net.Conn to standardise protocol timeouts / deadline resets.
+ // timeoutConn wraps a net.Conn to standardize protocol timeouts / deadline resets.
type timeoutConn struct {
net.Conn
timeout time.Duration


rpc/client/interface.go (+1 -1)

@@ -120,7 +120,7 @@ type MempoolClient interface {
}
// EvidenceClient is used for submitting an evidence of the malicious
- // behaviour.
+ // behavior.
type EvidenceClient interface {
BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
}


rpc/client/local/local.go (+2 -2)

@@ -241,12 +241,12 @@ func (c *Local) eventsRoutine(
c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query)
}
}
- case <-sub.Cancelled():
+ case <-sub.Canceled():
if sub.Err() == tmpubsub.ErrUnsubscribed {
return
}
- c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String())
+ c.Logger.Error("subscription was canceled, resubscribing...", "err", sub.Err(), "query", q.String())
sub = c.resubscribe(subscriber, q)
if sub == nil { // client was stopped
return


rpc/core/events.go (+2 -2)

@@ -58,7 +58,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er
env.Logger.Info("Can't write response (slow client)",
"to", addr, "subscriptionID", subscriptionID, "err", err)
}
- case <-sub.Cancelled():
+ case <-sub.Canceled():
if sub.Err() != tmpubsub.ErrUnsubscribed {
var reason string
if sub.Err() == nil {
@@ -67,7 +67,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er
reason = sub.Err().Error()
}
var (
- err = fmt.Errorf("subscription was cancelled (reason: %s)", reason)
+ err = fmt.Errorf("subscription was canceled (reason: %s)", reason)
resp = rpctypes.RPCServerError(subscriptionID, err)
)
if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok {


rpc/core/mempool.go (+2 -2)

@@ -106,14 +106,14 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
Hash: tx.Hash(),
Height: deliverTxRes.Height,
}, nil
- case <-deliverTxSub.Cancelled():
+ case <-deliverTxSub.Canceled():
var reason string
if deliverTxSub.Err() == nil {
reason = "Tendermint exited"
} else {
reason = deliverTxSub.Err().Error()
}
- err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason)
+ err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason)
env.Logger.Error("Error on broadcastTxCommit", "err", err)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes,


rpc/jsonrpc/client/decode.go (+4 -4)

@@ -19,7 +19,7 @@ func unmarshalResponseBytes(
// into the correct type.
response := &types.RPCResponse{}
if err := json.Unmarshal(responseBytes, response); err != nil {
- return nil, fmt.Errorf("error unmarshalling: %w", err)
+ return nil, fmt.Errorf("error unmarshaling: %w", err)
}
if response.Error != nil {
@@ -32,7 +32,7 @@
// Unmarshal the RawMessage into the result.
if err := tmjson.Unmarshal(response.Result, result); err != nil {
- return nil, fmt.Errorf("error unmarshalling result: %w", err)
+ return nil, fmt.Errorf("error unmarshaling result: %w", err)
}
return result, nil
@@ -49,7 +49,7 @@
)
if err := json.Unmarshal(responseBytes, &responses); err != nil {
- return nil, fmt.Errorf("error unmarshalling: %w", err)
+ return nil, fmt.Errorf("error unmarshaling: %w", err)
}
// No response error checking here as there may be a mixture of successful
@@ -78,7 +78,7 @@ func unmarshalResponseBytesArray(
for i := 0; i < len(responses); i++ {
if err := tmjson.Unmarshal(responses[i].Result, results[i]); err != nil {
- return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err)
+ return nil, fmt.Errorf("error unmarshaling #%d result: %w", i, err)
}
}


rpc/jsonrpc/server/http_json_handler.go (+1 -1)

@@ -52,7 +52,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
w,
http.StatusInternalServerError,
types.RPCParseError(
- fmt.Errorf("error unmarshalling request: %w", err),
+ fmt.Errorf("error unmarshaling request: %w", err),
),
)
return


rpc/jsonrpc/server/ws_handler.go (+1 -1)

@@ -49,7 +49,7 @@ func NewWebsocketManager(
CheckOrigin: func(r *http.Request) bool {
// TODO ???
//
- // The default behaviour would be relevant to browser-based clients,
+ // The default behavior would be relevant to browser-based clients,
// afaik. I suppose having a pass-through is a workaround for allowing
// for more complex security schemes, shifting the burden of
// AuthN/AuthZ outside the Tendermint RPC.


rpc/jsonrpc/types/types.go (+3 -3)

@@ -55,7 +55,7 @@ type RPCRequest struct {
Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{}
}
- // UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int
+ // UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int
func (req *RPCRequest) UnmarshalJSON(data []byte) error {
unsafeReq := &struct {
JSONRPC string `json:"jsonrpc"`
@@ -154,7 +154,7 @@ type RPCResponse struct {
Error *RPCError `json:"error,omitempty"`
}
- // UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int
+ // UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int
func (resp *RPCResponse) UnmarshalJSON(data []byte) error {
unsafeResp := &struct {
JSONRPC string `json:"jsonrpc"`
@@ -187,7 +187,7 @@ func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse {
var js []byte
js, err := tmjson.Marshal(res)
if err != nil {
- return RPCInternalError(id, fmt.Errorf("error marshalling response: %w", err))
+ return RPCInternalError(id, fmt.Errorf("error marshaling response: %w", err))
}
rawMsg = json.RawMessage(js)
}


state/execution_test.go (+2 -2)

@@ -405,8 +405,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey)
assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower)
}
- case <-updatesSub.Cancelled():
- t.Fatalf("updatesSub was cancelled (reason: %v)", updatesSub.Err())
+ case <-updatesSub.Canceled():
+ t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err())
case <-time.After(1 * time.Second):
t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.")
}


state/state_test.go (+1 -1)

@@ -40,7 +40,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
return tearDown, stateDB, state
}
- // TestStateCopy tests the correct copying behaviour of State.
+ // TestStateCopy tests the correct copying behavior of State.
func TestStateCopy(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)


state/txindex/indexer_service.go (+1 -1)

@@ -32,7 +32,7 @@ func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService
// and indexing them by events.
func (is *IndexerService) OnStart() error {
// Use SubscribeUnbuffered here to ensure both subscriptions does not get
- // cancelled due to not pulling messages fast enough. Cause this might
+ // canceled due to not pulling messages fast enough. Cause this might
// sometimes happen when there are no other subscribers.
blockHeadersSub, err := is.eventBus.SubscribeUnbuffered(


statesync/syncer.go (+2 -2)

@@ -245,7 +245,7 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.
return sm.State{}, nil, err
}
- // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context cancelled.
+ // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context canceled.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i := int32(0); i < chunkFetchers; i++ {
@@ -385,7 +385,7 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch
for {
index, err := chunks.Allocate()
if err == errDone {
- // Keep checking until the context is cancelled (restore is done), in case any
+ // Keep checking until the context is canceled (restore is done), in case any
// chunks need to be refetched.
select {
case <-ctx.Done():


test/app/counter_test.sh (+1 -1)

@@ -29,7 +29,7 @@ function getCode() {
else
# protobuf auto adds `omitempty` to everything so code OK and empty data/log
# will not even show when marshalled into json
- # apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ...
+ # apparently we can use github.com/golang/protobuf/jsonpb to do the marshaling ...
echo 0
fi
}


test/e2e/README.md (+1 -1)

@@ -72,7 +72,7 @@ The test runner has the following stages, which can also be executed explicitly
* `logs`: outputs all node logs.
- * `tail`: tails (follows) node logs until cancelled.
+ * `tail`: tails (follows) node logs until canceled.
## Tests


test/e2e/app/state.go (+1 -1)

@@ -19,7 +19,7 @@ type State struct {
Values map[string]string
Hash []byte
- // private fields aren't marshalled to disk.
+ // private fields aren't marshaled to disk.
file string
persistInterval uint64
initialHeight uint64


test/e2e/runner/load.go (+2 -2)

@@ -14,7 +14,7 @@ import (
)
// Load generates transactions against the network until the given
- // context is cancelled.
+ // context is canceled.
func Load(ctx context.Context, testnet *e2e.Testnet) error {
// Since transactions are executed across all nodes in the network, we need
// to reduce transaction load for larger networks to avoid using too much
@@ -64,7 +64,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error {
}
}
- // loadGenerate generates jobs until the context is cancelled
+ // loadGenerate generates jobs until the context is canceled
func loadGenerate(ctx context.Context, chTx chan<- types.Tx) {
for i := 0; i < math.MaxInt64; i++ {
// We keep generating the same 1000 keys over and over, with different values.


test/e2e/runner/main.go (+1 -1)

@@ -171,7 +171,7 @@ func NewCLI() *CLI {
cli.root.AddCommand(&cobra.Command{
Use: "load",
- Short: "Generates transaction load until the command is cancelled",
+ Short: "Generates transaction load until the command is canceled",
RunE: func(cmd *cobra.Command, args []string) error {
return Load(context.Background(), cli.testnet)
},


test/maverick/consensus/replay.go (+2 -2)

@@ -56,8 +56,8 @@ func (cs *State) readReplayMessage(msg *tmcon.TimedWALMessage, newStepSub types.
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
}
- case <-newStepSub.Cancelled():
- return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was cancelled")
+ case <-newStepSub.Canceled():
+ return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled")
case <-ticker:
return fmt.Errorf("failed to read off newStepSub.Out()")
}


test/maverick/consensus/state.go (+1 -1)

@@ -387,7 +387,7 @@ func (cs *State) addVote(
}
// Height mismatch is ignored.
- // Not necessarily a bad peer, but not favourable behaviour.
+ // Not necessarily a bad peer, but not favorable behavior.
if vote.Height != cs.Height {
cs.Logger.Debug("vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID)
return


types/event_bus.go (+1 -1)

@@ -23,7 +23,7 @@ type EventBusSubscriber interface {
type Subscription interface {
Out() <-chan tmpubsub.Message
- Cancelled() <-chan struct{}
+ Canceled() <-chan struct{}
Err() error
}


types/event_bus_test.go (+1 -1)

@@ -438,7 +438,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
for {
select {
case <-sub.Out():
- case <-sub.Cancelled():
+ case <-sub.Canceled():
return
}
}


types/genesis_test.go (+3 -3)

@@ -84,7 +84,7 @@ func TestGenesisGood(t *testing.T) {
Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}},
}
genDocBytes, err = tmjson.Marshal(baseGenDoc)
- assert.NoError(t, err, "error marshalling genDoc")
+ assert.NoError(t, err, "error marshaling genDoc")
// test base gendoc and check consensus params were filled
genDoc, err := GenesisDocFromJSON(genDocBytes)
@@ -96,14 +96,14 @@
// create json with consensus params filled
genDocBytes, err = tmjson.Marshal(genDoc)
- assert.NoError(t, err, "error marshalling genDoc")
+ assert.NoError(t, err, "error marshaling genDoc")
genDoc, err = GenesisDocFromJSON(genDocBytes)
assert.NoError(t, err, "expected no error for valid genDoc json")
// test with invalid consensus params
genDoc.ConsensusParams.Block.MaxBytes = 0
genDocBytes, err = tmjson.Marshal(genDoc)
- assert.NoError(t, err, "error marshalling genDoc")
+ assert.NoError(t, err, "error marshaling genDoc")
_, err = GenesisDocFromJSON(genDocBytes)
assert.Error(t, err, "expected error for genDoc json with block size of 0")


types/signable.go (+1 -1)

@@ -17,7 +17,7 @@ var (
// SignBytes returns the bytes to be signed
// NOTE: chainIDs are part of the SignBytes but not
// necessarily the object themselves.
- // NOTE: Expected to panic if there is an error marshalling.
+ // NOTE: Expected to panic if there is an error marshaling.
type Signable interface {
SignBytes(chainID string) []byte
}

types/vote_set.go (+1 -1)

@@ -498,7 +498,7 @@ func (voteSet *VoteSet) MarshalJSON() ([]byte, error) {
}
// More human readable JSON of the vote set
- // NOTE: insufficient for unmarshalling from (compressed votes)
+ // NOTE: insufficient for unmarshaling from (compressed votes)
// TODO: make the peerMaj23s nicer to read (eg just the block hash)
type VoteSetJSON struct {
Votes []string `json:"votes"`


version/version.go (+1 -1)

@@ -16,7 +16,7 @@ const (
)
var (
- // P2PProtocol versions all p2p behaviour and msgs.
+ // P2PProtocol versions all p2p behavior and msgs.
// This includes proposer selection.
P2PProtocol uint64 = 8

