@@ -9,6 +9,7 @@ import (
 	"time"
 
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/internal/libs/clist"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/p2p"
@@ -306,7 +307,7 @@ func (r *Reactor) processPeerUpdates() {
 func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
 	peerMempoolID := r.ids.GetForPeer(peerID)
-	var memTx *WrappedTx
+	var nextGossipTx *clist.CElement
 
 	// remove the peer ID from the map of routines and mark the waitgroup as done
 	defer func() {
@@ -333,10 +334,10 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
 		// This happens because the CElement we were looking at got garbage
 		// collected (removed). That is, .NextWait() returned nil. Go ahead and
 		// start from the beginning.
-		if memTx == nil {
+		if nextGossipTx == nil {
 			select {
 			case <-r.mempool.WaitForNextTx(): // wait until a tx is available
-				if memTx = r.mempool.NextGossipTx(); memTx == nil {
+				if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
 					continue
 				}
@@ -352,6 +353,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
 			}
 		}
 
+		memTx := nextGossipTx.Value.(*WrappedTx)
+
 		if r.peerMgr != nil {
 			height := r.peerMgr.GetHeight(peerID)
 			if height > 0 && height < memTx.height-1 {
@@ -380,16 +383,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
 		}
 
 		select {
-		case <-memTx.gossipEl.NextWaitChan():
-			// If there is a next element in gossip index, we point memTx to that node's
-			// value, otherwise we reset memTx to nil which will be checked at the
-			// parent for loop.
-			next := memTx.gossipEl.Next()
-			if next != nil {
-				memTx = next.Value.(*WrappedTx)
-			} else {
-				memTx = nil
-			}
+		case <-nextGossipTx.NextWaitChan():
+			nextGossipTx = nextGossipTx.Next()
 		case <-closer.Done():
 			// The peer is marked for removal via a PeerUpdate as the doneCh was
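
The hunks above boil down to one idea: the broadcast loop now keeps its position in the mempool's gossip index as a `*clist.CElement` cursor (`nextGossipTx`) and derives the transaction from it on each pass, instead of holding the `*WrappedTx` value and reaching back through `memTx.gossipEl` to advance. Below is a minimal, self-contained sketch of that loop shape. The `element` type and its channel wiring are stand-ins introduced for illustration (clist lives under tendermint's `internal/` tree and cannot be imported from outside the module), not tendermint's actual API.

```go
package main

import "fmt"

// element is a hypothetical stand-in for clist.CElement. nextWait models
// NextWaitChan(): closed once Next() is safe to read (pre-closed here for
// simplicity; the real clist also closes it when the element is removed).
type element struct {
	Value    interface{}
	next     *element
	nextWait chan struct{}
}

func (e *element) Next() *element                { return e.next }
func (e *element) NextWaitChan() <-chan struct{} { return e.nextWait }

func main() {
	closedCh := func() chan struct{} { c := make(chan struct{}); close(c); return c }

	// A two-element "gossip index": tx-a -> tx-b.
	b := &element{Value: "tx-b", nextWait: closedCh()}
	a := &element{Value: "tx-a", next: b, nextWait: closedCh()}

	closer := make(chan struct{}) // stand-in for closer.Done(); never fires here

	// The post-diff loop shape: the cursor is the state, the tx is derived,
	// mirroring `memTx := nextGossipTx.Value.(*WrappedTx)` in the hunk above.
	for cursor := a; cursor != nil; {
		tx := cursor.Value.(string)
		fmt.Println("gossip", tx)

		select {
		case <-cursor.NextWaitChan():
			// One-line advance; a nil result is what sends the reactor back
			// to mempool.NextGossipTx() at the top of its loop.
			cursor = cursor.Next()
		case <-closer:
			return
		}
	}
}
```

One practical payoff of this shape: when the element under the cursor is removed from the list (the "garbage collected" case the comments mention), the loop only has to nil the cursor and re-query `NextGossipTx()`, rather than juggling a tx value whose `gossipEl` back-pointer may no longer be linked into the list.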