
Merge pull request from GHSA-v24h-pjjv-mcp6

v0.32.10: advisory fix
Alessio Treglia 4 years ago
committed by GitHub
commit eab4d6d82b
11 changed files with 156 additions and 10 deletions
  1. CHANGELOG.md (+61, -0)
  2. CHANGELOG_PENDING.md (+1, -1)
  3. consensus/reactor.go (+1, -1)
  4. evidence/reactor.go (+1, -1)
  5. mempool/reactor.go (+8, -3)
  6. mempool/reactor_test.go (+18, -0)
  7. node/node.go (+5, -0)
  8. p2p/pex/pex_reactor.go (+1, -1)
  9. p2p/transport.go (+14, -2)
  10. p2p/transport_test.go (+45, -0)
  11. version/version.go (+1, -1)

CHANGELOG.md (+61, -0)

@@ -1,5 +1,66 @@
# Changelog
## v0.32.10
*April 6, 2020*
This security release fixes:
### Denial of Service 1
Tendermint 0.33.2 and earlier does not limit the number of P2P connection
requests. For each p2p connection, Tendermint allocates ~0.5MB. Even though
this memory is garbage collected once the connection is terminated (due to
duplicate IP or reaching a maximum number of inbound peers), temporary memory
spikes can lead to OOM (Out-Of-Memory) exceptions.
Tendermint 0.33.3 (and 0.32.10) limits the total number of P2P incoming
connection requests to `p2p.max_num_inbound_peers +
len(p2p.unconditional_peer_ids)`.
Notes:
- Tendermint does not rate limit P2P connection requests per IP (an attacker
can saturate all the inbound slots);
- Tendermint does not rate limit HTTP(S) requests. If you expose any RPC
endpoints to the public, please make sure to put some protection in place
(e.g., https://www.nginx.com/blog/rate-limiting-nginx/). We may implement this
in the future ([\#1696](https://github.com/tendermint/tendermint/issues/1696)).
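
The limit is enforced at the listener: the p2p/transport.go change below wraps
the TCP listener with `golang.org/x/net/netutil.LimitListener`. As a rough,
standalone sketch of the same technique (the address and the limit of 40 are
made up for illustration and are not Tendermint's defaults):

```go
package main

import (
	"log"
	"net"

	"golang.org/x/net/netutil" // the same package the patch relies on
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:26656") // illustrative address
	if err != nil {
		log.Fatal(err)
	}

	// Cap the number of simultaneously accepted connections. Once the cap is
	// reached, Accept blocks until an earlier connection closes, so the
	// ~0.5MB-per-connection allocations cannot pile up without bound.
	const maxInbound = 40 // stand-in for max_num_inbound_peers + len(unconditional_peer_ids)
	limited := netutil.LimitListener(ln, maxInbound)

	for {
		c, err := limited.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close() // closing the connection frees a slot in the limiter
			// ... handshake / peer upgrade would normally happen here ...
		}(c)
	}
}
```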
### Denial of Service 2
Tendermint 0.33.2 and earlier does not reclaim the `activeID` of a peer after
it is removed in the `Mempool` reactor. This does not happen all the time: it
only happens when a connection fails (for any reason) before the Peer is
created and added to all reactors. `RemovePeer` is therefore called before
`AddPeer`, which leads to ever-growing memory usage (the `activeIDs` map). The
`activeIDs` map has a maximum size of 65535, and the node will panic if the map
reaches that maximum.
An attacker can create a lot of connection attempts (exploiting Denial of
Service 1), which ultimately will lead to the node panicking.
Tendermint 0.33.3 (and 0.32.10) claims `activeID` for a peer in `InitPeer`,
which is executed before `MConnection` is started.
Notes:
- `InitPeer` function was added to all reactors to combat a similar issue -
[\#3338](https://github.com/tendermint/tendermint/issues/3338);
- Denial of Service 2 is independent of Denial of Service 1 and can be executed
without it.
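
A toy illustration of why moving the reservation to `InitPeer` keeps the map
bounded. The types below are deliberately simplified (plain strings instead of
`p2p.Peer`, and a `Reclaim` helper standing in for the reactor's cleanup in
`RemovePeer`); it is a sketch of the idea, not the actual reactor code:

```go
package main

import (
	"fmt"
	"sync"
)

const maxActiveIDs = 65535 // the node panics once the ID table is full

// ids is a simplified stand-in for the mempool reactor's mempoolIDs table.
type ids struct {
	mtx    sync.Mutex
	next   uint16
	active map[string]uint16
}

func (i *ids) Reserve(peer string) {
	i.mtx.Lock()
	defer i.mtx.Unlock()
	if len(i.active) >= maxActiveIDs {
		panic("no available IDs") // the condition the fix keeps unreachable
	}
	i.next++
	i.active[peer] = i.next
}

func (i *ids) Reclaim(peer string) {
	i.mtx.Lock()
	defer i.mtx.Unlock()
	delete(i.active, peer)
}

type reactor struct{ ids *ids }

// InitPeer runs before the connection (MConnection) is started, so any peer
// that can later reach RemovePeer already holds a reservation to give back.
func (r *reactor) InitPeer(peer string)   { r.ids.Reserve(peer) }
func (r *reactor) AddPeer(peer string)    { /* start broadcast routine; no longer reserves */ }
func (r *reactor) RemovePeer(peer string) { r.ids.Reclaim(peer) }

func main() {
	r := &reactor{ids: &ids{active: map[string]uint16{}}}
	// Simulate many connection attempts that fail before AddPeer is reached.
	for n := 0; n < 100000; n++ {
		r.InitPeer("peer")
		r.RemovePeer("peer")
	}
	fmt.Println(len(r.ids.active)) // 0: every reservation was reclaimed
}
```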
**All clients are recommended to upgrade**
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
- [p2p] Limit the number of incoming connections (@melekes)
## v0.32.9
_January 9, 2020_


CHANGELOG_PENDING.md (+1, -1)

@@ -1,4 +1,4 @@
## v0.32.10
## v0.32.11
\*\*


consensus/reactor.go (+1, -1)

@@ -57,7 +57,7 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options
metrics: NopMetrics(),
}
conR.updateFastSyncingMetric()
conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)
for _, option := range options {
option(conR)


evidence/reactor.go (+1, -1)

@@ -34,7 +34,7 @@ func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor {
evR := &EvidenceReactor{
evpool: evpool,
}
evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR)
evR.BaseReactor = *p2p.NewBaseReactor("Evidence", evR)
return evR
}


mempool/reactor.go (+8, -3)

@@ -47,7 +47,7 @@ type mempoolIDs struct {
activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}
// Reserve searches for the next unused ID and assignes it to the
// Reserve searches for the next unused ID and assigns it to the
// peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
ids.mtx.Lock()
@@ -110,10 +110,16 @@ func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
mempool: mempool,
ids: newMempoolIDs(),
}
memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
return memR
}
// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
memR.ids.ReserveForPeer(peer)
return peer
}
// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
memR.Logger = l
@@ -142,7 +148,6 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
memR.ids.ReserveForPeer(peer)
go memR.broadcastTxRoutine(peer)
}


mempool/reactor_test.go (+18, -0)

@@ -223,3 +223,21 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
ids.ReserveForPeer(peer)
})
}
func TestDontExhaustMaxActiveIDs(t *testing.T) {
config := cfg.TestConfig()
const N = 1
reactors := makeAndConnectReactors(config, N)
defer func() {
for _, r := range reactors {
r.Stop()
}
}()
reactor := reactors[0]
for i := 0; i < maxActiveIDs+1; i++ {
peer := mock.NewPeer(nil)
reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
reactor.AddPeer(peer)
}
}

node/node.go (+5, -0)

@@ -467,6 +467,11 @@ func createTransport(
}
p2p.MultiplexTransportConnFilters(connFilters...)(transport)
// Limit the number of incoming connections.
max := config.P2P.MaxNumInboundPeers
p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
return transport, peerFilters
}


p2p/pex/pex_reactor.go (+1, -1)

@@ -130,7 +130,7 @@ func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
lastReceivedRequests: cmn.NewCMap(),
crawlPeerInfos: make(map[p2p.ID]crawlPeerInfo),
}
r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
r.BaseReactor = *p2p.NewBaseReactor("PEX", r)
return r
}


p2p/transport.go (+14, -2)

@@ -7,6 +7,7 @@ import (
"time"
"github.com/pkg/errors"
"golang.org/x/net/netutil"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/p2p/conn"
@@ -122,11 +123,18 @@ func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
return func(mt *MultiplexTransport) { mt.resolver = resolver }
}
// MultiplexTransportMaxIncomingConnections sets the maximum number of
// simultaneous connections (incoming). Default: 0 (unlimited)
func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n }
}
// MultiplexTransport accepts and dials tcp connections and upgrades them to
// multiplexed peers.
type MultiplexTransport struct {
netAddr NetAddress
listener net.Listener
netAddr NetAddress
listener net.Listener
maxIncomingConnections int // see MaxIncomingConnections
acceptc chan accept
closec chan struct{}
@@ -240,6 +248,10 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error {
return err
}
if mt.maxIncomingConnections > 0 {
ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
}
mt.netAddr = addr
mt.listener = ln


p2p/transport_test.go (+45, -0)

@@ -5,6 +5,7 @@ import (
"math/rand"
"net"
"reflect"
"strings"
"testing"
"time"
@@ -134,6 +135,50 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
}
}
func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
mt := newMultiplexTransport(
emptyNodeInfo(),
NodeKey{
PrivKey: ed25519.GenPrivKey(),
},
)
id := mt.nodeKey.ID()
MultiplexTransportMaxIncomingConnections(0)(mt)
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
if err := mt.Listen(*addr); err != nil {
t.Fatal(err)
}
errc := make(chan error)
go func() {
addr := NewNetAddress(id, mt.listener.Addr())
_, err := addr.Dial()
if err != nil {
errc <- err
return
}
close(errc)
}()
if err := <-errc; err != nil {
t.Errorf("connection failed: %v", err)
}
_, err = mt.Accept(peerConfig{})
if err == nil || !strings.Contains(err.Error(), "connection reset by peer") {
t.Errorf("expected connection reset by peer error, got %v", err)
}
}
func TestTransportMultiplexAcceptMultiple(t *testing.T) {
mt := testSetupMultiplexTransport(t)
laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())


version/version.go (+1, -1)

@@ -21,7 +21,7 @@ const (
// XXX: Don't change the name of this variable or you will break
// automation :)
TMCoreSemVer = "0.32.9"
TMCoreSemVer = "0.32.10"
// ABCISemVer is the semantic version of the ABCI library
ABCISemVer = "0.16.1"

