From 42ed5d75a5fd412ec57b7ac0e5ab68ba2a2a68ed Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Tue, 5 Oct 2021 10:49:26 -0400
Subject: [PATCH] consensus: wait until peerUpdates channel is closed to close
 remaining peers (#7058) (#7060)

The race occurred as a result of a goroutine launched by `processPeerUpdate`
racing with the `OnStop` method. The `processPeerUpdates` goroutine deletes
from the map as `OnStop` is reading from it. This change updates the `OnStop`
method to wait for the peer updates channel to be done before closing the
peers. It also copies the map contents to a new map so that it will not
conflict with the view of the map that the goroutine created in
`processPeerUpdate` sees.
---
 internal/consensus/reactor.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go
index ca3f5d353..22bd7576e 100644
--- a/internal/consensus/reactor.go
+++ b/internal/consensus/reactor.go
@@ -230,16 +230,14 @@ func (r *Reactor) OnStop() {
 	}
 
 	r.mtx.Lock()
-	peers := r.peers
-	r.mtx.Unlock()
-
-	// wait for all spawned peer goroutines to gracefully exit
-	for _, ps := range peers {
-		ps.closer.Close()
-	}
-	for _, ps := range peers {
-		ps.broadcastWG.Wait()
+	// Close and wait for each of the peers to shutdown.
+	// This is safe to perform with the lock since none of the peers require the
+	// lock to complete any of the methods that the waitgroup is waiting on.
+	for _, state := range r.peers {
+		state.closer.Close()
+		state.broadcastWG.Wait()
 	}
+	r.mtx.Unlock()
 
 	// Close the StateChannel goroutine separately since it uses its own channel
 	// to signal closure.
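
Note (not part of the patch): the sketch below is a minimal, self-contained Go
model of the shutdown pattern this hunk adopts: close each peer and wait for
its broadcast goroutines while still holding the reactor mutex, instead of
copying the map reference and unlocking first. The types here (reactor,
peerState, closer, newCloser) are simplified stand-ins and not the actual
tendermint definitions; only the field names mirror the diff.

package main

import "sync"

// closer is a hypothetical stand-in for the signal type used in the patch:
// closing its channel tells a peer's broadcast goroutines to exit.
type closer struct {
	once sync.Once
	ch   chan struct{}
}

func newCloser() *closer { return &closer{ch: make(chan struct{})} }

func (c *closer) Close() { c.once.Do(func() { close(c.ch) }) }

// peerState mirrors only the fields the hunk touches: a closer plus a
// WaitGroup tracking the peer's spawned broadcast goroutines.
type peerState struct {
	closer      *closer
	broadcastWG sync.WaitGroup
}

// reactor is a reduced model of the consensus reactor: a mutex-guarded map
// of peers that other goroutines (such as a peer-update handler) may mutate.
type reactor struct {
	mtx   sync.Mutex
	peers map[string]*peerState
}

// onStop applies the pattern from the patch: iterate the peers map while
// holding the lock, closing each peer and waiting for its goroutines. This
// is deadlock-free only because the goroutines being waited on never acquire
// r.mtx, and holding the lock excludes concurrent map writers for the whole
// shutdown, which removes the race the old copy-then-unlock code had.
func (r *reactor) onStop() {
	r.mtx.Lock()
	for _, state := range r.peers {
		state.closer.Close()
		state.broadcastWG.Wait()
	}
	r.mtx.Unlock()
}

func main() {
	r := &reactor{peers: make(map[string]*peerState)}

	// Register one peer with a single stand-in broadcast goroutine that
	// exits when the peer's closer fires.
	ps := &peerState{closer: newCloser()}
	r.peers["peer-1"] = ps
	ps.broadcastWG.Add(1)
	go func() {
		defer ps.broadcastWG.Done()
		<-ps.closer.ch
	}()

	r.onStop() // returns only after the broadcast goroutine has exited
}

The key design point, recorded in the new comment in the diff, is that waiting
inside the critical section is safe only as long as the peer goroutines never
need the reactor mutex to finish.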