
statesync: ensure test network properly configured (#7026)

This test reliably gets hung up on network configuration (which may be
a real issue), but its network setup is hand-cranked, and we should
ensure that the test focuses on its core assertions and doesn't fail for
test-architecture reasons.
Sam Kleinman, 3 years ago (committed by GitHub)
Parent commit: 23fe6fd2f9
3 changed files with 37 additions and 8 deletions:
  1. internal/statesync/dispatcher.go (+13, −0)
  2. internal/statesync/reactor.go (+5, −3)
  3. internal/statesync/reactor_test.go (+19, −5)

internal/statesync/dispatcher.go (+13, −0)

@@ -297,3 +297,16 @@ func (l *peerList) All() []types.NodeID {
 	defer l.mtx.Unlock()
 	return l.peers
 }
+
+func (l *peerList) Contains(id types.NodeID) bool {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+
+	for _, p := range l.peers {
+		if id == p {
+			return true
+		}
+	}
+
+	return false
+}
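Not part of the commit, but worth noting how the new method is meant to be used: Contains lets callers make peer registration idempotent, which is exactly what the updated test below does. A minimal sketch, where ensurePeers is a hypothetical helper built on this file's peerList Contains and Append methods:

// ensurePeers is a hypothetical helper (not in the commit): it registers
// each peer only once, using the new Contains check before Append.
func ensurePeers(l *peerList, ids ...types.NodeID) {
	for _, id := range ids {
		if !l.Contains(id) {
			l.Append(id)
		}
	}
}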

internal/statesync/reactor.go (+5, −3)

@@ -254,11 +254,11 @@ func (r *Reactor) OnStop() {
 	// Wait for all p2p Channels to be closed before returning. This ensures we
 	// can easily reason about synchronization of all p2p Channels and ensure no
 	// panics will occur.
-	<-r.peerUpdates.Done()
 	<-r.snapshotCh.Done()
 	<-r.chunkCh.Done()
 	<-r.blockCh.Done()
 	<-r.paramsCh.Done()
+	<-r.peerUpdates.Done()
 }

 // Sync runs a state sync, fetching snapshots and providing chunks to the
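For readers unfamiliar with the pattern this hunk reorders: each channel exposes a Done() signal that is closed once its processing goroutine exits, and OnStop blocks on every signal so no goroutine outlives the reactor. A minimal standalone sketch of that pattern; the worker type and its methods are illustrative assumptions, not the reactor's actual API:

// Illustrative sketch of the Done-channel shutdown pattern.
type worker struct{ done chan struct{} }

func newWorker() *worker { return &worker{done: make(chan struct{})} }

func (w *worker) run() {
	defer close(w.done) // closing unblocks everyone waiting on Done()
	// ... consume messages until shut down ...
}

// Done returns a channel that is closed once run has returned, so a
// caller's stop path can simply block on <-w.Done().
func (w *worker) Done() <-chan struct{} { return w.done }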
@@ -1013,9 +1013,11 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error {
 		iter++
 		select {
 		case <-ctx.Done():
-			return fmt.Errorf("operation canceled while waiting for peers after %s", time.Since(startAt))
+			return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]",
+				time.Since(startAt).Seconds(), r.peers.Len(), numPeers)
 		case <-r.closeCh:
-			return fmt.Errorf("shutdown while waiting for peers after %s", time.Since(startAt))
+			return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]",
+				time.Since(startAt).Seconds(), r.peers.Len(), numPeers)
 		case <-t.C:
 			continue
 		case <-logT.C:
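The change above keeps the loop's structure and only enriches the failure messages: a canceled or shut-down wait now reports elapsed time plus how many of the required peers were actually seen, so a hung test points at the network setup rather than failing opaquely. A simplified standalone sketch of the same shape (the function name, its parameters, and the poll interval are assumptions for illustration):

// waitForCount is a simplified sketch of a progress-reporting wait loop.
func waitForCount(ctx context.Context, have func() int, want int) error {
	start := time.Now()
	t := time.NewTicker(100 * time.Millisecond) // interval is illustrative
	defer t.Stop()
	for have() < want {
		select {
		case <-ctx.Done():
			return fmt.Errorf("canceled while waiting after %.2fs [%d/%d]",
				time.Since(start).Seconds(), have(), want)
		case <-t.C:
		}
	}
	return nil
}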


internal/statesync/reactor_test.go (+19, −5)

@@ -525,25 +525,39 @@ func TestReactor_StateProviderP2P(t *testing.T) {
 	rts.reactor.cfg.UseP2P = true
 	rts.reactor.cfg.TrustHeight = 1
 	rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash())
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+
+	for _, p := range []types.NodeID{peerA, peerB} {
+		if !rts.reactor.peers.Contains(p) {
+			rts.reactor.peers.Append(p)
+		}
+	}
+	require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured")
+
+	bctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+
+	ictx, cancel := context.WithTimeout(bctx, time.Second)
+	defer cancel()

 	rts.reactor.mtx.Lock()
-	err := rts.reactor.initStateProvider(ctx, factory.DefaultTestChainID, 1)
+	err := rts.reactor.initStateProvider(ictx, factory.DefaultTestChainID, 1)
 	rts.reactor.mtx.Unlock()
 	require.NoError(t, err)
 	rts.reactor.syncer.stateProvider = rts.reactor.stateProvider

-	appHash, err := rts.reactor.stateProvider.AppHash(ctx, 5)
+	actx, cancel := context.WithTimeout(bctx, 10*time.Second)
+	defer cancel()
+
+	appHash, err := rts.reactor.stateProvider.AppHash(actx, 5)
 	require.NoError(t, err)
 	require.Len(t, appHash, 32)

-	state, err := rts.reactor.stateProvider.State(ctx, 5)
+	state, err := rts.reactor.stateProvider.State(actx, 5)
 	require.NoError(t, err)
 	require.Equal(t, appHash, state.AppHash)
 	require.Equal(t, types.DefaultConsensusParams(), &state.ConsensusParams)

-	commit, err := rts.reactor.stateProvider.Commit(ctx, 5)
+	commit, err := rts.reactor.stateProvider.Commit(actx, 5)
 	require.NoError(t, err)
 	require.Equal(t, commit.BlockID, state.LastBlockID)
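The test now layers its contexts instead of using a single 10-second timeout: a cancelable base context (bctx), a one-second context (ictx) for state-provider initialization, and a ten-second context (actx) for the network-bound fetches, so each phase fails on its own deadline instead of hanging the whole test. A standalone sketch of the pattern; doInit and doFetch are hypothetical placeholders for the phases:

// runPhases sketches the layered-context pattern used in the test above.
func runPhases(parent context.Context) error {
	bctx, cancel := context.WithCancel(parent)
	defer cancel() // cancels every context derived from bctx

	ictx, icancel := context.WithTimeout(bctx, time.Second) // init should be fast
	defer icancel()
	if err := doInit(ictx); err != nil { // hypothetical init phase
		return err
	}

	actx, acancel := context.WithTimeout(bctx, 10*time.Second) // network fetches
	defer acancel()
	return doFetch(actx) // hypothetical fetch phase
}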

