Merge pull request #6397 from filecoin-project/fix/some-flaky-tests

Fix some flaky tests
Łukasz Magiera 2021-06-04 22:00:29 +02:00 committed by GitHub
commit ba654285f0
5 changed files with 98 additions and 40 deletions


@@ -159,7 +159,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		build.Clock.Sleep(blocktime)
 	}
-	checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) {
+	checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) {
 		p, err := client.StateMinerPower(ctx, ma, tsk)
 		require.NoError(t, err)
@@ -174,6 +174,22 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		act, err := mst.DeadlineCronActive()
 		require.NoError(t, err)
+		if tsk != types.EmptyTSK {
+			ts, err := client.ChainGetTipSet(ctx, tsk)
+			require.NoError(t, err)
+			di, err := mst.DeadlineInfo(ts.Height())
+			require.NoError(t, err)
+			// cron ran in the same epoch in which some other condition would have taken effect
+			if di.Open == ts.Height() {
+				act, err := mst.DeadlineCronActive()
+				require.NoError(t, err)
+				require.Equal(t, activeIfCron, act)
+				return
+			}
+		}
 		require.Equal(t, active, act)
 	}
@@ -181,7 +197,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	{
 		uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK)
 		require.NoError(t, err)
-		checkMiner(maddrB, types.NewInt(0), true, uts.Key())
+		checkMiner(maddrB, types.NewInt(0), true, true, uts.Key())
 	}
 	nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
@@ -197,19 +213,19 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	require.NoError(t, err)
 	// first round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-	checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
-	checkMiner(maddrB, types.NewInt(0), false, types.EmptyTSK)
-	checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
-	checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+	checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
+	checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
+	checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
 	// pledge sectors on minerB/minerD, stop post on minerC
 	pledgeSectors(t, ctx, minerB, sectersB, 0, nil)
-	checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)
+	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
 	pledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
-	checkMiner(maddrD, types.NewInt(0), true, types.EmptyTSK)
+	checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
 	minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -259,7 +275,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		build.Clock.Sleep(blocktime)
 	}
-	checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
+	checkMiner(maddrE, types.NewInt(0), true, true, types.EmptyTSK)
 	// go through rest of the PP
 	for {
@@ -274,11 +290,11 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	}
 	// second round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-	checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
-	checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, types.EmptyTSK)
-	checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK)
-	checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
+	checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, true, types.EmptyTSK)
+	checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
+	checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
 	// disable post on minerB
 	minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -329,7 +345,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		require.NoError(t, err)
 		require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
-		checkMiner(maddrD, types.NewInt(0), true, r.TipSet)
+		// assert inactive if the message landed in the tipset we run cron in
+		checkMiner(maddrD, types.NewInt(0), true, false, r.TipSet)
 	}
 	// go through another PP
@@ -345,8 +362,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	}
 	// third round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-	checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
-	checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)
-	checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
+	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
+	checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
 }
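The extra activeIfCron parameter exists because these assertions can race with deadline cron: if the inspected tipset lands exactly on an epoch where a deadline opens, cron may already have toggled DeadlineCronActive. A minimal sketch of the same either/or assertion pattern, with hypothetical names (requireEither, eventFired) that are not part of this commit:

package testutil

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// requireEither accepts one expectation if a racing event fired in the
// inspected epoch and another if it did not, mirroring the active /
// activeIfCron split in checkMiner above.
func requireEither(t *testing.T, eventFired bool, ifFired, otherwise, actual bool) {
	t.Helper()
	if eventFired {
		require.Equal(t, ifFired, actual)
		return
	}
	require.Equal(t, otherwise, actual)
}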


@@ -39,25 +39,29 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
 	}
 	bstate := ts.ParentState()
-	bheight := ts.Height()
+	pts, err := sm.cs.LoadTipSet(ts.Parents())
+	if err != nil {
+		return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
+	}
+	pheight := pts.Height()
 	// If we have to run an expensive migration, and we're not at genesis,
 	// return an error because the migration will take too long.
 	//
 	// We allow this at height 0 for at-genesis migrations (for testing).
-	if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) {
+	if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) {
 		return nil, ErrExpensiveFork
 	}
 	// Run the (not expensive) migration.
-	bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts)
+	bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts)
 	if err != nil {
 		return nil, fmt.Errorf("failed to handle fork: %w", err)
 	}
 	vmopt := &vm.VMOpts{
 		StateBase: bstate,
-		Epoch:     bheight,
+		Epoch:     pheight + 1,
 		Rand:      store.NewChainRand(sm.cs, ts.Cids()),
 		Bstore:    sm.cs.StateBlockstore(),
 		Syscalls:  sm.cs.VMSys(),
@@ -155,6 +159,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 		return nil, xerrors.Errorf("computing tipset state: %w", err)
 	}
+	state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to handle fork: %w", err)
+	}
 	r := store.NewChainRand(sm.cs, ts.Cids())
 	if span.IsRecordingEvents() {
@@ -167,7 +176,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 	vmopt := &vm.VMOpts{
 		StateBase: state,
-		Epoch:     ts.Height(),
+		Epoch:     ts.Height() + 1,
 		Rand:      r,
 		Bstore:    sm.cs.StateBlockstore(),
 		Syscalls:  sm.cs.VMSys(),
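Both hunks fix epoch bookkeeping for simulated calls. Call builds on the parent state, and with null rounds the parent tipset can sit more than one epoch below ts.Height(), so bheight-1 was not a safe stand-in for the real parent height; the VM epoch likewise becomes one past the state it executes on (pheight+1 here, ts.Height()+1 in CallWithGas, which builds on the state after ts). A small illustration with assumed heights, not lotus code:

package main

import "fmt"

func main() {
	// Assumed heights for illustration: four null rounds precede ts.
	bheight := 105 // ts.Height()
	pheight := 100 // pts.Height()

	fmt.Println("old fork epoch (bheight-1):", bheight-1) // 104: a null round, no tipset sits there
	fmt.Println("new fork epoch (pheight):  ", pheight)   // 100: the epoch the parent actually sits at
	fmt.Println("new VM epoch (pheight+1):  ", pheight+1) // messages run in the epoch after their base state
}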


@@ -433,13 +433,19 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
 		tu.t.Fatal(err)
 	}
-	// TODO: some sort of timeout?
-	for n := range hc {
+	timeout := time.After(5 * time.Second)
+	for {
+		select {
+		case n := <-hc:
 			for _, c := range n {
 				if c.Val.Equals(target) {
 					return
 				}
 			}
+		case <-timeout:
+			tu.t.Fatal("waitUntilSyncTarget timeout")
+		}
 	}
 }
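This replaces the unbounded for n := range hc read and resolves the old "// TODO: some sort of timeout?": a missed head-change notification now fails the test after five seconds instead of hanging the suite. The same pattern as a standalone, hypothetical helper (generic, so not tied to lotus types):

package testutil

import (
	"testing"
	"time"
)

// waitFor reads from ch until match returns true, or fails the test once
// the deadline passes. time.After is armed once, outside the loop, so the
// timeout covers the whole wait rather than each individual receive.
func waitFor[T any](t *testing.T, ch <-chan T, d time.Duration, match func(T) bool) {
	t.Helper()
	deadline := time.After(d)
	for {
		select {
		case v := <-ch:
			if match(v) {
				return
			}
		case <-deadline:
			t.Fatal("timed out waiting for matching value")
		}
	}
}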
@@ -575,15 +581,20 @@ func TestSyncFork(t *testing.T) {
 	tu.loadChainToNode(p1)
 	tu.loadChainToNode(p2)
-	phead := func() {
+	printHead := func() {
 		h1, err := tu.nds[1].ChainHead(tu.ctx)
 		require.NoError(tu.t, err)
 		h2, err := tu.nds[2].ChainHead(tu.ctx)
 		require.NoError(tu.t, err)
-		fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height())
-		fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height())
+		w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1)
+		require.NoError(tu.t, err)
+		w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2)
+		require.NoError(tu.t, err)
+		fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1)
+		fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2)
 		//time.Sleep(time.Second * 2)
 		fmt.Println()
 		fmt.Println()
@@ -591,7 +602,7 @@ func TestSyncFork(t *testing.T) {
 		fmt.Println()
 	}
-	phead()
+	printHead()
 	base := tu.g.CurTipset
 	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
@@ -611,6 +622,8 @@ func TestSyncFork(t *testing.T) {
 	fmt.Println("A: ", a.Cids(), a.TipSet().Height())
 	fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+	printHead()
 	// Now for the fun part!!
 	require.NoError(t, tu.mn.LinkAll())
@@ -618,7 +631,7 @@ func TestSyncFork(t *testing.T) {
 	tu.waitUntilSyncTarget(p1, b.TipSet())
 	tu.waitUntilSyncTarget(p2, b.TipSet())
-	phead()
+	printHead()
 }
// This test crafts a tipset with 2 blocks, A and B.


@@ -229,7 +229,7 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat
 func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
 	var err error
 	var res []byte
-	if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 {
+	if randEpoch > build.UpgradeHyperdriveHeight {
 		res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
 	} else {
 		res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
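Comparing randEpoch against the upgrade height directly, rather than asking for the network version at that epoch, also shifts the boundary epoch itself: assuming network.Version13 activates at build.UpgradeHyperdriveHeight, the two conditions disagree only when randEpoch equals the upgrade height, where the new check keeps looking back. A boundary illustration with an assumed upgrade height:

package main

import "fmt"

func main() {
	const upgrade = 100 // assumed stand-in for build.UpgradeHyperdriveHeight
	for _, e := range []int{99, 100, 101} {
		versionCheck := e >= upgrade // roughly GetNtwkVersion(ctx, e) >= network.Version13
		heightCheck := e > upgrade   // the condition after this change
		fmt.Printf("epoch %d: version-check=%v height-check=%v\n", e, versionCheck, heightCheck)
	}
}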


@@ -34,7 +34,9 @@ type SectorMgr struct {
 	lk sync.Mutex
 }
-type mockVerifProver struct{}
+type mockVerifProver struct {
+	aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies
+}
 func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
 	sectors := make(map[abi.SectorID]*sectorState)
@@ -522,7 +524,19 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
 		}
 	}
-	return bytes.Equal(aggregate.Proof, out), nil
+	ok := bytes.Equal(aggregate.Proof, out)
+	if !ok {
+		genInfo, found := m.aggregates[string(aggregate.Proof)]
+		if !found {
+			log.Errorf("BAD AGGREGATE: saved generate inputs not found; agg.Proof: %x; expected: %x", aggregate.Proof, out)
+		} else {
+			log.Errorf("BAD AGGREGATE (1): agg.Proof: %x; expected: %x", aggregate.Proof, out)
+			log.Errorf("BAD AGGREGATE (2): Verify Infos: %+v", aggregate.Infos)
+			log.Errorf("BAD AGGREGATE (3): Generate Infos: %+v", genInfo.Infos)
+		}
+	}
+	return ok, nil
 }
 func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
@@ -533,6 +547,8 @@ func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealV
 		}
 	}
+	m.aggregates[string(out)] = aggregateInfo
 	return out, nil
 }
@@ -592,8 +608,11 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context,
 	return []uint64{0}, nil
 }
-var MockVerifier = mockVerifProver{}
-var MockProver = mockVerifProver{}
+var MockVerifier = mockVerifProver{
+	aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{},
+}
+var MockProver = MockVerifier
 var _ storage.Sealer = &SectorMgr{}
 var _ ffiwrapper.Verifier = MockVerifier
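The mock now remembers the inputs behind every aggregate it produces, keyed by the proof bytes, so a failed VerifyAggregateSeals can log the generate-time and verify-time infos side by side. Note also why MockProver = MockVerifier works: Go maps are reference types, so copying the struct shares the single aggregates map between prover and verifier. A self-contained illustration of that sharing (hypothetical type, not the mock itself):

package main

import "fmt"

type verifProver struct {
	aggregates map[string]string
}

func main() {
	verifier := verifProver{aggregates: map[string]string{}}
	prover := verifier // struct copy, but both copies point at the same map

	// A write through the prover copy...
	prover.aggregates["proof-bytes"] = "generate inputs"

	// ...is visible through the verifier copy.
	fmt.Println(verifier.aggregates["proof-bytes"]) // prints "generate inputs"
}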