Merge pull request #6397 from filecoin-project/fix/some-flaky-tests
Fix some flaky tests
Commit ba654285f0
@@ -159,7 +159,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
         build.Clock.Sleep(blocktime)
     }

-    checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) {
+    checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) {
         p, err := client.StateMinerPower(ctx, ma, tsk)
         require.NoError(t, err)

@@ -174,6 +174,22 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {

         act, err := mst.DeadlineCronActive()
         require.NoError(t, err)
+
+        if tsk != types.EmptyTSK {
+            ts, err := client.ChainGetTipSet(ctx, tsk)
+            require.NoError(t, err)
+            di, err := mst.DeadlineInfo(ts.Height())
+            require.NoError(t, err)
+
+            // cron happened on the same epoch some other condition would have happened
+            if di.Open == ts.Height() {
+                act, err := mst.DeadlineCronActive()
+                require.NoError(t, err)
+                require.Equal(t, activeIfCron, act)
+                return
+            }
+        }
+
         require.Equal(t, active, act)
     }

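Note on the new `activeIfCron` argument: as the added comment says, the deadline cron can fire in the very epoch the test is inspecting, so when the checked tipset sits exactly at the deadline's open epoch the helper asserts the alternative expectation instead of the usual one. Below is a minimal standalone sketch of that branching only, with invented names and plain integers standing in for the lotus types; the real helper reads its values from `StateMinerPower`, `DeadlineCronActive`, and `DeadlineInfo` as shown in the hunk.

```go
package main

import "fmt"

// expectedCronActive mirrors the branch added to checkMiner above: if the
// epoch being inspected is exactly the deadline's open epoch, cron may have
// already run in that tipset, so the "activeIfCron" expectation applies.
func expectedCronActive(active, activeIfCron bool, checkEpoch, deadlineOpen int64) bool {
	if checkEpoch == deadlineOpen {
		return activeIfCron
	}
	return active
}

func main() {
	fmt.Println(expectedCronActive(true, false, 100, 100)) // false: cron ran this epoch
	fmt.Println(expectedCronActive(true, false, 99, 100))  // true: cron not yet due
}
```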
@@ -181,7 +197,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
     {
         uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK)
         require.NoError(t, err)
-        checkMiner(maddrB, types.NewInt(0), true, uts.Key())
+        checkMiner(maddrB, types.NewInt(0), true, true, uts.Key())
     }

     nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
@@ -197,19 +213,19 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
     require.NoError(t, err)

     // first round of miner checks
-    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-    checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, types.EmptyTSK)
+    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+    checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)

-    checkMiner(maddrB, types.NewInt(0), false, types.EmptyTSK)
-    checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
-    checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+    checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
+    checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
+    checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)

     // pledge sectors on minerB/minerD, stop post on minerC
     pledgeSectors(t, ctx, minerB, sectersB, 0, nil)
-    checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)
+    checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)

     pledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
-    checkMiner(maddrD, types.NewInt(0), true, types.EmptyTSK)
+    checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)

     minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()

@@ -259,7 +275,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
         build.Clock.Sleep(blocktime)
     }

-    checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
+    checkMiner(maddrE, types.NewInt(0), true, true, types.EmptyTSK)

     // go through rest of the PP
     for {
@@ -274,11 +290,11 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
     }

     // second round of miner checks
-    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-    checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
-    checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, types.EmptyTSK)
-    checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK)
-    checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+    checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
+    checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, true, types.EmptyTSK)
+    checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
+    checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)

     // disable post on minerB
     minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -329,7 +345,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
         require.NoError(t, err)
         require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)

-        checkMiner(maddrD, types.NewInt(0), true, r.TipSet)
+        // assert inactive if the message landed in the tipset we run cron in
+        checkMiner(maddrD, types.NewInt(0), true, false, r.TipSet)
     }

     // go through another PP
@@ -345,8 +362,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
     }

     // third round of miner checks
-    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK)
-    checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
-    checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)
-    checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
+    checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+    checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
+    checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
+    checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
 }
@@ -39,25 +39,29 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
     }

     bstate := ts.ParentState()
-    bheight := ts.Height()
+    pts, err := sm.cs.LoadTipSet(ts.Parents())
+    if err != nil {
+        return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
+    }
+    pheight := pts.Height()

     // If we have to run an expensive migration, and we're not at genesis,
     // return an error because the migration will take too long.
     //
     // We allow this at height 0 for at-genesis migrations (for testing).
-    if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) {
+    if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) {
         return nil, ErrExpensiveFork
     }

     // Run the (not expensive) migration.
-    bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts)
+    bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts)
     if err != nil {
         return nil, fmt.Errorf("failed to handle fork: %w", err)
     }

     vmopt := &vm.VMOpts{
         StateBase: bstate,
-        Epoch:     bheight,
+        Epoch:     pheight + 1,
         Rand:      store.NewChainRand(sm.cs, ts.Cids()),
         Bstore:    sm.cs.StateBlockstore(),
         Syscalls:  sm.cs.VMSys(),
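The switch from `bheight-1` to the parent tipset's real height matters when null rounds separate `ts` from its parent: `ts.Height()-1` is then an epoch with no tipset at all, while `pts.Height()` is the epoch of the state the call actually executes on, and `pheight+1` is the first epoch after it. A small worked example with made-up heights:

```go
package main

import "fmt"

func main() {
	// Suppose the tipset we call on is at height 105 but its parent tipset is
	// at height 100, because epochs 101-104 were null rounds.
	tsHeight := int64(105)
	parentHeight := int64(100)

	fmt.Println("old fork epoch:", tsHeight-1)     // 104: no tipset exists at this epoch
	fmt.Println("new fork epoch:", parentHeight)   // 100: height of the parent state
	fmt.Println("new VM epoch:", parentHeight+1)   // 101: first epoch after that state
}
```

Without null rounds the two computations agree, which is presumably why the difference only surfaced as occasional flakiness.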
@@ -155,6 +159,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
         return nil, xerrors.Errorf("computing tipset state: %w", err)
     }

+    state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
+    if err != nil {
+        return nil, fmt.Errorf("failed to handle fork: %w", err)
+    }
+
     r := store.NewChainRand(sm.cs, ts.Cids())

     if span.IsRecordingEvents() {
@@ -167,7 +176,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri

     vmopt := &vm.VMOpts{
         StateBase: state,
-        Epoch:     ts.Height(),
+        Epoch:     ts.Height() + 1,
         Rand:      r,
         Bstore:    sm.cs.StateBlockstore(),
         Syscalls:  sm.cs.VMSys(),
@@ -433,12 +433,18 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
         tu.t.Fatal(err)
     }

-    // TODO: some sort of timeout?
-    for n := range hc {
-        for _, c := range n {
-            if c.Val.Equals(target) {
-                return
-            }
-        }
-    }
+    timeout := time.After(5 * time.Second)
+
+    for {
+        select {
+        case n := <-hc:
+            for _, c := range n {
+                if c.Val.Equals(target) {
+                    return
+                }
+            }
+        case <-timeout:
+            tu.t.Fatal("waitUntilSyncTarget timeout")
+        }
+    }
 }
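The rewritten loop is the usual pattern for bounding a channel wait in a test: create the `time.After` timer once, outside the loop, so the deadline is absolute rather than per-iteration, then select between new updates and the timeout. A self-contained illustration of the same pattern outside the lotus test harness:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor blocks until the wanted value arrives on updates, or fails after limit.
// The timer is created once so the overall wait is bounded even if updates keep arriving.
func waitFor(updates <-chan int, want int, limit time.Duration) error {
	timeout := time.After(limit)
	for {
		select {
		case v := <-updates:
			if v == want {
				return nil
			}
		case <-timeout:
			return fmt.Errorf("timed out after %s waiting for %d", limit, want)
		}
	}
}

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3
	fmt.Println(waitFor(ch, 3, time.Second)) // <nil>
}
```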
@@ -575,15 +581,20 @@ func TestSyncFork(t *testing.T) {
     tu.loadChainToNode(p1)
     tu.loadChainToNode(p2)

-    phead := func() {
+    printHead := func() {
         h1, err := tu.nds[1].ChainHead(tu.ctx)
         require.NoError(tu.t, err)

         h2, err := tu.nds[2].ChainHead(tu.ctx)
         require.NoError(tu.t, err)

-        fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height())
-        fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height())
+        w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1)
+        require.NoError(tu.t, err)
+        w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2)
+        require.NoError(tu.t, err)
+
+        fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1)
+        fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2)
         //time.Sleep(time.Second * 2)
         fmt.Println()
         fmt.Println()
@@ -591,7 +602,7 @@ func TestSyncFork(t *testing.T) {
         fmt.Println()
     }

-    phead()
+    printHead()

     base := tu.g.CurTipset
     fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
@@ -611,6 +622,8 @@ func TestSyncFork(t *testing.T) {
     fmt.Println("A: ", a.Cids(), a.TipSet().Height())
     fmt.Println("B: ", b.Cids(), b.TipSet().Height())

+    printHead()
+
     // Now for the fun part!!

     require.NoError(t, tu.mn.LinkAll())
@@ -618,7 +631,7 @@ func TestSyncFork(t *testing.T) {
     tu.waitUntilSyncTarget(p1, b.TipSet())
     tu.waitUntilSyncTarget(p2, b.TipSet())

-    phead()
+    printHead()
 }

 // This test crafts a tipset with 2 blocks, A and B.
@@ -229,7 +229,7 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat
 func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
     var err error
     var res []byte
-    if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 {
+    if randEpoch > build.UpgradeHyperdriveHeight {
         res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
     } else {
         res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
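Switching the guard from a network-version comparison to a direct epoch comparison can change behaviour at exactly the upgrade epoch, assuming (as is usual) that the new network version takes effect at heights greater than or equal to the upgrade height: `>= Version13` is already true at the upgrade epoch itself, while `> UpgradeHyperdriveHeight` only becomes true one epoch later. A toy comparison of the two predicates, with a made-up upgrade height:

```go
package main

import "fmt"

const upgradeHyperdriveHeight = int64(1000) // made-up upgrade epoch, for illustration only

// networkVersionAt is a stand-in for rt.vm.GetNtwkVersion, assuming the new
// version applies from the upgrade epoch onwards.
func networkVersionAt(epoch int64) int {
	if epoch >= upgradeHyperdriveHeight {
		return 13
	}
	return 12
}

func main() {
	for _, e := range []int64{999, 1000, 1001} {
		byVersion := networkVersionAt(e) >= 13 // old predicate
		byEpoch := e > upgradeHyperdriveHeight // new predicate
		fmt.Printf("epoch %d: version-check=%v epoch-check=%v\n", e, byVersion, byEpoch)
	}
	// Only epoch 1000 differs: the version check flips at the upgrade epoch itself,
	// the epoch check one epoch later.
}
```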
extern/sector-storage/mock/mock.go (vendored, 27 changes)
@@ -34,7 +34,9 @@ type SectorMgr struct {
     lk sync.Mutex
 }

-type mockVerifProver struct{}
+type mockVerifProver struct {
+    aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies
+}

 func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
     sectors := make(map[abi.SectorID]*sectorState)
@@ -522,7 +524,19 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
         }
     }

-    return bytes.Equal(aggregate.Proof, out), nil
+    ok := bytes.Equal(aggregate.Proof, out)
+    if !ok {
+        genInfo, found := m.aggregates[string(aggregate.Proof)]
+        if !found {
+            log.Errorf("BAD AGGREGATE: saved generate inputs not found; agg.Proof: %x; expected: %x", aggregate.Proof, out)
+        } else {
+            log.Errorf("BAD AGGREGATE (1): agg.Proof: %x; expected: %x", aggregate.Proof, out)
+            log.Errorf("BAD AGGREGATE (2): Verify Infos: %+v", aggregate.Infos)
+            log.Errorf("BAD AGGREGATE (3): Generate Infos: %+v", genInfo.Infos)
+        }
+    }
+
+    return ok, nil
 }

 func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
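This failure path relies on `AggregateSealProofs` recording its inputs keyed by the proof bytes it returned (see the next hunk), so a later failed verification can print both the verify-time and the generate-time infos side by side. A cut-down, self-contained sketch of that record-then-diagnose idea, with invented types standing in for the proof structs:

```go
package main

import (
	"bytes"
	"fmt"
)

// aggregates plays the role of mockVerifProver.aggregates: inputs recorded at
// generation time, keyed by the proof bytes that were handed back.
var aggregates = map[string]string{}

func generate(inputs string) []byte {
	proof := []byte("proof-for-" + inputs) // deterministic stand-in for the real aggregation
	aggregates[string(proof)] = inputs
	return proof
}

func verify(proof, expected []byte) bool {
	ok := bytes.Equal(proof, expected)
	if !ok {
		if gen, found := aggregates[string(proof)]; found {
			fmt.Printf("BAD AGGREGATE: got %q, expected %q, generate inputs: %q\n", proof, expected, gen)
		} else {
			fmt.Printf("BAD AGGREGATE: got %q, expected %q, generate inputs not recorded\n", proof, expected)
		}
	}
	return ok
}

func main() {
	p := generate("sectors 1-3")
	fmt.Println(verify(p, []byte("proof-for-sectors 1-4"))) // mismatch: prints both sides, returns false
}
```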
@@ -533,6 +547,8 @@ func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealV
         }
     }

+    m.aggregates[string(out)] = aggregateInfo
+
     return out, nil
 }

@@ -592,8 +608,11 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context,
     return []uint64{0}, nil
 }

-var MockVerifier = mockVerifProver{}
-var MockProver = mockVerifProver{}
+var MockVerifier = mockVerifProver{
+    aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{},
+}
+
+var MockProver = MockVerifier

 var _ storage.Sealer = &SectorMgr{}
 var _ ffiwrapper.Verifier = MockVerifier
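A detail worth noting about `var MockProver = MockVerifier`: copying a Go struct that contains a map copies the map header, not the entries, so the prover and the verifier share the one `aggregates` map, and proofs recorded through `MockProver.AggregateSealProofs` are visible to `MockVerifier.VerifyAggregateSeals`. A minimal demonstration with an invented struct:

```go
package main

import "fmt"

// recorder is a cut-down analogue of mockVerifProver: a value type that holds a map.
type recorder struct {
	aggregates map[string]string
}

func main() {
	verifier := recorder{aggregates: map[string]string{}}
	prover := verifier // copies the struct, but both copies reference the same map

	prover.aggregates["proof-1"] = "inputs recorded at aggregation time"
	fmt.Println(verifier.aggregates["proof-1"]) // visible through the other copy
}
```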