Merge pull request #11215 from filecoin-project/backport/nv21/feat/act-precommv2
Backport: feat: sealing: Switch to calling PreCommitSectorBatch2
commit a9396624a7
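For orientation only: the functional core of this backport is that the sealing pipeline no longer has a separate non-batching path — the BatchPreCommits toggle is removed and the PreCommit2 state always hands sectors to the precommit batcher (see the fsm.go and config hunks below). The following is a minimal, illustrative Go sketch of that flow; the state names mirror the diff, but the toy transition function and package are assumptions, not lotus code.

// Illustrative only: a toy model of the pipeline change in this backport.
// After PreCommit2, sectors no longer branch on a BatchPreCommits flag; they
// always go to the precommit batcher, which either accumulates them into a
// batch message or flushes immediately when the basefee is low.
// All names here are hypothetical stand-ins, not lotus APIs.
package main

import "fmt"

type sectorState string

const (
	preCommit2           sectorState = "PreCommit2"
	submitPreCommitBatch sectorState = "SubmitPreCommitBatch"
	preCommitBatchWait   sectorState = "PreCommitBatchWait"
)

// nextState sketches the new, unconditional transition: there is no longer a
// separate PreCommitting path guarded by a config flag.
func nextState(s sectorState) sectorState {
	switch s {
	case preCommit2:
		return submitPreCommitBatch
	case submitPreCommitBatch:
		return preCommitBatchWait
	default:
		return s
	}
}

func main() {
	fmt.Println(nextState(preCommit2)) // SubmitPreCommitBatch
}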
@@ -545,12 +545,6 @@ workflows:
         - build
       suite: itest-batch_deal
       target: "./itests/batch_deal_test.go"
-  - test:
-      name: test-itest-ccupgrade
-      requires:
-        - build
-      suite: itest-ccupgrade
-      target: "./itests/ccupgrade_test.go"
   - test:
       name: test-itest-cli
       requires:
@@ -876,12 +870,6 @@ workflows:
         - build
       suite: itest-remove_verifreg_datacap
       target: "./itests/remove_verifreg_datacap_test.go"
-  - test:
-      name: test-itest-sdr_upgrade
-      requires:
-        - build
-      suite: itest-sdr_upgrade
-      target: "./itests/sdr_upgrade_test.go"
   - test:
       name: test-itest-sealing_resources
       requires:
@@ -906,12 +894,6 @@ workflows:
         - build
       suite: itest-sector_import_simple
       target: "./itests/sector_import_simple_test.go"
-  - test:
-      name: test-itest-sector_make_cc_avail
-      requires:
-        - build
-      suite: itest-sector_make_cc_avail
-      target: "./itests/sector_make_cc_avail_test.go"
   - test:
       name: test-itest-sector_miner_collateral
       requires:
@@ -930,18 +912,6 @@ workflows:
         - build
       suite: itest-sector_pledge
       target: "./itests/sector_pledge_test.go"
-  - test:
-      name: test-itest-sector_prefer_no_upgrade
-      requires:
-        - build
-      suite: itest-sector_prefer_no_upgrade
-      target: "./itests/sector_prefer_no_upgrade_test.go"
-  - test:
-      name: test-itest-sector_revert_available
-      requires:
-        - build
-      suite: itest-sector_revert_available
-      target: "./itests/sector_revert_available_test.go"
   - test:
       name: test-itest-sector_terminate
       requires:
@@ -966,12 +936,6 @@ workflows:
         - build
       suite: itest-splitstore
       target: "./itests/splitstore_test.go"
-  - test:
-      name: test-itest-tape
-      requires:
-        - build
-      suite: itest-tape
-      target: "./itests/tape_test.go"
   - test:
       name: test-itest-verifreg
       requires:
@@ -581,12 +581,6 @@
 # env var: LOTUS_SEALING_DISABLECOLLATERALFALLBACK
 #DisableCollateralFallback = false
 
-# enable / disable precommit batching (takes effect after nv13)
-#
-# type: bool
-# env var: LOTUS_SEALING_BATCHPRECOMMITS
-#BatchPreCommits = true
-
 # maximum precommit batch size - batches will be sent immediately above this size
 #
 # type: int
@@ -636,7 +630,8 @@
 #CommitBatchSlack = "1h0m0s"
 
 # network BaseFee below which to stop doing precommit batching, instead
-# sending precommit messages to the chain individually
+# sending precommit messages to the chain individually. When the basefee is
+# below this threshold, precommit messages will get sent out immediately.
 #
 # type: types.FIL
 # env var: LOTUS_SEALING_BATCHPRECOMMITABOVEBASEFEE
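The config comment in the hunk above describes when precommit batching is bypassed: below the BatchPreCommitAboveBaseFee threshold, precommit messages go out individually. A minimal sketch of that decision follows, assuming a hypothetical helper name and example values; the actual lotus batcher logic is not part of this diff.

// Illustrative sketch, not lotus code: batch precommits only while the
// network basefee is at or above the configured threshold; below it,
// messages are sent out immediately.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

// shouldBatchPreCommits is a hypothetical helper mirroring the documented
// BatchPreCommitAboveBaseFee behaviour.
func shouldBatchPreCommits(baseFee, threshold big.Int) bool {
	return !baseFee.LessThan(threshold)
}

func main() {
	threshold := big.NewInt(320_000_000) // example threshold in attoFIL
	fmt.Println(shouldBatchPreCommits(big.NewInt(100), threshold))           // false -> send individually
	fmt.Println(shouldBatchPreCommits(big.NewInt(1_000_000_000), threshold)) // true  -> keep batching
}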
@@ -61,7 +61,6 @@ func TestBatchDealInput(t *testing.T) {
        sc.MaxSealingSectorsForDeals = 3
        sc.AlwaysKeepUnsealedCopy = true
        sc.WaitDealsDelay = time.Hour
-       sc.BatchPreCommits = false
        sc.AggregateCommits = false
 
        return sc, nil
@@ -1,131 +0,0 @@
-// stm: #integration
-package itests
-
-import (
-    "context"
-    "fmt"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/itests/kit"
-)
-
-func TestCCUpgrade(t *testing.T) {
-    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
-    //stm: @CHAIN_STATE_MINER_GET_INFO_001
-    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-
-    //stm: @MINER_SECTOR_LIST_001
-    kit.QuietMiningLogs()
-
-    n := runTestCCUpgrade(t)
-
-    t.Run("post", func(t *testing.T) {
-        ctx := context.Background()
-        ts, err := n.ChainHead(ctx)
-        require.NoError(t, err)
-        start := ts.Height()
-        // wait for a full proving period
-        t.Log("waiting for chain")
-
-        n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
-            if ts.Height() > start+abi.ChainEpoch(2880) {
-                return true
-            }
-            return false
-        })
-    })
-}
-
-func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
-    ctx := context.Background()
-    blockTime := 1 * time.Millisecond
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC())
-    ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
-    fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
-
-    miner.PledgeSectors(ctx, 1, 0, nil)
-    sl, err := miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-    require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
-
-    si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
-    require.NoError(t, err)
-    require.NotNil(t, si)
-    require.Less(t, 50000, int(si.Expiration))
-    require.True(t, si.ReplacedSectorAge == 0)
-
-    client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
-    //stm: @SECTOR_CC_UPGRADE_001
-    err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
-    require.NoError(t, err)
-
-    sl, err = miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-
-    dh := kit.NewDealHarness(t, client, miner, miner)
-    deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
-        Rseed:                        6,
-        SuspendUntilCryptoeconStable: true,
-    })
-    outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
-    kit.AssertFilesEqual(t, inPath, outPath)
-
-    status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
-    require.NoError(t, err)
-    assert.Equal(t, 1, len(status.Deals))
-
-    miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
-        CCUpgrade: {},
-    })
-
-    siUpdate, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
-    require.NoError(t, err)
-    require.NotNil(t, siUpdate)
-    require.True(t, siUpdate.SectorKeyCID != nil)
-    require.True(t, siUpdate.Activation > si.Activation)
-
-    return client
-}
-
-func TestCCUpgradeAndPoSt(t *testing.T) {
-    kit.QuietMiningLogs()
-    t.Run("upgrade and then post", func(t *testing.T) {
-        ctx := context.Background()
-        n := runTestCCUpgrade(t)
-        ts, err := n.ChainHead(ctx)
-        require.NoError(t, err)
-        start := ts.Height()
-        // wait for a full proving period
-        t.Log("waiting for chain")
-
-        n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
-            if ts.Height() > start+abi.ChainEpoch(2880) {
-                return true
-            }
-            return false
-        })
-    })
-}
@@ -18,7 +18,6 @@ import (
     "github.com/filecoin-project/go-state-types/builtin"
     minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
     "github.com/filecoin-project/go-state-types/exitcode"
-    "github.com/filecoin-project/go-state-types/network"
     miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
 
     "github.com/filecoin-project/lotus/api"
@@ -34,28 +33,6 @@ import (
 )
 
 // TestDeadlineToggling:
-// * spins up a v3 network (miner A)
-// * creates an inactive miner (miner B)
-// * creates another miner, pledges a sector, waits for power (miner C)
-//
-// * goes through v4 upgrade
-// * goes through PP
-// * creates minerD, minerE
-// * makes sure that miner B/D are inactive, A/C still are
-// * pledges sectors on miner B/D
-// * precommits a sector on minerE
-// * disables post on miner C
-// * goes through PP 0.5PP
-// * asserts that minerE is active
-// * goes through rest of PP (1.5)
-// * asserts that miner C loses power
-// * asserts that miner B/D is active and has power
-// * asserts that minerE is inactive
-// * disables post on miner B
-// * terminates sectors on miner D
-// * goes through another PP
-// * asserts that miner B loses power
-// * asserts that miner D loses power, is inactive
 func TestDeadlineToggling(t *testing.T) {
     //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
     //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
@@ -71,7 +48,6 @@ func TestDeadlineToggling(t *testing.T) {
     const sectorsC, sectorsD, sectorsB = 10, 9, 8
 
     var (
-        upgradeH      abi.ChainEpoch = 4000
         provingPeriod abi.ChainEpoch = 2880
         blocktime                    = 2 * time.Millisecond
     )
@@ -81,14 +57,14 @@ func TestDeadlineToggling(t *testing.T) {
 
     var (
         client kit.TestFullNode
-        minerA kit.TestMiner
-        minerB kit.TestMiner
-        minerC kit.TestMiner
-        minerD kit.TestMiner
-        minerE kit.TestMiner
+        minerA kit.TestMiner // A has some genesis sector, just keeps power
+        minerB kit.TestMiner // B pledges some sector, later fails some posts but stays alive
+        minerC kit.TestMiner // C pledges sectors, gains power, and later stops its PoSTs, but stays alive
+        minerD kit.TestMiner // D pledges sectors and later terminates them, losing all power, eventually deactivates cron
+        minerE kit.TestMiner // E pre-commits a sector but doesn't advance beyond that, cron should become inactive
     )
     opts := []kit.NodeOpt{kit.WithAllSubsystems()}
-    ens := kit.NewEnsemble(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeH)).
+    ens := kit.NewEnsemble(t, kit.MockProofs()).
         FullNode(&client, opts...).
         Miner(&minerA, &client, opts...).
         Start().
@@ -116,6 +92,8 @@ func TestDeadlineToggling(t *testing.T) {
     ssz, err := minerC.ActorSectorSize(ctx, maddrC)
     require.NoError(t, err)
 
+    targetHeight := abi.ChainEpoch(0)
+
     // pledge sectors on C, go through a PP, check for power
     {
         minerC.PledgeSectors(ctx, sectorsC, 0, nil)
@@ -127,11 +105,13 @@ func TestDeadlineToggling(t *testing.T) {
         t.Log("Running one proving period (miner C)")
         t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2)
 
+        targetHeight = di.PeriodStart + provingPeriod*2
+
         for {
             head, err := client.ChainHead(ctx)
             require.NoError(t, err)
 
-            if head.Height() > di.PeriodStart+provingPeriod*2 {
+            if head.Height() > targetHeight {
                 t.Logf("Now head.Height = %d", head.Height())
                 break
             }
@@ -148,18 +128,6 @@ func TestDeadlineToggling(t *testing.T) {
         require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
     }
 
-    // go through upgrade + PP
-    for {
-        head, err := client.ChainHead(ctx)
-        require.NoError(t, err)
-
-        if head.Height() > upgradeH+provingPeriod {
-            t.Logf("Now head.Height = %d", head.Height())
-            break
-        }
-        build.Clock.Sleep(blocktime)
-    }
-
     checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) {
         //stm: @CHAIN_STATE_MINER_POWER_001
         p, err := client.StateMinerPower(ctx, ma, tsk)
@@ -181,18 +149,6 @@ func TestDeadlineToggling(t *testing.T) {
         require.Equal(t, active, act)
     }
 
-    // check that just after the upgrade minerB was still active
-    {
-        uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK)
-        require.NoError(t, err)
-        checkMiner(maddrB, types.NewInt(0), true, uts.Key())
-    }
-
-    //stm: @CHAIN_STATE_NETWORK_VERSION_001
-    nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
-    require.NoError(t, err)
-    require.GreaterOrEqual(t, nv, network.Version12)
-
     ens.Miner(&minerD, &client, opts...).
         Miner(&minerE, &client, opts...).
         Start()
@@ -254,12 +210,14 @@ func TestDeadlineToggling(t *testing.T) {
         require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
     }
 
+    targetHeight = targetHeight + (provingPeriod / 2)
+
     // go through 0.5 PP
     for {
         head, err := client.ChainHead(ctx)
         require.NoError(t, err)
 
-        if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
+        if head.Height() > targetHeight {
             t.Logf("Now head.Height = %d", head.Height())
             break
         }
@@ -268,12 +226,14 @@ func TestDeadlineToggling(t *testing.T) {
 
     checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
 
+    targetHeight = targetHeight + (provingPeriod/2)*5
+
     // go through rest of the PP
     for {
         head, err := client.ChainHead(ctx)
         require.NoError(t, err)
 
-        if head.Height() > upgradeH+(provingPeriod*3) {
+        if head.Height() > targetHeight {
             t.Logf("Now head.Height = %d", head.Height())
             break
         }
@@ -285,7 +245,12 @@ func TestDeadlineToggling(t *testing.T) {
     checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
     checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, types.EmptyTSK)
     checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK)
-    checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+
+    // Note: in the older version of this test `active` would be set to false
+    // this is now true because the time to commit a precommit a sector has
+    // increased to 30 days. We could keep the original assert and increase the
+    // wait above to 30 days, but that makes the test take 14 minutes to run..
+    checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
 
     // disable post on minerB
     minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -344,12 +309,14 @@ func TestDeadlineToggling(t *testing.T) {
         require.True(t, p.MinerPower.RawBytePower.IsZero())
     }
 
+    targetHeight = targetHeight + provingPeriod*2
+
     // go through another PP
     for {
         head, err := client.ChainHead(ctx)
         require.NoError(t, err)
 
-        if head.Height() > upgradeH+(provingPeriod*5) {
+        if head.Height() > targetHeight {
             t.Logf("Now head.Height = %d", head.Height())
             break
         }
@@ -23,20 +23,6 @@ func GenesisNetworkVersion(nv network.Version) EnsembleOpt {
     })
 }
 
-func SDRUpgradeAt(calico, persian abi.ChainEpoch) EnsembleOpt {
-    return UpgradeSchedule(stmgr.Upgrade{
-        Network: network.Version6,
-        Height:  -1,
-    }, stmgr.Upgrade{
-        Network:   network.Version7,
-        Height:    calico,
-        Migration: filcns.UpgradeCalico,
-    }, stmgr.Upgrade{
-        Network: network.Version8,
-        Height:  persian,
-    })
-}
-
 func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
     /* inline-gen template
     return UpgradeSchedule(stmgr.Upgrade{
@@ -58,14 +44,3 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
     })
     /* inline-gen end */
 }
-
-func TurboUpgradeAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
-    return UpgradeSchedule(stmgr.Upgrade{
-        Network: network.Version11,
-        Height:  -1,
-    }, stmgr.Upgrade{
-        Network:   network.Version12,
-        Height:    upgradeHeight,
-        Migration: filcns.UpgradeActorsV4,
-    })
-}
@@ -1,117 +0,0 @@
-// stm: #integration
-package itests
-
-import (
-    "context"
-    "sort"
-    "sync/atomic"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/build"
-    "github.com/filecoin-project/lotus/itests/kit"
-    bminer "github.com/filecoin-project/lotus/miner"
-)
-
-func TestSDRUpgrade(t *testing.T) {
-    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
-    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-    //stm: @CHAIN_STATE_NETWORK_VERSION_001
-
-    //stm: @MINER_SECTOR_LIST_001
-    kit.QuietMiningLogs()
-
-    // oldDelay := policy.GetPreCommitChallengeDelay()
-    // policy.SetPreCommitChallengeDelay(5)
-    // t.Cleanup(func() {
-    //     policy.SetPreCommitChallengeDelay(oldDelay)
-    // })
-
-    blocktime := 50 * time.Millisecond
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    client, miner, ens := kit.EnsembleMinimal(t,
-        kit.MockProofs(),
-        kit.SDRUpgradeAt(500, 1000),
-    )
-    ens.InterconnectAll()
-
-    build.Clock.Sleep(time.Second)
-
-    pledge := make(chan struct{})
-    mine := int64(1)
-    done := make(chan struct{})
-    go func() {
-        defer close(done)
-        round := 0
-        for atomic.LoadInt64(&mine) != 0 {
-            build.Clock.Sleep(blocktime)
-            if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-            }}); err != nil {
-                t.Error(err)
-            }
-
-            // 3 sealing rounds: before, during after.
-            if round >= 3 {
-                continue
-            }
-
-            head, err := client.ChainHead(ctx)
-            assert.NoError(t, err)
-
-            // rounds happen every 100 blocks, with a 50 block offset.
-            if head.Height() >= abi.ChainEpoch(round*500+50) {
-                round++
-                pledge <- struct{}{}
-
-                ver, err := client.StateNetworkVersion(ctx, head.Key())
-                assert.NoError(t, err)
-                switch round {
-                case 1:
-                    assert.Equal(t, network.Version6, ver)
-                case 2:
-                    assert.Equal(t, network.Version7, ver)
-                case 3:
-                    assert.Equal(t, network.Version8, ver)
-                }
-            }
-
-        }
-    }()
-
-    // before.
-    miner.PledgeSectors(ctx, 9, 0, pledge)
-
-    s, err := miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    sort.Slice(s, func(i, j int) bool {
-        return s[i] < s[j]
-    })
-
-    for i, id := range s {
-        info, err := miner.SectorsStatus(ctx, id, true)
-        require.NoError(t, err)
-        expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
-        if i >= 3 {
-            // after
-            expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
-        }
-        assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
-    }
-
-    atomic.StoreInt64(&mine, 0)
-    <-done
-}
@@ -1,78 +0,0 @@
-package itests
-
-import (
-    "context"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/itests/kit"
-    "github.com/filecoin-project/lotus/node/config"
-    sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestMakeAvailable(t *testing.T) {
-    kit.QuietMiningLogs()
-
-    ctx := context.Background()
-    blockTime := 1 * time.Millisecond
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
-        sc.MakeCCSectorsAvailable = true
-    }))
-    ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
-
-    miner.PledgeSectors(ctx, 1, 0, nil)
-    sl, err := miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-    require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
-    {
-        si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
-        require.NoError(t, err)
-        require.NotNil(t, si)
-        require.Less(t, 50000, int(si.Expiration))
-    }
-    client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
-    sl, err = miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-
-    status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
-    require.NoError(t, err)
-    assert.Equal(t, api.SectorState(sealing.Available), status.State)
-
-    dh := kit.NewDealHarness(t, client, miner, miner)
-    deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
-        Rseed:                        6,
-        SuspendUntilCryptoeconStable: true,
-    })
-    outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
-    kit.AssertFilesEqual(t, inPath, outPath)
-
-    sl, err = miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-
-    status, err = miner.SectorsStatus(ctx, CCUpgrade, true)
-    require.NoError(t, err)
-    assert.Equal(t, 1, len(status.Deals))
-    miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
-        CCUpgrade: {},
-    })
-}
@@ -51,7 +51,6 @@ func TestMinerBalanceCollateral(t *testing.T) {
        sc.AlwaysKeepUnsealedCopy = true
        sc.WaitDealsDelay = time.Hour
 
-       sc.BatchPreCommits = batching
        sc.AggregateCommits = batching
 
        sc.PreCommitBatchWait = time.Hour
@@ -12,7 +12,6 @@ import (
     "github.com/stretchr/testify/require"
 
     "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
     miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
 
     "github.com/filecoin-project/lotus/api"
@@ -196,49 +195,3 @@ func TestPledgeMaxBatching(t *testing.T) {
 
     t.Run("Force max prove commit aggregate size", runTest)
 }
-
-func TestPledgeBeforeNv13(t *testing.T) {
-    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
-    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-    blocktime := 50 * time.Millisecond
-
-    runTest := func(t *testing.T, nSectors int) {
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
-        client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
-            kit.GenesisNetworkVersion(network.Version12))
-        ens.InterconnectAll().BeginMining(blocktime)
-
-        client.WaitTillChain(ctx, kit.HeightAtLeast(10))
-
-        toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
-
-        for len(toCheck) > 0 {
-            states := map[api.SectorState]int{}
-
-            for n := range toCheck {
-                st, err := miner.SectorsStatus(ctx, n, false)
-                require.NoError(t, err)
-                states[st.State]++
-                if st.State == api.SectorState(sealing.Proving) {
-                    delete(toCheck, n)
-                }
-                if strings.Contains(string(st.State), "Fail") {
-                    t.Fatal("sector in a failed state", st.State)
-                }
-            }
-
-            build.Clock.Sleep(100 * time.Millisecond)
-            fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
-        }
-    }
-
-    t.Run("100-before-nv13", func(t *testing.T) {
-        runTest(t, 100)
-    })
-}
@@ -1,90 +0,0 @@
-package itests
-
-import (
-    "context"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/itests/kit"
-    "github.com/filecoin-project/lotus/node/config"
-    sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestPreferNoUpgrade(t *testing.T) {
-    kit.QuietMiningLogs()
-
-    ctx := context.Background()
-    blockTime := 1 * time.Millisecond
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
-        sc.PreferNewSectorsForDeals = true
-    }))
-    ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
-    Sealed := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
-
-    {
-        miner.PledgeSectors(ctx, 1, 0, nil)
-        sl, err := miner.SectorsListNonGenesis(ctx)
-        require.NoError(t, err)
-        require.Len(t, sl, 1, "expected 1 sector")
-        require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
-        {
-            si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
-            require.NoError(t, err)
-            require.NotNil(t, si)
-            require.Less(t, 50000, int(si.Expiration))
-        }
-        client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
-        err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
-        require.NoError(t, err)
-
-        sl, err = miner.SectorsListNonGenesis(ctx)
-        require.NoError(t, err)
-        require.Len(t, sl, 1, "expected 1 sector")
-    }
-
-    {
-        dh := kit.NewDealHarness(t, client, miner, miner)
-        deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
-            Rseed:                        6,
-            SuspendUntilCryptoeconStable: true,
-        })
-        outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
-        kit.AssertFilesEqual(t, inPath, outPath)
-    }
-
-    sl, err := miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 2, "expected 2 sectors")
-
-    {
-        status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
-        require.NoError(t, err)
-        assert.Equal(t, api.SectorState(sealing.Available), status.State)
-    }
-
-    {
-        status, err := miner.SectorsStatus(ctx, Sealed, true)
-        require.NoError(t, err)
-        assert.Equal(t, 1, len(status.Deals))
-        miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
-            Sealed: {},
-        })
-    }
-}
@@ -1,85 +0,0 @@
-package itests
-
-import (
-    "context"
-    "fmt"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/itests/kit"
-    sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestAbortUpgradeAvailable(t *testing.T) {
-    kit.QuietMiningLogs()
-
-    ctx := context.Background()
-    blockTime := 1 * time.Millisecond
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC())
-    ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
-    fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
-
-    miner.PledgeSectors(ctx, 1, 0, nil)
-    sl, err := miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-    require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
-    {
-        si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
-        require.NoError(t, err)
-        require.NotNil(t, si)
-        require.Less(t, 50000, int(si.Expiration))
-    }
-    client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
-    err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
-    require.NoError(t, err)
-
-    sl, err = miner.SectorsListNonGenesis(ctx)
-    require.NoError(t, err)
-    require.Len(t, sl, 1, "expected 1 sector")
-
-    ss, err := miner.SectorsStatus(ctx, sl[0], false)
-    require.NoError(t, err)
-
-    for i := 0; i < 100; i++ {
-        ss, err = miner.SectorsStatus(ctx, sl[0], false)
-        require.NoError(t, err)
-        if ss.State == api.SectorState(sealing.Proving) {
-            time.Sleep(50 * time.Millisecond)
-            continue
-        }
-
-        require.Equal(t, api.SectorState(sealing.Available), ss.State)
-        break
-    }
-
-    require.NoError(t, miner.SectorAbortUpgrade(ctx, sl[0]))
-
-    for i := 0; i < 100; i++ {
-        ss, err = miner.SectorsStatus(ctx, sl[0], false)
-        require.NoError(t, err)
-        if ss.State == api.SectorState(sealing.Available) {
-            time.Sleep(50 * time.Millisecond)
-            continue
-        }
-
-        require.Equal(t, api.SectorState(sealing.Proving), ss.State)
-        break
-    }
-}
@@ -1,71 +0,0 @@
-// stm: #integration
-package itests
-
-import (
-    "context"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/require"
-
-    "github.com/filecoin-project/go-state-types/network"
-
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/build"
-    "github.com/filecoin-project/lotus/itests/kit"
-    sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestTapeFix(t *testing.T) {
-    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
-    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-    kit.QuietMiningLogs()
-
-    var blocktime = 2 * time.Millisecond
-
-    // The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
-    // TODO: Make the mock sector size configurable and reenable this
-    // t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
-    t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
-}
-
-func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    networkVersion := network.Version4
-    if after {
-        networkVersion = network.Version5
-    }
-
-    _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(networkVersion))
-    ens.InterconnectAll().BeginMining(blocktime)
-
-    sid, err := miner.PledgeSector(ctx)
-    require.NoError(t, err)
-
-    t.Log("All sectors is fsm")
-
-    // If before, we expect the precommit to fail
-    successState := api.SectorState(sealing.CommitFailed)
-    failureState := api.SectorState(sealing.Proving)
-    if after {
-        // otherwise, it should succeed.
-        successState, failureState = failureState, successState
-    }
-
-    for {
-        st, err := miner.SectorsStatus(ctx, sid.Number, false)
-        require.NoError(t, err)
-        if st.State == successState {
-            break
-        }
-        require.NotEqual(t, failureState, st.State)
-        build.Clock.Sleep(100 * time.Millisecond)
-        t.Log("WaitSeal")
-    }
-}
@@ -224,70 +224,6 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
 }
 
-func TestWindowPostBaseFeeNoBurn(t *testing.T) {
-    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
-    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-    kit.Expensive(t)
-
-    kit.QuietMiningLogs()
-
-    var (
-        blocktime = 2 * time.Millisecond
-        nSectors  = 10
-    )
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    och := build.UpgradeClausHeight
-    build.UpgradeClausHeight = 0
-    t.Cleanup(func() { build.UpgradeClausHeight = och })
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version9))
-    ens.InterconnectAll().BeginMining(blocktime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    require.NoError(t, err)
-
-    //stm: @CHAIN_STATE_MINER_INFO_001
-    mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
-    require.NoError(t, err)
-
-    miner.PledgeSectors(ctx, nSectors, 0, nil)
-    //stm: @CHAIN_STATE_GET_ACTOR_001
-    wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
-    require.NoError(t, err)
-    en := wact.Nonce
-
-    // wait for a new message to be sent from worker address, it will be a PoSt
-
-waitForProof:
-    for {
-        //stm: @CHAIN_STATE_GET_ACTOR_001
-        wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
-        require.NoError(t, err)
-        if wact.Nonce > en {
-            break waitForProof
-        }
-
-        build.Clock.Sleep(blocktime)
-    }
-
-    //stm: @CHAIN_STATE_LIST_MESSAGES_001
-    slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
-    require.NoError(t, err)
-
-    //stm: @CHAIN_STATE_REPLAY_001
-    pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
-    require.NoError(t, err)
-
-    require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())
-}
-
 func TestWindowPostBaseFeeBurn(t *testing.T) {
     //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
     //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
@@ -345,79 +281,6 @@ waitForProof:
     require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
 }
 
-// Tests that V1_1 proofs are generated and accepted in nv19, and V1 proofs are accepted
-func TestWindowPostV1P1NV19(t *testing.T) {
-    kit.QuietMiningLogs()
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    blocktime := 2 * time.Millisecond
-
-    client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version19))
-    ens.InterconnectAll().BeginMining(blocktime)
-
-    maddr, err := miner.ActorAddress(ctx)
-    require.NoError(t, err)
-
-    mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
-    require.NoError(t, err)
-
-    wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
-    require.NoError(t, err)
-    en := wact.Nonce
-
-    // wait for a new message to be sent from worker address, it will be a PoSt
-
-waitForProof:
-    for {
-        wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
-        require.NoError(t, err)
-        if wact.Nonce > en {
-            break waitForProof
-        }
-
-        build.Clock.Sleep(blocktime)
-    }
-
-    slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
-    require.NoError(t, err)
-
-    pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
-    require.NoError(t, err)
-
-    inclTs, err := client.ChainGetTipSet(ctx, pmr.TipSet)
-    require.NoError(t, err)
-
-    inclTsParents, err := client.ChainGetTipSet(ctx, inclTs.Parents())
-    require.NoError(t, err)
-
-    nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
-    require.NoError(t, err)
-    require.Equal(t, network.Version19, nv)
-
-    require.True(t, pmr.Receipt.ExitCode.IsSuccess())
-
-    slmsg, err := client.ChainGetMessage(ctx, slm[0])
-    require.NoError(t, err)
-
-    var params miner11.SubmitWindowedPoStParams
-    require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
-    require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
-
-    // "Turn" this into a V1 proof -- the proof will be invalid, but won't be validated, and so the call should succeed
-    params.Proofs[0].PoStProof = abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
-    v1PostParams := new(bytes.Buffer)
-    require.NoError(t, params.MarshalCBOR(v1PostParams))
-
-    slmsg.Params = v1PostParams.Bytes()
-
-    // Simulate call on inclTsParents's parents, so that the partition isn't already proven
-    call, err := client.StateCall(ctx, slmsg, inclTsParents.Parents())
-    require.NoError(t, err)
-    require.True(t, call.MsgRct.ExitCode.IsSuccess())
-}
-
 // Tests that V1_1 proofs are generated and accepted in nv20, and that V1 proofs are NOT
 func TestWindowPostV1P1NV20(t *testing.T) {
     kit.QuietMiningLogs()
@@ -13,6 +13,7 @@ import (
     "github.com/filecoin-project/go-fil-markets/storagemarket"
     "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/go-state-types/builtin"
+    miner2 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
    "github.com/filecoin-project/go-state-types/builtin/v8/miner"
    "github.com/filecoin-project/go-state-types/builtin/v9/market"
 
@@ -107,7 +108,10 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
 
    // Watch for a pre-commit message to the provider.
    matchEvent := func(msg *types.Message) (bool, error) {
-       matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector || msg.Method == builtin.MethodsMiner.PreCommitSectorBatch || msg.Method == builtin.MethodsMiner.ProveReplicaUpdates)
+       matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector ||
+           msg.Method == builtin.MethodsMiner.PreCommitSectorBatch ||
+           msg.Method == builtin.MethodsMiner.PreCommitSectorBatch2 ||
+           msg.Method == builtin.MethodsMiner.ProveReplicaUpdates)
        return matched, nil
    }
 
@@ -333,6 +337,21 @@ func dealSectorInPreCommitMsg(msg *types.Message, res pipeline.CurrentDealInfo)
            return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
        }
 
+       for _, precommit := range params.Sectors {
+           // Check through the deal IDs associated with this message
+           for _, did := range precommit.DealIDs {
+               if did == res.DealID {
+                   // Found the deal ID in this message. Callback with the sector ID.
+                   return &precommit.SectorNumber, nil
+               }
+           }
+       }
+   case builtin.MethodsMiner.PreCommitSectorBatch2:
+       var params miner2.PreCommitSectorBatchParams2
+       if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
+           return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
+       }
+
        for _, precommit := range params.Sectors {
            // Check through the deal IDs associated with this message
            for _, did := range precommit.DealIDs {
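The new PreCommitSectorBatch2 branch above decodes the v11 miner batch params and scans each entry's DealIDs for the deal being tracked. The following self-contained sketch shows just that lookup; the function name, package, and error message are assumptions for illustration, while the decoding call and field names are the ones used in the hunk.

// Illustrative sketch, not the diff's code: find the sector in a
// PreCommitSectorBatch2 message that carries a given deal ID.
package example

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
)

// sectorForDeal decodes PreCommitSectorBatchParams2 from raw message params
// and returns the sector number whose DealIDs include dealID, if any.
func sectorForDeal(rawParams []byte, dealID abi.DealID) (abi.SectorNumber, bool, error) {
	var params miner11.PreCommitSectorBatchParams2
	if err := params.UnmarshalCBOR(bytes.NewReader(rawParams)); err != nil {
		return 0, false, fmt.Errorf("unmarshal pre commit batch2 params: %w", err)
	}
	for _, precommit := range params.Sectors {
		for _, did := range precommit.DealIDs {
			if did == dealID {
				return precommit.SectorNumber, true, nil
			}
		}
	}
	return 0, false, nil
}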
@ -138,7 +138,6 @@ func DefaultStorageMiner() *StorageMiner {
|
|||||||
AvailableBalanceBuffer: types.FIL(big.Zero()),
|
AvailableBalanceBuffer: types.FIL(big.Zero()),
|
||||||
DisableCollateralFallback: false,
|
DisableCollateralFallback: false,
|
||||||
|
|
||||||
BatchPreCommits: true,
|
|
||||||
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
|
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
|
||||||
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
|
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
|
||||||
// XXX snap deals wait deals slack if first
|
// XXX snap deals wait deals slack if first
|
||||||
|
@ -1204,12 +1204,6 @@ This is useful for forcing all deals to be assigned as snap deals to sectors mar
|
|||||||
|
|
||||||
Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`,
|
Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
Name: "BatchPreCommits",
|
|
||||||
Type: "bool",
|
|
||||||
|
|
||||||
Comment: `enable / disable precommit batching (takes effect after nv13)`,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "MaxPreCommitBatch",
|
Name: "MaxPreCommitBatch",
|
||||||
Type: "int",
|
Type: "int",
|
||||||
@ -1263,7 +1257,8 @@ This is useful for forcing all deals to be assigned as snap deals to sectors mar
|
|||||||
Type: "types.FIL",
|
Type: "types.FIL",
|
||||||
|
|
||||||
Comment: `network BaseFee below which to stop doing precommit batching, instead
|
Comment: `network BaseFee below which to stop doing precommit batching, instead
|
||||||
sending precommit messages to the chain individually`,
|
sending precommit messages to the chain individually. When the basefee is
|
||||||
|
below this threshold, precommit messages will get sent out immediately.`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "AggregateAboveBaseFee",
|
Name: "AggregateAboveBaseFee",
|
||||||
|
@@ -387,8 +387,6 @@ type SealingConfig struct {
	// Don't send collateral with messages even if there is no available balance in the miner actor
	DisableCollateralFallback bool

-	// enable / disable precommit batching (takes effect after nv13)
-	BatchPreCommits bool
	// maximum precommit batch size - batches will be sent immediately above this size
	MaxPreCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
@@ -408,7 +406,8 @@ type SealingConfig struct {
	CommitBatchSlack Duration

	// network BaseFee below which to stop doing precommit batching, instead
-	// sending precommit messages to the chain individually
+	// sending precommit messages to the chain individually. When the basefee is
+	// below this threshold, precommit messages will get sent out immediately.
	BatchPreCommitAboveBaseFee types.FIL

	// network BaseFee below which to stop doing commit aggregation, instead
@@ -1000,7 +1000,6 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
			AvailableBalanceBuffer:    types.FIL(cfg.AvailableBalanceBuffer),
			DisableCollateralFallback: cfg.DisableCollateralFallback,

-			BatchPreCommits:     cfg.BatchPreCommits,
			MaxPreCommitBatch:   cfg.MaxPreCommitBatch,
			PreCommitBatchWait:  config.Duration(cfg.PreCommitBatchWait),
			PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),
@@ -1045,7 +1044,6 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se
		AvailableBalanceBuffer:    types.BigInt(sealingCfg.AvailableBalanceBuffer),
		DisableCollateralFallback: sealingCfg.DisableCollateralFallback,

-		BatchPreCommits:     sealingCfg.BatchPreCommits,
		MaxPreCommitBatch:   sealingCfg.MaxPreCommitBatch,
		PreCommitBatchWait:  time.Duration(sealingCfg.PreCommitBatchWait),
		PreCommitBatchSlack: time.Duration(sealingCfg.PreCommitBatchSlack),
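With BatchPreCommits removed from SealingConfig (and from the two conversion functions above), precommit batching is no longer an opt-in switch; the remaining fields only tune how the always-on batcher behaves. Below is a simplified stand-in struct, not the real lotus config type, summarizing the knobs this diff leaves in place; the values are illustrative examples.

package main

import (
	"fmt"
	"time"
)

// precommitBatchKnobs is a stand-in for the precommit-related slice of the
// sealing config after this change. Field names mirror the diff; the types are
// simplified (the real config uses config.Duration and types.FIL).
type precommitBatchKnobs struct {
	MaxPreCommitBatch   int           // batches are sent immediately above this size
	PreCommitBatchWait  time.Duration // how long to wait before submitting a batch after crossing the minimum batch size
	PreCommitBatchSlack time.Duration // safety margin before on-chain deadlines force a flush
	// basefee threshold; when the network basefee is below it, precommit
	// messages go out immediately instead of waiting for a full batch
	BatchPreCommitAboveBaseFee string
}

func main() {
	k := precommitBatchKnobs{
		MaxPreCommitBatch:          256,            // example size
		PreCommitBatchWait:         24 * time.Hour, // matches the test fixtures in this diff
		PreCommitBatchSlack:        3 * time.Hour,  // matches the test fixtures in this diff
		BatchPreCommitAboveBaseFee: "0.32 nanoFIL", // example threshold
	}
	fmt.Printf("%+v\n", k)
}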
@@ -53,7 +53,6 @@ func TestCommitBatcher(t *testing.T) {
		WaitDealsDelay:          time.Hour * 6,
		AlwaysKeepUnsealedCopy:  true,

-		BatchPreCommits:     true,
		MaxPreCommitBatch:   miner5.PreCommitSectorBatchMaxSize,
		PreCommitBatchWait:  24 * time.Hour,
		PreCommitBatchSlack: 3 * time.Hour,
@@ -90,7 +90,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
		on(SectorOldTicket{}, GetTicket),
	),
	PreCommit2: planOne(
-		on(SectorPreCommit2{}, PreCommitting),
+		on(SectorPreCommit2{}, SubmitPreCommitBatch),
		on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed),
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
	),
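The planner change above reroutes the happy path: after PreCommit2 a sector now goes straight to SubmitPreCommitBatch instead of the individual PreCommitting state. Here is a self-contained sketch of that kind of event-driven transition table, with string stand-ins for the lotus sector states and events; none of the real statemachine types are used.

package main

import "fmt"

type sectorState string
type event string

// transitions is a toy version of the fsmPlanners idea: for a given state,
// map an incoming event to the next state. Only the precommit-related states
// touched by this diff are modelled.
var transitions = map[sectorState]map[event]sectorState{
	"PreCommit2": {
		"SectorPreCommit2":           "SubmitPreCommitBatch", // was "PreCommitting" before this change
		"SectorSealPreCommit2Failed": "SealPreCommit2Failed",
		"SectorSealPreCommit1Failed": "SealPreCommit1Failed",
	},
	"SubmitPreCommitBatch": {
		"SectorPreCommitBatchSent": "PreCommitBatchWait",
	},
	"PreCommitBatchWait": {
		"SectorPreCommitLanded": "WaitSeed",
	},
}

func step(s sectorState, e event) sectorState {
	if next, ok := transitions[s][e]; ok {
		return next
	}
	return s // unknown event: stay put (the real planner handles this differently)
}

func main() {
	s := sectorState("PreCommit2")
	for _, e := range []event{"SectorPreCommit2", "SectorPreCommitBatchSent", "SectorPreCommitLanded"} {
		s = step(s, e)
		fmt.Println(e, "->", s)
	}
}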
@@ -54,10 +54,10 @@ func TestHappyPath(t *testing.T) {
	require.Equal(m.t, m.state.State, PreCommit2)

	m.planSingle(SectorPreCommit2{})
-	require.Equal(m.t, m.state.State, PreCommitting)
+	require.Equal(m.t, m.state.State, SubmitPreCommitBatch)

-	m.planSingle(SectorPreCommitted{})
-	require.Equal(m.t, m.state.State, PreCommitWait)
+	m.planSingle(SectorPreCommitBatchSent{})
+	require.Equal(m.t, m.state.State, PreCommitBatchWait)

	m.planSingle(SectorPreCommitLanded{})
	require.Equal(m.t, m.state.State, WaitSeed)
@@ -77,7 +77,7 @@ func TestHappyPath(t *testing.T) {
	m.planSingle(SectorFinalized{})
	require.Equal(m.t, m.state.State, Proving)

-	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
+	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
	for i, n := range notif {
		if n.before.State != expected[i] {
			t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
@@ -116,10 +116,10 @@ func TestHappyPathFinalizeEarly(t *testing.T) {
	require.Equal(m.t, m.state.State, PreCommit2)

	m.planSingle(SectorPreCommit2{})
-	require.Equal(m.t, m.state.State, PreCommitting)
+	require.Equal(m.t, m.state.State, SubmitPreCommitBatch)

-	m.planSingle(SectorPreCommitted{})
-	require.Equal(m.t, m.state.State, PreCommitWait)
+	m.planSingle(SectorPreCommitBatchSent{})
+	require.Equal(m.t, m.state.State, PreCommitBatchWait)

	m.planSingle(SectorPreCommitLanded{})
	require.Equal(m.t, m.state.State, WaitSeed)
@@ -145,7 +145,7 @@ func TestHappyPathFinalizeEarly(t *testing.T) {
	m.planSingle(SectorFinalized{})
	require.Equal(m.t, m.state.State, Proving)

-	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving}
+	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving}
	for i, n := range notif {
		if n.before.State != expected[i] {
			t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
@@ -220,10 +220,10 @@ func TestSeedRevert(t *testing.T) {
	require.Equal(m.t, m.state.State, PreCommit2)

	m.planSingle(SectorPreCommit2{})
-	require.Equal(m.t, m.state.State, PreCommitting)
+	require.Equal(m.t, m.state.State, SubmitPreCommitBatch)

-	m.planSingle(SectorPreCommitted{})
-	require.Equal(m.t, m.state.State, PreCommitWait)
+	m.planSingle(SectorPreCommitBatchSent{})
+	require.Equal(m.t, m.state.State, PreCommitBatchWait)

	m.planSingle(SectorPreCommitLanded{})
	require.Equal(m.t, m.state.State, WaitSeed)
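All three pipeline tests now expect the batched states in the happy-path trace. The check itself is a positional comparison of recorded transitions against an expected sequence, the same way the tests walk the notif slice; a self-contained version of that pattern follows, using plain strings instead of the lotus SectorState type.

package main

import "fmt"

// verifyTrace compares an observed sequence of "before" states against the
// expected happy path.
func verifyTrace(expected, observed []string) error {
	if len(observed) != len(expected) {
		return fmt.Errorf("expected %d transitions, got %d", len(expected), len(observed))
	}
	for i, got := range observed {
		if got != expected[i] {
			return fmt.Errorf("step %d: expected before state %q, got %q", i, expected[i], got)
		}
	}
	return nil
}

func main() {
	// expected happy path after this change (from the updated TestHappyPath)
	expected := []string{
		"Packing", "GetTicket", "PreCommit1", "PreCommit2",
		"SubmitPreCommitBatch", "PreCommitBatchWait", "WaitSeed",
		"Committing", "SubmitCommit", "CommitWait", "FinalizeSector", "Proving",
	}
	observed := append([]string(nil), expected...)
	fmt.Println(verifyTrace(expected, observed)) // <nil>
}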
@@ -7,7 +7,6 @@ import (
	"sync"
	"time"

-	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
@@ -193,33 +192,30 @@ func (b *PreCommitBatcher) maybeStartBatch(notif bool) ([]sealiface.PreCommitBat
		return nil, xerrors.Errorf("getting config: %w", err)
	}

-	if notif && total < cfg.MaxPreCommitBatch {
-		return nil, nil
-	}
-
	ts, err := b.api.ChainHead(b.mctx)
	if err != nil {
		return nil, err
	}

-	// TODO: Drop this once nv14 has come and gone
+	curBasefeeLow := false
+	if !cfg.BatchPreCommitAboveBaseFee.Equals(big.Zero()) && ts.MinTicketBlock().ParentBaseFee.LessThan(cfg.BatchPreCommitAboveBaseFee) {
+		curBasefeeLow = true
+	}
+
+	// if this wasn't an user-forced batch, and we're not at/above the max batch size,
+	// and we're not above the basefee threshold, don't batch yet
+	if notif && total < cfg.MaxPreCommitBatch && !curBasefeeLow {
+		return nil, nil
+	}
+
	nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key())
	if err != nil {
		return nil, xerrors.Errorf("couldn't get network version: %w", err)
	}

-	individual := false
-	if !cfg.BatchPreCommitAboveBaseFee.Equals(big.Zero()) && ts.MinTicketBlock().ParentBaseFee.LessThan(cfg.BatchPreCommitAboveBaseFee) && nv >= network.Version14 {
-		individual = true
-	}
-
-	// todo support multiple batches
-	var res []sealiface.PreCommitBatchRes
-	if !individual {
-		res, err = b.processBatch(cfg, ts.Key(), ts.MinTicketBlock().ParentBaseFee, nv)
-	} else {
-		res, err = b.processIndividually(cfg)
-	}
+	// For precommits the only method to precommit sectors after nv21(22?) is to use the new precommit_batch2 method
+	// So we always batch
+	res, err := b.processBatch(cfg, ts.Key(), ts.MinTicketBlock().ParentBaseFee, nv)
	if err != nil && len(res) == 0 {
		return nil, err
	}
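The rewritten maybeStartBatch drops the individual-send fallback entirely: it only decides whether to flush the queue now or keep waiting, and below the basefee threshold it flushes immediately rather than falling back to PreCommitSector. A self-contained sketch of that decision follows, with plain Go types standing in for the lotus config and chain values; the function and parameter names here are illustrative.

package main

import (
	"fmt"
	"math/big"
)

// flushNow reproduces the gating logic of the new maybeStartBatch:
//   - a timer/slack-driven call (notif == false) always flushes,
//   - a "new sector added" notification (notif == true) flushes only once the
//     queue reaches the max batch size, or once the basefee drops below the
//     threshold (cheap chain space, no reason to keep sectors waiting).
// Everything that would otherwise come from the chain or the config is passed in.
func flushNow(notif bool, queued, maxBatch int, baseFee, threshold *big.Int) bool {
	curBasefeeLow := threshold.Sign() != 0 && baseFee.Cmp(threshold) < 0
	if notif && queued < maxBatch && !curBasefeeLow {
		return false // keep accumulating
	}
	return true // flush the batch now (always via PreCommitSectorBatch2)
}

func main() {
	threshold := big.NewInt(320_000_000) // example threshold in attoFIL
	fmt.Println(flushNow(true, 10, 256, big.NewInt(500_000_000), threshold))  // false: wait for more sectors
	fmt.Println(flushNow(true, 10, 256, big.NewInt(100_000_000), threshold))  // true: basefee low, flush now
	fmt.Println(flushNow(false, 10, 256, big.NewInt(500_000_000), threshold)) // true: forced/timer flush
}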
@@ -243,91 +239,14 @@ func (b *PreCommitBatcher) maybeStartBatch(notif bool) ([]sealiface.PreCommitBat
	return res, nil
}

-func (b *PreCommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.PreCommitBatchRes, error) {
-	mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, types.EmptyTSK)
-	if err != nil {
-		return nil, xerrors.Errorf("couldn't get miner info: %w", err)
-	}
-
-	avail := types.TotalFilecoinInt
-
-	if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback {
-		avail, err = b.api.StateMinerAvailableBalance(b.mctx, b.maddr, types.EmptyTSK)
-		if err != nil {
-			return nil, xerrors.Errorf("getting available miner balance: %w", err)
-		}
-
-		avail = big.Sub(avail, cfg.AvailableBalanceBuffer)
-		if avail.LessThan(big.Zero()) {
-			avail = big.Zero()
-		}
-	}
-
-	var res []sealiface.PreCommitBatchRes
-
-	for sn, info := range b.todo {
-		r := sealiface.PreCommitBatchRes{
-			Sectors: []abi.SectorNumber{sn},
-		}
-
-		mcid, err := b.processSingle(cfg, mi, &avail, info)
-		if err != nil {
-			r.Error = err.Error()
-		} else {
-			r.Msg = &mcid
-		}
-
-		res = append(res, r)
-	}
-
-	return res, nil
-}
-
-func (b *PreCommitBatcher) processSingle(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, entry *preCommitEntry) (cid.Cid, error) {
-	msgParams := infoToPreCommitSectorParams(entry.pci)
-	enc := new(bytes.Buffer)
-
-	if err := msgParams.MarshalCBOR(enc); err != nil {
-		return cid.Undef, xerrors.Errorf("marshaling precommit params: %w", err)
-	}
-
-	deposit := entry.deposit
-	if cfg.CollateralFromMinerBalance {
-		c := big.Sub(deposit, *avail)
-		*avail = big.Sub(*avail, deposit)
-		deposit = c
-
-		if deposit.LessThan(big.Zero()) {
-			deposit = big.Zero()
-		}
-		if (*avail).LessThan(big.Zero()) {
-			*avail = big.Zero()
-		}
-	}
-
-	goodFunds := big.Add(deposit, big.Int(b.feeCfg.MaxPreCommitGasFee))
-
-	from, _, err := b.addrSel.AddressFor(b.mctx, b.api, mi, api.PreCommitAddr, goodFunds, deposit)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("no good address to send precommit message from: %w", err)
-	}
-
-	mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSector, deposit, big.Int(b.feeCfg.MaxPreCommitGasFee), enc.Bytes())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
-	}
-
-	return mcid, nil
-}
-
 func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.TokenAmount, entries []*preCommitEntry, nv network.Version) ([]sealiface.PreCommitBatchRes, error) {
-	params := miner.PreCommitSectorBatchParams{}
+	params := miner.PreCommitSectorBatchParams2{}
	deposit := big.Zero()
	var res sealiface.PreCommitBatchRes

	for _, p := range entries {
		res.Sectors = append(res.Sectors, p.pci.SectorNumber)
-		params.Sectors = append(params.Sectors, *infoToPreCommitSectorParams(p.pci))
+		params.Sectors = append(params.Sectors, *p.pci)
		deposit = big.Add(deposit, p.deposit)
	}

@@ -367,7 +286,7 @@ func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.To
		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
	}

-	_, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch, needFunds, maxFee, enc.Bytes())
+	_, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch2, needFunds, maxFee, enc.Bytes())

	if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(entries) == 1) {
		res.Error = err.Error()
@@ -385,7 +304,7 @@ func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.To
	}

	// If state call succeeds, we can send the message for real
-	mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch, needFunds, maxFee, enc.Bytes())
+	mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch2, needFunds, maxFee, enc.Bytes())
	if err != nil {
		res.Error = err.Error()
		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("pushing message to mpool: %w", err)
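processPreCommitBatch keeps its shape but now targets PreCommitSectorBatch2: accumulate every queued entry into one params struct, sum the deposits, dry-run the message with a gas simulation, and only then push it for real. A reduced sketch of that accumulate-then-simulate-then-send flow follows, with stand-in types and callbacks instead of the lotus chain API; names like simulate and send below are placeholders, not real lotus functions.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// entry is a stand-in for a queued precommit: a sector number and its deposit in attoFIL.
type entry struct {
	sector  uint64
	deposit *big.Int
}

// sendBatch mirrors the flow in processPreCommitBatch: build one batch message for
// all entries, simulate it first, and only send when the simulation passes.
func sendBatch(entries []entry,
	simulate func(sectors []uint64, value *big.Int) error,
	send func(sectors []uint64, value *big.Int) (string, error),
) (string, error) {
	var sectors []uint64
	total := big.NewInt(0)
	for _, e := range entries {
		sectors = append(sectors, e.sector)
		total = new(big.Int).Add(total, e.deposit)
	}

	if err := simulate(sectors, total); err != nil {
		return "", fmt.Errorf("simulating batch message: %w", err)
	}
	// If the state call succeeds, we can send the message for real.
	return send(sectors, total)
}

func main() {
	entries := []entry{{1, big.NewInt(100)}, {2, big.NewInt(250)}}
	msg, err := sendBatch(entries,
		func(s []uint64, v *big.Int) error {
			if len(s) == 0 {
				return errors.New("empty batch")
			}
			return nil
		},
		func(s []uint64, v *big.Int) (string, error) {
			return fmt.Sprintf("msg(%d sectors, %s attoFIL)", len(s), v), nil
		},
	)
	fmt.Println(msg, err)
}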
@@ -56,7 +56,6 @@ func TestPrecommitBatcher(t *testing.T) {
		WaitDealsDelay:          time.Hour * 6,
		AlwaysKeepUnsealedCopy:  true,

-		BatchPreCommits:     true,
		MaxPreCommitBatch:   maxBatch,
		PreCommitBatchWait:  24 * time.Hour,
		PreCommitBatchSlack: 3 * time.Hour,
@@ -114,7 +113,7 @@ func TestPrecommitBatcher(t *testing.T) {
			basefee = big.NewInt(10001)
		}

-		s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil)
+		s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil).MaxTimes(2) // once in AddPreCommit

		go func() {
			defer done.Unlock()
@@ -183,28 +182,6 @@ func TestPrecommitBatcher(t *testing.T) {
	expectInitialCalls := func() action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise {
			s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(10001), 1), nil)
-			s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version14, nil)
-			return nil
-		}
-	}
-
-	//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
-	expectSendsSingle := func(expect []abi.SectorNumber) action {
-		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise {
-			s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(9999), 1), nil)
-			s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version14, nil)
-
-			s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil)
-			for _, number := range expect {
-				numClone := number
-				s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool {
-					b := i.(*types.Message)
-					var params miner6.PreCommitSectorParams
-					require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params)))
-					require.Equal(t, numClone, params.SectorNumber)
-					return true
-				}), gomock.Any()).Return(dummySmsg, nil)
-			}
			return nil
		}
	}
@@ -240,18 +217,11 @@ func TestPrecommitBatcher(t *testing.T) {
	}{
		"addSingle": {
			actions: []action{
-				addSector(0, false),
+				addSector(0, true),
				waitPending(1),
				flush([]abi.SectorNumber{0}),
			},
		},
-		"addTwo": {
-			actions: []action{
-				addSectors(getSectors(2), false),
-				waitPending(2),
-				flush(getSectors(2)),
-			},
-		},
		"addMax": {
			actions: []action{
				expectInitialCalls(),
@@ -268,10 +238,10 @@ func TestPrecommitBatcher(t *testing.T) {
				addSectors(getSectors(maxBatch), true),
			},
		},
-		"addMax-belowBaseFee": {
+		"addOne-belowBaseFee": {
			actions: []action{
-				expectSendsSingle(getSectors(maxBatch)),
-				addSectors(getSectors(maxBatch), false),
+				expectSend(getSectors(1), false),
+				addSectors(getSectors(1), false),
			},
		},
	}
@@ -287,6 +257,7 @@ func TestPrecommitBatcher(t *testing.T) {

	// create them mocks
	pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl)
+	pcapi.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version20, nil).AnyTimes()

	pcb := pipeline.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg)

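With the individual-send path gone, the network version no longer changes which message gets sent, so the test pins StateNetworkVersion once in the shared mock setup instead of per scenario, and the below-basefee case now expects a (small) batch rather than individual PreCommitSector messages. The table-driven shape of the test itself is plain Go; a self-contained skeleton of that pattern, using only simple local types and no gomock plumbing, looks roughly like this.

package main

import "fmt"

// action is one step in a scenario: it can mutate the queue under test or
// check an expectation, mirroring the []action tables in TestPrecommitBatcher.
type action func(queue *[]int) error

func addSector(n int) action {
	return func(queue *[]int) error { *queue = append(*queue, n); return nil }
}

func expectPending(want int) action {
	return func(queue *[]int) error {
		if got := len(*queue); got != want {
			return fmt.Errorf("expected %d pending sectors, got %d", want, got)
		}
		return nil
	}
}

func main() {
	cases := map[string][]action{
		"addSingle": {addSector(0), expectPending(1)},
		"addTwo":    {addSector(0), addSector(1), expectPending(2)},
	}
	for name, actions := range cases {
		var queue []int
		var err error
		for _, act := range actions {
			if err = act(&queue); err != nil {
				break
			}
		}
		fmt.Printf("%s: err=%v\n", name, err)
	}
}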
@@ -42,7 +42,6 @@ type Config struct {
	AvailableBalanceBuffer    abi.TokenAmount
	DisableCollateralFallback bool

-	BatchPreCommits     bool
	MaxPreCommitBatch   int
	PreCommitBatchWait  time.Duration
	PreCommitBatchSlack time.Duration
@@ -82,7 +82,7 @@ const (
	PreCommit1 SectorState = "PreCommit1" // do PreCommit1
	PreCommit2 SectorState = "PreCommit2" // do PreCommit2

-	PreCommitting SectorState = "PreCommitting" // on chain pre-commit
+	PreCommitting SectorState = "PreCommitting" // on chain pre-commit (deprecated)
	PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain

	SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
@@ -368,6 +368,10 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m
		DealIDs: sector.dealIDs(),
	}

+	if sector.hasDeals() {
+		params.UnsealedCid = sector.CommD
+	}
+
	collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key())
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("getting initial pledge collateral: %w", err)
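PreCommitSectorBatch2 takes SectorPreCommitInfo entries, which can carry the sector's unsealed (data) CID, and the hunk above fills that field in only when the sector actually has deals, leaving it unset for CC sectors. A stand-in sketch of that conditional follows, with hypothetical local types in place of the lotus and actor ones.

package main

import "fmt"

// sectorInfo and preCommitParams are simplified stand-ins: the real code uses
// SectorInfo and the actors' SectorPreCommitInfo, where the unsealed CID is optional.
type sectorInfo struct {
	pieces []string // non-empty when the sector carries deal pieces
	commD  string   // unsealed (data) commitment computed during sealing
}

type preCommitParams struct {
	sealedCID   string
	unsealedCID *string // nil means "no deal data", as for CC sectors
}

func buildParams(s sectorInfo, sealedCID string) preCommitParams {
	p := preCommitParams{sealedCID: sealedCID}
	if len(s.pieces) > 0 { // sector.hasDeals() in the real code
		p.unsealedCID = &s.commD
	}
	return p
}

func main() {
	deal := buildParams(sectorInfo{pieces: []string{"piece-1"}, commD: "baga...deal"}, "bagb...sealed")
	cc := buildParams(sectorInfo{}, "bagb...cc")
	fmt.Println(deal.unsealedCID != nil, cc.unsealedCID == nil) // true true
}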
@@ -377,62 +381,10 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m
}

func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error {
-	cfg, err := m.getConfig()
-	if err != nil {
-		return xerrors.Errorf("getting config: %w", err)
-	}
-
-	if cfg.BatchPreCommits {
-		nv, err := m.Api.StateNetworkVersion(ctx.Context(), types.EmptyTSK)
-		if err != nil {
-			return xerrors.Errorf("getting network version: %w", err)
-		}
-
-		if nv >= network.Version13 {
-			return ctx.Send(SectorPreCommitBatch{})
-		}
-	}
-
-	info, pcd, tsk, err := m.preCommitInfo(ctx, sector)
-	if err != nil {
-		return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitInfo: %w", err)})
-	}
-	if info == nil {
-		return nil // event was sent in preCommitInfo
-	}
-
-	params := infoToPreCommitSectorParams(info)
-
-	deposit, err := collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, pcd)
-	if err != nil {
-		return err
-	}
-
-	enc := new(bytes.Buffer)
-	if err := params.MarshalCBOR(enc); err != nil {
-		return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)})
-	}
-
-	mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tsk)
-	if err != nil {
-		log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
-		return nil
-	}
-
-	goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))
-
-	from, _, err := m.addrSel.AddressFor(ctx.Context(), m.Api, mi, api.PreCommitAddr, goodFunds, deposit)
-	if err != nil {
-		return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("no good address to send precommit message from: %w", err)})
-	}
-
-	log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit)
-	mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
-	if err != nil {
-		return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
-	}
-
-	return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *info})
+	// note: this is a legacy state handler, normally new sectors won't enter this state
+	// but we keep this handler in order to not break existing sector state machines.
+	// todo: drop after nv21
+	return ctx.Send(SectorPreCommitBatch{})
}

func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error {
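The legacy PreCommitting handler shrinks to a single forward: any sector that was persisted in that state before the upgrade is simply pushed into the batch path on restart, instead of building and sending an individual PreCommitSector message. A tiny sketch of that forwarding idea, with hypothetical stand-in types (the real handler sends a SectorPreCommitBatch event through the lotus statemachine):

package main

import "fmt"

// event and handler are stand-ins for the statemachine event/handler plumbing.
type event string
type handler func(sector string) event

// handlePreCommittingLegacy mirrors the trimmed handler: no config lookup, no
// message building; it only re-routes the sector into the batching pipeline.
func handlePreCommittingLegacy(sector string) event {
	return event("SectorPreCommitBatch") // hand the sector to the precommit batcher
}

func main() {
	var h handler = handlePreCommittingLegacy
	fmt.Println(h("sector-42")) // SectorPreCommitBatch
}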
@@ -10,7 +10,6 @@ import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
-	"github.com/filecoin-project/go-state-types/builtin/v9/miner"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
@@ -127,14 +126,3 @@ func sendMsg(ctx context.Context, sa interface {

	return smsg.Cid(), nil
}
-
-func infoToPreCommitSectorParams(info *miner.SectorPreCommitInfo) *miner.PreCommitSectorParams {
-	return &miner.PreCommitSectorParams{
-		SealProof:     info.SealProof,
-		SectorNumber:  info.SectorNumber,
-		SealedCID:     info.SealedCID,
-		SealRandEpoch: info.SealRandEpoch,
-		DealIDs:       info.DealIDs,
-		Expiration:    info.Expiration,
-	}
-}