Merge pull request #7810 from filecoin-project/feat/snap-deals

Snap Deals Lotus Integration: FSM Posting and integration test
Aayush Rajasekaran 2022-01-11 12:46:55 -05:00 committed by GitHub
commit 207d33eaba
71 changed files with 2059 additions and 370 deletions

View File

@ -1084,7 +1084,7 @@ type CirculatingSupply struct {
type MiningBaseInfo struct {
MinerPower types.BigInt
NetworkPower types.BigInt
Sectors []builtin.SectorInfo
Sectors []builtin.ExtendedSectorInfo
WorkerKey address.Address
SectorSize abi.SectorSize
PrevBeaconEntry types.BeaconEntry
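The widened Sectors field is what flows into the new five-argument ComputeProof later in this diff. A minimal sketch of the call a block producer would now make, assuming `epp` is a WinningPoStProver, `mbi` is this MiningBaseInfo, and `round`/`nv` are the PoSt epoch and network version (illustrative names, not from this hunk):

```go
// Sketch: the extended sector set, PoSt epoch and network version are all
// threaded through to the prover; builtin.ExtendedSectorInfo aliases the
// specs-actors v7 proof type, so no conversion is needed here.
proofs, err := epp.ComputeProof(ctx, mbi.Sectors, rand, round, nv)
if err != nil {
	return xerrors.Errorf("computing winning PoSt proof: %w", err)
}
```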

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/abi"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/filecoin-project/specs-storage/storage"
@ -99,8 +100,8 @@ type StorageMiner interface {
// Returns null if message wasn't sent
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns null if message wasn't sent
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
@ -111,6 +112,7 @@ type StorageMiner interface {
SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
// WorkerConnect tells the node to connect to workers RPC
WorkerConnect(context.Context, string) error //perm:admin retry:true
@ -253,7 +255,7 @@ type StorageMiner interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
}
var _ storiface.WorkerReturn = *new(StorageMiner)
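A hedged sketch of exercising the changed admin endpoints from an api.StorageMiner client; the `minerAPI` handle and sector number are illustrative:

```go
// Mark sector 42 for a snap-deals upgrade (snap=true). Passing false keeps
// the pre-v15 "classic" CC upgrade behaviour.
if err := minerAPI.SectorMarkForUpgrade(ctx, abi.SectorNumber(42), true); err != nil {
	return xerrors.Errorf("marking sector for upgrade: %w", err)
}

// Immediately match pending deal pieces against open sectors instead of
// waiting for further deals to trigger a match.
if err := minerAPI.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil {
	return xerrors.Errorf("matching pending pieces: %w", err)
}
```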

View File

@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -620,7 +621,7 @@ type StorageMinerStruct struct {
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"`
ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
@ -758,7 +759,9 @@ type StorageMinerStruct struct {
SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber, p2 bool) error `perm:"admin"`
SectorMatchPendingPiecesToOpenSectors func(p0 context.Context) error `perm:"admin"`
SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"`
@ -3710,14 +3713,14 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo
return *new(map[abi.SectorNumber]string), ErrNotSupported
}
func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
if s.Internal.ComputeProof == nil {
return *new([]builtin.PoStProof), ErrNotSupported
}
return s.Internal.ComputeProof(p0, p1, p2)
return s.Internal.ComputeProof(p0, p1, p2, p3, p4)
}
func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
return *new([]builtin.PoStProof), ErrNotSupported
}
@ -4469,14 +4472,25 @@ func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration
return *new(time.Duration), ErrNotSupported
}
func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
if s.Internal.SectorMarkForUpgrade == nil {
return ErrNotSupported
}
return s.Internal.SectorMarkForUpgrade(p0, p1)
return s.Internal.SectorMarkForUpgrade(p0, p1, p2)
}
func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
if s.Internal.SectorMatchPendingPiecesToOpenSectors == nil {
return ErrNotSupported
}
return s.Internal.SectorMatchPendingPiecesToOpenSectors(p0)
}
func (s *StorageMinerStub) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
return ErrNotSupported
}
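For readers new to this file's conventions: each API method appears as a function field on the *Struct proxy (populated by the RPC client) and as a *Stub fallback. A stripped-down sketch of the pattern, with a made-up method name:

```go
// Sketch of the proxy pattern used throughout this file: the RPC client
// fills in Internal.DoThing; a nil field degrades to ErrNotSupported
// instead of a nil-pointer panic when the remote side is too old.
type ExampleStruct struct {
	Internal struct {
		DoThing func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
	}
}

func (s *ExampleStruct) DoThing(p0 context.Context, p1 abi.SectorNumber) error {
	if s.Internal.DoThing == nil {
		return ErrNotSupported
	}
	return s.Internal.DoThing(p0, p1)
}
```

This is why the SectorMarkForUpgrade signature change has to be mirrored in three places: the interface above, the proxy, and the stub.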

View File

@ -8,7 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -57,7 +57,7 @@ type Gateway interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -451,7 +451,7 @@ type GatewayStruct struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
@ -2703,15 +2703,15 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
if s.Internal.StateNetworkVersion == nil {
return *new(network.Version), ErrNotSupported
return *new(abinetwork.Version), ErrNotSupported
}
return s.Internal.StateNetworkVersion(p0, p1)
}
func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
return *new(network.Version), ErrNotSupported
func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
return *new(abinetwork.Version), ErrNotSupported
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {

View File

@ -54,10 +54,10 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion0 = newVer(1, 4, 0)
FullAPIVersion1 = newVer(2, 1, 0)
FullAPIVersion0 = newVer(1, 5, 0)
FullAPIVersion1 = newVer(2, 2, 0)
MinerAPIVersion0 = newVer(1, 2, 0)
MinerAPIVersion0 = newVer(1, 3, 0)
WorkerAPIVersion0 = newVer(1, 5, 0)
)
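These bumps are what produce the `131584` values in the regenerated API docs later in this diff: newVer packs a semantic version into a uint32. A sketch of the assumed encoding:

```go
// Assumed encoding (matching the documented APIVersion values):
// major<<16 | minor<<8 | patch.
func newVer(major, minor, patch uint8) uint32 {
	return uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)
}

// newVer(2, 1, 0) == 131328 — the old FullAPIVersion1
// newVer(2, 2, 0) == 131584 — the new FullAPIVersion1
```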

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -17,7 +17,7 @@ import (
const BootstrappersFile = ""
const GenesisFile = ""
const GenesisNetworkVersion = network.Version14
const GenesisNetworkVersion = network.Version15
var UpgradeBreezeHeight = abi.ChainEpoch(-1)

View File

@ -61,6 +61,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof7.SectorInfo
type ExtendedSectorInfo = proof7.ExtendedSectorInfo
type PoStProof = proof7.PoStProof
type FilterEstimate = smoothing0.FilterEstimate
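ExtendedSectorInfo is SectorInfo plus the optional sector key CID of an upgraded replica, and much of this PR is converting between the two. A sketch of the shapes, with the field set inferred from the JSON examples in the regenerated docs below (see specs-actors v7 for the authoritative definitions):

```go
// Sketch of the v7 proof types as used throughout this diff.
type SectorInfo struct {
	SealProof    abi.RegisteredSealProof
	SectorNumber abi.SectorNumber
	SealedCID    cid.Cid
}

type ExtendedSectorInfo struct {
	SealProof    abi.RegisteredSealProof
	SectorNumber abi.SectorNumber
	SectorKey    *cid.Cid // non-nil iff the sector was snap-upgraded
	SealedCID    cid.Cid
}

// The down-conversion that recurs throughout this PR:
func toSectorInfo(xsi ExtendedSectorInfo) SectorInfo {
	return SectorInfo{
		SealProof:    xsi.SealProof,
		SectorNumber: xsi.SectorNumber,
		SealedCID:    xsi.SealedCID,
	}
}
```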

View File

@ -45,6 +45,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof{{.latestVersion}}.SectorInfo
type ExtendedSectorInfo = proof{{.latestVersion}}.ExtendedSectorInfo
type PoStProof = proof{{.latestVersion}}.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

View File

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
{{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}}
@ -193,6 +194,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate
// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -201,6 +203,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old

View File

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -282,6 +283,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate
// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -290,6 +292,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old
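The new aliases carry the on-chain snap-deals message. Paraphrasing the specs-actors v7 definitions (hedged; consult miner7 for the authoritative field set):

```go
// Sketch of the miner actor types aliased above, per specs-actors v7.
type ReplicaUpdate struct {
	SectorID           abi.SectorNumber
	Deadline           uint64
	Partition          uint64
	NewSealedSectorCID cid.Cid // commR of the updated replica
	Deals              []abi.DealID
	UpdateProofType    abi.RegisteredUpdateProof
	ReplicaProof       []byte
}

type ProveReplicaUpdatesParams struct {
	Updates []ReplicaUpdate
}
```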

View File

@ -26,7 +26,7 @@ import (
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
@ -400,12 +400,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
xsectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
sectors := make([]proof.SectorInfo, len(xsectors))
for i, xsi := range xsectors {
sectors[i] = proof.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,

View File

@ -461,7 +461,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
if et != nil {
// TODO: maybe think about passing in more real parameters to this?
wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil)
wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
if err != nil {
return nil, err
}
@ -620,7 +620,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error)
}
type wppProvider struct{}
@ -629,7 +629,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil
}
func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) {
return ValidWpostForTesting, nil
}
@ -692,11 +692,11 @@ func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (b
panic("not supported")
}
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) {
panic("not supported")
}

View File

@ -117,7 +117,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return mas.GetSector(sid)
}
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
@ -203,12 +203,13 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
return nil, xerrors.Errorf("loading proving sectors: %w", err)
}
out := make([]builtin.SectorInfo, len(sectors))
out := make([]builtin.ExtendedSectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
out[i] = builtin.ExtendedSectorInfo{
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
SectorKey: sinfo.SectorKeyCID,
}
}
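GetSectorsForWinningPoSt now forwards the on-chain SectorKeyCID, which is how the proving stack (see pubExtendedSectorToPriv later in this diff) decides per sector whether to prove over the original sealed files or the updated replica. A sketch of that discrimination:

```go
// Sketch: nil SectorKey means the sector was never snap-upgraded.
for _, xsi := range out {
	if xsi.SectorKey != nil {
		// prove over the update files (FTUpdate / FTUpdateCache)
	} else {
		// prove over the original files (FTSealed / FTCache)
	}
}
```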

View File

@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@ -543,7 +544,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
return []uint64{1}, nil
}
func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof2.PoStProof, error) {
return []proof2.PoStProof{
{
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,

View File

@ -245,8 +245,8 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre
return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}
func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
func (ss *syscallShim) VerifyPoSt(info proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), info)
if err != nil {
return err
}

View File

@ -86,9 +86,10 @@ func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
}, &svi)
}
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
return cv.backend.VerifyWinningPoSt(ctx, info)
}
func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) {
return cv.withCache(func() (bool, error) {
return cv.backend.VerifyWindowPoSt(ctx, info)

View File

@ -12,6 +12,8 @@ import (
"time"
saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2"
@ -260,7 +262,8 @@ var sealBenchCmd = &cli.Command{
sectorNumber := c.Int("num-sectors")
var sealTimings []SealingResult
var sealedSectors []saproof2.SectorInfo
var extendedSealedSectors []saproof7.ExtendedSectorInfo
var sealedSectors []saproof7.SectorInfo
if robench == "" {
var err error
@ -269,7 +272,7 @@ var sealBenchCmd = &cli.Command{
PreCommit2: 1,
Commit: 1,
}
sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
sealTimings, extendedSealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
@ -296,7 +299,13 @@ var sealBenchCmd = &cli.Command{
}
for _, s := range genm.Sectors {
sealedSectors = append(sealedSectors, saproof2.SectorInfo{
extendedSealedSectors = append(extendedSealedSectors, saproof7.ExtendedSectorInfo{
SealedCID: s.CommR,
SectorNumber: s.SectorID,
SealProof: s.ProofType,
SectorKey: nil,
})
sealedSectors = append(sealedSectors, saproof7.SectorInfo{
SealedCID: s.CommR,
SectorNumber: s.SectorID,
SealProof: s.ProofType,
@ -325,20 +334,20 @@ var sealBenchCmd = &cli.Command{
return err
}
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(sealedSectors)))
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(extendedSealedSectors)))
if err != nil {
return err
}
candidates := make([]saproof2.SectorInfo, len(fcandidates))
xcandidates := make([]saproof7.ExtendedSectorInfo, len(fcandidates))
for i, fcandidate := range fcandidates {
candidates[i] = sealedSectors[fcandidate]
xcandidates[i] = extendedSealedSectors[fcandidate]
}
gencandidates := time.Now()
log.Info("computing winning post snark (cold)")
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
if err != nil {
return err
}
@ -346,14 +355,23 @@ var sealBenchCmd = &cli.Command{
winningpost1 := time.Now()
log.Info("computing winning post snark (hot)")
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
if err != nil {
return err
}
candidates := make([]saproof7.SectorInfo, len(xcandidates))
for i, xsi := range xcandidates {
candidates[i] = saproof7.SectorInfo{
SealedCID: xsi.SealedCID,
SectorNumber: xsi.SectorNumber,
SealProof: xsi.SealProof,
}
}
winnningpost2 := time.Now()
pvi1 := saproof2.WinningPoStVerifyInfo{
pvi1 := saproof7.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof1,
ChallengedSectors: candidates,
@ -369,7 +387,7 @@ var sealBenchCmd = &cli.Command{
verifyWinningPost1 := time.Now()
pvi2 := saproof2.WinningPoStVerifyInfo{
pvi2 := saproof7.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof2,
ChallengedSectors: candidates,
@ -386,7 +404,7 @@ var sealBenchCmd = &cli.Command{
verifyWinningPost2 := time.Now()
log.Info("computing window post snark (cold)")
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
if err != nil {
return err
}
@ -394,7 +412,7 @@ var sealBenchCmd = &cli.Command{
windowpost1 := time.Now()
log.Info("computing window post snark (hot)")
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
if err != nil {
return err
}
@ -502,10 +520,10 @@ type ParCfg struct {
Commit int
}
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) {
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof7.ExtendedSectorInfo, error) {
var pieces []abi.PieceInfo
sealTimings := make([]SealingResult, numSectors)
sealedSectors := make([]saproof2.SectorInfo, numSectors)
sealedSectors := make([]saproof7.ExtendedSectorInfo, numSectors)
preCommit2Sema := make(chan struct{}, par.PreCommit2)
commitSema := make(chan struct{}, par.Commit)
@ -579,10 +597,11 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now()
<-preCommit2Sema
sealedSectors[i] = saproof2.SectorInfo{
sealedSectors[i] = saproof7.ExtendedSectorInfo{
SealProof: sid.ProofType,
SectorNumber: i,
SealedCID: cids.Sealed,
SectorKey: nil,
}
seed := lapi.SealSeed{

View File

@ -470,6 +470,8 @@ var stateList = []stateMeta{
{col: color.FgBlue, state: sealing.Empty},
{col: color.FgBlue, state: sealing.WaitDeals},
{col: color.FgBlue, state: sealing.AddPiece},
{col: color.FgBlue, state: sealing.SnapDealsWaitDeals},
{col: color.FgBlue, state: sealing.SnapDealsAddPiece},
{col: color.FgRed, state: sealing.UndefinedSectorState},
{col: color.FgYellow, state: sealing.Packing},
@ -488,6 +490,12 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.SubmitCommitAggregate},
{col: color.FgYellow, state: sealing.CommitAggregateWait},
{col: color.FgYellow, state: sealing.FinalizeSector},
{col: color.FgYellow, state: sealing.SnapDealsPacking},
{col: color.FgYellow, state: sealing.UpdateReplica},
{col: color.FgYellow, state: sealing.ProveReplicaUpdate},
{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
{col: color.FgCyan, state: sealing.Terminating},
{col: color.FgCyan, state: sealing.TerminateWait},
@ -495,6 +503,7 @@ var stateList = []stateMeta{
{col: color.FgCyan, state: sealing.TerminateFailed},
{col: color.FgCyan, state: sealing.Removing},
{col: color.FgCyan, state: sealing.Removed},
{col: color.FgCyan, state: sealing.AbortUpgrade},
{col: color.FgRed, state: sealing.FailedUnrecoverable},
{col: color.FgRed, state: sealing.AddPieceFailed},
@ -512,6 +521,9 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.RemoveFailed},
{col: color.FgRed, state: sealing.DealsExpired},
{col: color.FgRed, state: sealing.RecoverDealIDs},
{col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
{col: color.FgRed, state: sealing.SnapDealsDealsExpired},
{col: color.FgRed, state: sealing.ReplicaUpdateFailed},
}
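Read together, the new states trace the snap-deals pipeline. A hedged sketch of the assumed happy path, with ordering inferred from the state names and the rest of this PR rather than from the FSM table itself:

```go
// Assumed snap-deals happy path through the sealing FSM (sketch):
var snapDealsHappyPath = []sealing.SectorState{
	sealing.SnapDealsWaitDeals,    // upgraded CC sector waits for deal pieces
	sealing.SnapDealsAddPiece,     // deal data is written into the sector
	sealing.SnapDealsPacking,      // remaining space is filled
	sealing.UpdateReplica,         // new replica encoded over the sector key
	sealing.ProveReplicaUpdate,    // update proof generated
	sealing.SubmitReplicaUpdate,   // ProveReplicaUpdates message pushed
	sealing.ReplicaUpdateWait,     // wait for the message to land on-chain
	sealing.FinalizeReplicaUpdate, // caches trimmed; sector resumes proving
}
```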
func init() {

View File

@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
@ -50,11 +51,13 @@ var sectorsCmd = &cli.Command{
sectorsExtendCmd,
sectorsTerminateCmd,
sectorsRemoveCmd,
sectorsSnapUpCmd,
sectorsMarkForUpgradeCmd,
sectorsStartSealCmd,
sectorsSealDelayCmd,
sectorsCapacityCollateralCmd,
sectorsBatching,
sectorsRefreshPieceMatchingCmd,
},
}
@ -1476,6 +1479,44 @@ var sectorsRemoveCmd = &cli.Command{
},
}
var sectorsSnapUpCmd = &cli.Command{
Name: "snap-up",
Usage: "Mark a committed capacity sector to be filled with deals",
ArgsUsage: "<sectorNum>",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 1 {
return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get network version: %w", err)
}
if nv < network.Version15 {
return xerrors.Errorf("snap deals upgrades enabled in network v15")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), true)
},
}
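snap-up and mark-for-upgrade gate on the v15 boundary from opposite sides (e.g. `lotus-miner sectors snap-up 42` only succeeds once the network has reached v15). A hypothetical helper expressing the shared check, not part of this PR:

```go
// Hypothetical: snap-up requires nv >= 15, mark-for-upgrade requires nv < 15.
func checkUpgradeNetworkVersion(nv network.Version, snap bool) error {
	if snap && nv < network.Version15 {
		return xerrors.Errorf("snap deals upgrades are only enabled from network v15")
	}
	if !snap && nv >= network.Version15 {
		return xerrors.Errorf("classic cc upgrades are disabled in network v15 and beyond; use `snap-up`")
	}
	return nil
}
```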
var sectorsMarkForUpgradeCmd = &cli.Command{
Name: "mark-for-upgrade",
Usage: "Mark a committed capacity sector for replacement by a sector with deals",
@ -1490,14 +1531,28 @@ var sectorsMarkForUpgradeCmd = &cli.Command{
return err
}
defer closer()
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get network version: %w", err)
}
if nv >= network.Version15 {
return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id))
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false)
},
}
@ -2000,6 +2055,25 @@ var sectorsBatchingPendingPreCommit = &cli.Command{
},
}
var sectorsRefreshPieceMatchingCmd = &cli.Command{
Name: "match-pending-pieces",
Usage: "force a refreshed match of pending pieces to open sectors without manually waiting for more deals",
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if err := nodeApi.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil {
return err
}
return nil
},
}
func yesno(b bool) string {
if b {
return color.GreenString("YES")

View File

@ -163,6 +163,16 @@ var runCmd = &cli.Command{
Usage: "enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap)",
Value: true,
},
&cli.BoolFlag{
Name: "replica-update",
Usage: "enable replica update",
Value: true,
},
&cli.BoolFlag{
Name: "prove-replica-update2",
Usage: "enable prove replica update 2",
Value: true,
},
&cli.IntFlag{
Name: "parallel-fetch-limit",
Usage: "maximum fetch operations to run in parallel",
@ -251,7 +261,7 @@ var runCmd = &cli.Command{
var taskTypes []sealtasks.TaskType
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize)
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize)
if cctx.Bool("addpiece") {
taskTypes = append(taskTypes, sealtasks.TTAddPiece)
@ -268,6 +278,12 @@ var runCmd = &cli.Command{
if cctx.Bool("commit") {
taskTypes = append(taskTypes, sealtasks.TTCommit2)
}
if cctx.Bool("replicaupdate") {
taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
}
if cctx.Bool("prove-replica-update2") {
taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
}
if len(taskTypes) == 0 {
return xerrors.Errorf("no task types specified")

View File

@ -78,7 +78,7 @@ func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool,
return false, nil
}
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
panic("should not be called")
}
func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {

View File

@ -119,6 +119,7 @@
* [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
* [SectorGetSealDelay](#SectorGetSealDelay)
* [SectorMarkForUpgrade](#SectorMarkForUpgrade)
* [SectorMatchPendingPiecesToOpenSectors](#SectorMatchPendingPiecesToOpenSectors)
* [SectorPreCommitFlush](#SectorPreCommitFlush)
* [SectorPreCommitPending](#SectorPreCommitPending)
* [SectorRemove](#SectorRemove)
@ -215,7 +216,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -358,12 +359,15 @@ Inputs:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
}
],
"Bw=="
"Bw==",
10101,
15
]
```
@ -2474,12 +2478,22 @@ Perms: admin
Inputs:
```json
[
9
9,
true
]
```
Response: `{}`
### SectorMatchPendingPiecesToOpenSectors
Perms: admin
Inputs: `null`
Response: `{}`
### SectorPreCommitFlush
SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
Returns null if message wasn't sent

View File

@ -741,7 +741,7 @@ Perms: admin
Inputs: `null`
Response: `131328`
Response: `131584`
## Add

View File

@ -283,7 +283,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -2579,6 +2579,7 @@ Response:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}

View File

@ -289,7 +289,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -2591,6 +2591,7 @@ Response:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}

View File

@ -1514,23 +1514,25 @@ USAGE:
lotus-miner sectors command [command options] [arguments...]
COMMANDS:
status Get the seal status of a sector by its number
list List sectors
refs List References to sectors
update-state ADVANCED: manually update the state of a sector, this may aid in error recovery
pledge store random data in a sector
check-expire Inspect expiring sectors
expired Get or cleanup expired sectors
renew Renew expiring sectors while not exceeding each sector's max life
extend Extend sector expiration
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals
seal Manually start sealing a sector (filling any unused space with junk)
set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts
get-cc-collateral Get the collateral required to pledge a committed capacity sector
batching manage batch sector operations
help, h Shows a list of commands or help for one command
status Get the seal status of a sector by its number
list List sectors
refs List References to sectors
update-state ADVANCED: manually update the state of a sector, this may aid in error recovery
pledge store random data in a sector
check-expire Inspect expiring sectors
expired Get or cleanup expired sectors
renew Renew expiring sectors while not exceeding each sector's max life
extend Extend sector expiration
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
snap-up Mark a committed capacity sector to be filled with deals
mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals
seal Manually start sealing a sector (filling any unused space with junk)
set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts
get-cc-collateral Get the collateral required to pledge a committed capacity sector
batching manage batch sector operations
match-pending-pieces force a refreshed match of pending pieces to open sectors, without waiting for more deals to arrive
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
@ -1746,6 +1748,19 @@ OPTIONS:
```
### lotus-miner sectors snap-up
```
NAME:
lotus-miner sectors snap-up - Mark a committed capacity sector to be filled with deals
USAGE:
lotus-miner sectors snap-up [command options] <sectorNum>
OPTIONS:
--help, -h show help (default: false)
```
### lotus-miner sectors mark-for-upgrade
```
NAME:
@ -1846,6 +1861,19 @@ OPTIONS:
```
### lotus-miner sectors match-pending-pieces
```
NAME:
lotus-miner sectors match-pending-pieces - force a refreshed match of pending pieces to open sectors, without waiting for more deals to arrive
USAGE:
lotus-miner sectors match-pending-pieces [command options] [arguments...]
OPTIONS:
--help, -h show help (default: false)
```
## lotus-miner proving
```
NAME:

View File

@ -44,6 +44,8 @@ OPTIONS:
--unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true)
--precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true)
--commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true)
--replica-update enable replica update (default: true)
--prove-replica-update2 enable prove replica update 2 (default: true)
--parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5)
--timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m")
--help, -h show help (default: false)

View File

@ -424,6 +424,12 @@
# env var: LOTUS_STORAGE_ALLOWUNSEAL
#AllowUnseal = true
# env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE
#AllowReplicaUpdate = true
# env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2
#AllowProveReplicaUpdate2 = true
# env var: LOTUS_STORAGE_RESOURCEFILTERING
#ResourceFiltering = "hardware"

View File

@ -714,7 +714,6 @@ func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
if err != nil {
return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err)
}
return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil
}
@ -854,6 +853,14 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef,
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker
}

View File

@ -19,6 +19,7 @@ import (
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -180,16 +181,16 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage
func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
sis := make([]proof2.SectorInfo, len(seals))
xsis := make([]proof7.ExtendedSectorInfo, len(seals))
for i, s := range seals {
sis[i] = proof2.SectorInfo{
xsis[i] = proof7.ExtendedSectorInfo{
SealProof: s.ref.ProofType,
SectorNumber: s.ref.ID.Number,
SealedCID: s.cids.Sealed,
}
}
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness)
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, xsis, randomness)
if len(skipped) > 0 {
require.Error(t, err)
require.EqualValues(t, skipped, skp)
@ -200,7 +201,16 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
t.Fatalf("%+v", err)
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof2.WindowPoStVerifyInfo{
sis := make([]proof7.SectorInfo, len(seals))
for i, xsi := range xsis {
sis[i] = proof7.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof7.WindowPoStVerifyInfo{
Randomness: randomness,
Proofs: proofs,
ChallengedSectors: sis,

View File

@ -4,9 +4,7 @@ import (
"context"
"io"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -36,11 +34,11 @@ type Storage interface {
}
type Verifier interface {
VerifySeal(proof5.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error)
VerifySeal(proof.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
}
@ -49,7 +47,7 @@ type Verifier interface {
type Prover interface {
// TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here
AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
}
type SectorProvider interface {

View File

@ -11,16 +11,16 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
ffiproof "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
if err != nil {
return nil, err
}
@ -32,12 +32,13 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return ffi.GenerateWinningPoSt(minerID, privsectors, randomness)
}
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
if err != nil {
return nil, nil, xerrors.Errorf("gathering sector info: %w", err)
}
defer done()
if len(skipped) > 0 {
@ -53,11 +54,10 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s
Number: f,
})
}
return proof, faultyIDs, err
}
func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
func (sb *Sealer) pubExtendedSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
fmap := map[abi.SectorNumber]struct{}{}
for _, fault := range faults {
fmap[fault] = struct{}{}
@ -81,14 +81,32 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
ID: abi.SectorID{Miner: mid, Number: s.SectorNumber},
ProofType: s.SealProof,
}
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
proveUpdate := s.SectorKey != nil
var cache string
var sealed string
if proveUpdate {
log.Debugf("Posting over updated sector for sector id: %d", s.SectorNumber)
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTUpdateCache|storiface.FTUpdate, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire FTUpdateCache and FTUpdate of sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
}
doneFuncs = append(doneFuncs, d)
cache = paths.UpdateCache
sealed = paths.Update
} else {
log.Debugf("Posting over sector key sector for sector id: %d", s.SectorNumber)
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire FTCache and FTSealed of sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
}
doneFuncs = append(doneFuncs, d)
cache = paths.Cache
sealed = paths.Sealed
}
doneFuncs = append(doneFuncs, d)
postProofType, err := rpt(s.SealProof)
if err != nil {
@ -96,11 +114,16 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err)
}
ffiInfo := ffiproof.SectorInfo{
SealProof: s.SealProof,
SectorNumber: s.SectorNumber,
SealedCID: s.SealedCID,
}
out = append(out, ffi.PrivateSectorInfo{
CacheDirPath: paths.Cache,
CacheDirPath: cache,
PoStProofType: postProofType,
SealedSectorPath: paths.Sealed,
SectorInfo: s,
SealedSectorPath: sealed,
SectorInfo: ffiInfo,
})
}
@ -113,19 +136,19 @@ type proofVerifier struct{}
var ProofVerifier = proofVerifier{}
func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) {
func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) {
return ffi.VerifySeal(info)
}
func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (proofVerifier) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) {
return ffi.VerifyAggregateSeals(aggregate)
}
func (proofVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
func (proofVerifier) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) {
return ffi.SectorUpdate.VerifyUpdateProof(update)
}
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
defer span.End()
@ -133,7 +156,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningP
return ffi.VerifyWinningPoSt(info)
}
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
defer span.End()

View File

@ -98,11 +98,13 @@ type SealerConfig struct {
ParallelFetchLimit int
// Local worker config
AllowAddPiece bool
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
AllowUnseal bool
AllowAddPiece bool
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
AllowUnseal bool
AllowReplicaUpdate bool
AllowProveReplicaUpdate2 bool
// ResourceFiltering instructs the system which resource filtering strategy
// to use when evaluating tasks against this worker. An empty value defaults
@ -144,7 +146,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
go m.sched.runSched()
localTasks := []sealtasks.TaskType{
sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch,
}
if sc.AllowAddPiece {
localTasks = append(localTasks, sealtasks.TTAddPiece)
@ -161,6 +163,12 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
if sc.AllowUnseal {
localTasks = append(localTasks, sealtasks.TTUnseal)
}
if sc.AllowReplicaUpdate {
localTasks = append(localTasks, sealtasks.TTReplicaUpdate)
}
if sc.AllowProveReplicaUpdate2 {
localTasks = append(localTasks, sealtasks.TTProveReplicaUpdate2)
}
wcfg := WorkerConfig{
IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled,
@ -584,6 +592,23 @@ func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef
return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil)
}
func (m *Manager) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUpdateCache|storiface.FTUpdate); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); err != nil {
return xerrors.Errorf("removing update cache: %w", err)
}
if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); err != nil {
return xerrors.Errorf("removing update: %w", err)
}
return nil
}
func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
ctx, cancel := context.WithCancel(ctx)
@ -666,7 +691,7 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
log.Errorf("manager is doing replica update")
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces)
if err != nil {
return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err)
@ -677,7 +702,7 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
waitRes := func() {
p, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
waitErr = xerrors.Errorf("waitWork: %w", werr)
return
}
if p != nil {
@ -697,17 +722,17 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
log.Errorf("scheduled work for replica update")
err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces))
if err != nil {
return err
return xerrors.Errorf("startWork: %w", err)
}
waitRes()
return nil
})
if err != nil {
return storage.ReplicaUpdateOut{}, err
return storage.ReplicaUpdateOut{}, xerrors.Errorf("Schedule: %w", err)
}
return out, waitErr
}
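The SealerConfig fields added earlier in this file map one-to-one onto worker task types. A hedged example of a config that accepts snap-deals work alongside the default task set (surrounding wiring assumed):

```go
// Example: with the two new flags set, the local worker also advertises
// sealtasks.TTReplicaUpdate and sealtasks.TTProveReplicaUpdate2.
cfg := SealerConfig{
	ParallelFetchLimit:       10,
	AllowAddPiece:            true,
	AllowPreCommit1:          true,
	AllowPreCommit2:          true,
	AllowCommit:              true,
	AllowUnseal:              true,
	AllowReplicaUpdate:       true,
	AllowProveReplicaUpdate2: true,
}
```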

View File

@ -10,9 +10,7 @@ import (
"math/rand"
"sync"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/dagstore/mount"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
@ -39,7 +37,7 @@ type SectorMgr struct {
}
type mockVerifProver struct {
aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies
aggregates map[string]proof.AggregateSealVerifyProofAndInfos // used for logging bad verifies
}
func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
@ -336,14 +334,23 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
}
}
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
mgr.lk.Lock()
defer mgr.lk.Unlock()
sectorInfo := make([]proof.SectorInfo, len(xSectorInfo))
for i, xssi := range xSectorInfo {
sectorInfo[i] = proof.SectorInfo{
SealProof: xssi.SealProof,
SectorNumber: xssi.SectorNumber,
SealedCID: xssi.SealedCID,
}
}
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
}
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
mgr.lk.Lock()
defer mgr.lk.Unlock()
@ -351,22 +358,22 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return nil, nil, xerrors.Errorf("failed to post (mock)")
}
si := make([]proof5.SectorInfo, 0, len(sectorInfo))
si := make([]proof.ExtendedSectorInfo, 0, len(xSectorInfo))
var skipped []abi.SectorID
var err error
for _, info := range sectorInfo {
for _, xsi := range xSectorInfo {
sid := abi.SectorID{
Miner: minerID,
Number: info.SectorNumber,
Number: xsi.SectorNumber,
}
_, found := mgr.sectors[sid]
if found && !mgr.sectors[sid].failed && !mgr.sectors[sid].corrupted {
si = append(si, info)
si = append(si, xsi)
} else {
skipped = append(skipped, sid)
err = xerrors.Errorf("skipped some sectors")
@ -377,10 +384,19 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return nil, skipped, err
}
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
sectorInfo := make([]proof.SectorInfo, len(si))
for i, xssi := range si {
sectorInfo[i] = proof.SectorInfo{
SealProof: xssi.SealProof,
SectorNumber: xssi.SectorNumber,
SealedCID: xssi.SealedCID,
}
}
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
}
func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte {
func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte {
randomness[31] &= 0x3f
hasher := sha256.New()
@ -395,13 +411,13 @@ func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRa
}
func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof {
func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof {
wp, err := rpt(sectorInfo[0].SealProof)
if err != nil {
panic(err)
}
return []proof5.PoStProof{
return []proof.PoStProof{
{
PoStProof: wp,
ProofBytes: generateFakePoStProof(sectorInfo, randomness),
@ -465,6 +481,14 @@ func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.Sector
return nil
}
func (mgr *SectorMgr) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
return nil
}
func (mgr *SectorMgr) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
return nil
}
func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error {
mgr.lk.Lock()
defer mgr.lk.Unlock()
@ -553,7 +577,7 @@ func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callI
panic("not supported")
}
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize()
if err != nil {
return false, err
@ -574,7 +598,7 @@ func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil
}
func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (m mockVerifProver) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) {
out := make([]byte, m.aggLen(len(aggregate.Infos)))
for pi, svi := range aggregate.Infos {
for i := 0; i < 32; i++ {
@ -600,11 +624,11 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
return ok, nil
}
func (m mockVerifProver) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
func (m mockVerifProver) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) {
return true, nil
}
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length
for pi, proof := range proofs {
for i := range proof[:32] {
@ -646,12 +670,12 @@ func (m mockVerifProver) aggLen(nproofs int) int {
}
}
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
return true, nil
}
func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
@ -674,7 +698,7 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context,
}
var MockVerifier = mockVerifProver{
aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{},
aggregates: map[string]proof.AggregateSealVerifyProofAndInfos{},
}
var MockProver = MockVerifier

View File

@ -7,7 +7,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@ -23,11 +23,11 @@ type testExec struct {
apch chan chan apres
}
func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
panic("implement me")
}
func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
panic("implement me")
}
@ -59,6 +59,14 @@ func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef)
panic("implement me")
}
func (t *testExec) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}

View File

@ -143,7 +143,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{184, 26}); err != nil {
if _, err := w.Write([]byte{184, 32}); err != nil {
return err
}
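The only change to the marshaller's preamble is the map-size byte: 184 is 0xb8, a CBOR major-type-5 (map) header whose one-byte length follows, and that length grows from 26 to 32 because SectorInfo gains six snap-deals fields (CCUpdate, CCPieces, UpdateSealed, UpdateUnsealed, ReplicaUpdateProof, ReplicaUpdateMessage). A hedged sketch of the same two bytes written via cbor-gen's helper instead of the hardcoded literal:

package sealing

import (
	"io"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// writeSectorInfoMapHeader emits {0xb8, 0x20}: a CBOR map header carrying
// a uint8 field count of 32, matching the literal the generated code writes.
func writeSectorInfoMapHeader(w io.Writer) error {
	scratch := make([]byte, 9)
	return cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, 32)
}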
@ -573,6 +573,137 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.CCUpdate (bool) (bool)
if len("CCUpdate") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"CCUpdate\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCUpdate"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("CCUpdate")); err != nil {
return err
}
if err := cbg.WriteBool(w, t.CCUpdate); err != nil {
return err
}
// t.CCPieces ([]sealing.Piece) (slice)
if len("CCPieces") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"CCPieces\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCPieces"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("CCPieces")); err != nil {
return err
}
if len(t.CCPieces) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.CCPieces was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CCPieces))); err != nil {
return err
}
for _, v := range t.CCPieces {
if err := v.MarshalCBOR(w); err != nil {
return err
}
}
// t.UpdateSealed (cid.Cid) (struct)
if len("UpdateSealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"UpdateSealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateSealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("UpdateSealed")); err != nil {
return err
}
if t.UpdateSealed == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.UpdateSealed); err != nil {
return xerrors.Errorf("failed to write cid field t.UpdateSealed: %w", err)
}
}
// t.UpdateUnsealed (cid.Cid) (struct)
if len("UpdateUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"UpdateUnsealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateUnsealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("UpdateUnsealed")); err != nil {
return err
}
if t.UpdateUnsealed == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.UpdateUnsealed); err != nil {
return xerrors.Errorf("failed to write cid field t.UpdateUnsealed: %w", err)
}
}
// t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice)
if len("ReplicaUpdateProof") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateProof"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("ReplicaUpdateProof")); err != nil {
return err
}
if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaUpdateProof))); err != nil {
return err
}
if _, err := w.Write(t.ReplicaUpdateProof[:]); err != nil {
return err
}
// t.ReplicaUpdateMessage (cid.Cid) (struct)
if len("ReplicaUpdateMessage") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateMessage"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("ReplicaUpdateMessage")); err != nil {
return err
}
if t.ReplicaUpdateMessage == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.ReplicaUpdateMessage); err != nil {
return xerrors.Errorf("failed to write cid field t.ReplicaUpdateMessage: %w", err)
}
}
// t.FaultReportMsg (cid.Cid) (struct)
if len("FaultReportMsg") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long")
@ -1166,6 +1297,145 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error {
}
t.InvalidProofs = uint64(extra)
}
// t.CCUpdate (bool) (bool)
case "CCUpdate":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.CCUpdate = false
case 21:
t.CCUpdate = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
// t.CCPieces ([]sealing.Piece) (slice)
case "CCPieces":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if extra > cbg.MaxLength {
return fmt.Errorf("t.CCPieces: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.CCPieces = make([]Piece, extra)
}
for i := 0; i < int(extra); i++ {
var v Piece
if err := v.UnmarshalCBOR(br); err != nil {
return err
}
t.CCPieces[i] = v
}
// t.UpdateSealed (cid.Cid) (struct)
case "UpdateSealed":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.UpdateSealed: %w", err)
}
t.UpdateSealed = &c
}
}
// t.UpdateUnsealed (cid.Cid) (struct)
case "UpdateUnsealed":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.UpdateUnsealed: %w", err)
}
t.UpdateUnsealed = &c
}
}
// t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice)
case "ReplicaUpdateProof":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.ReplicaUpdateProof = make([]uint8, extra)
}
if _, err := io.ReadFull(br, t.ReplicaUpdateProof[:]); err != nil {
return err
}
// t.ReplicaUpdateMessage (cid.Cid) (struct)
case "ReplicaUpdateMessage":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.ReplicaUpdateMessage: %w", err)
}
t.ReplicaUpdateMessage = &c
}
}
// t.FaultReportMsg (cid.Cid) (struct)
case "FaultReportMsg":

View File

@ -35,6 +35,9 @@ type ErrInvalidProof struct{ error }
type ErrNoPrecommit struct{ error }
type ErrCommitWaitFailed struct{ error }
type ErrBadRU struct{ error }
type ErrBadPR struct{ error }
func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error {
tok, height, err := api.ChainHead(ctx)
if err != nil {
@ -187,3 +190,32 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
return nil
}
// check that sector info is good after running a replica update
func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error {
if err := checkPieces(ctx, maddr, si, api); err != nil {
return err
}
if !si.CCUpdate {
return xerrors.Errorf("replica update on sector not marked for update")
}
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok)
if err != nil {
return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
}
if si.UpdateUnsealed == nil || !commD.Equals(*si.UpdateUnsealed) {
return &ErrBadRU{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.UpdateUnsealed)}
}
if si.UpdateSealed == nil {
return &ErrBadRU{xerrors.Errorf("nil sealed cid")}
}
if si.ReplicaUpdateProof == nil {
return &ErrBadPR{xerrors.Errorf("nil PR2 proof")}
}
return nil
}
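Callers branch on the concrete error type, as handleSubmitReplicaUpdateFailed does later in this diff: ErrApi means back off, ErrBadRU means redo the replica update, ErrBadPR means redo the update proofs, and anything else is unrecoverable here. A condensed sketch of that dispatch (event payloads elided to zero values; the real handler also runs failedCooldown between retries):

// replicaUpdateRetryEvent maps checkReplicaUpdate failures onto FSM events.
// Sketch of the failure-handler dispatch pattern used in this PR.
func replicaUpdateRetryEvent(err error) interface{} {
	switch err.(type) {
	case *ErrApi:
		return nil // transient API failure: send nothing, try again later
	case *ErrBadRU:
		return SectorRetryReplicaUpdate{} // recompute the replica update
	case *ErrBadPR:
		return SectorRetryProveReplicaUpdate{} // recompute the PR1/PR2 proofs
	case *ErrInvalidDeals:
		return SectorInvalidDealIDs{}
	case *ErrExpiredDeals:
		return SectorDealsExpired{}
	default:
		return SectorSubmitReplicaUpdateFailed{}
	}
}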

View File

@ -133,6 +133,44 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorFinalizeFailed{}, FinalizeFailed),
),
// Snap deals
SnapDealsWaitDeals: planOne(
on(SectorAddPiece{}, SnapDealsAddPiece),
on(SectorStartPacking{}, SnapDealsPacking),
),
SnapDealsAddPiece: planOne(
on(SectorPieceAdded{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}),
apply(SectorAddPiece{}),
on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed),
),
SnapDealsPacking: planOne(
on(SectorPacked{}, UpdateReplica),
),
UpdateReplica: planOne(
on(SectorReplicaUpdate{}, ProveReplicaUpdate),
on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
),
ProveReplicaUpdate: planOne(
on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate),
on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
),
SubmitReplicaUpdate: planOne(
on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait),
on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed),
),
ReplicaUpdateWait: planOne(
on(SectorReplicaUpdateLanded{}, FinalizeReplicaUpdate),
on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed),
on(SectorAbortUpgrade{}, AbortUpgrade),
),
FinalizeReplicaUpdate: planOne(
on(SectorFinalized{}, Proving),
),
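Taken together, the planners above encode the snap-deals happy path; a sketch of the forward state sequence (one hop per event, failure edges omitted), handy when reading sector logs:

// Illustrative only: the no-failure traversal defined by the planners above.
var snapDealsHappyPath = []SectorState{
	Proving,               // SectorStartCCUpdate enters the upgrade flow
	SnapDealsWaitDeals,    // SectorAddPiece
	SnapDealsAddPiece,     // SectorPieceAdded (loops back to wait for more)
	SnapDealsWaitDeals,    // SectorStartPacking
	SnapDealsPacking,      // SectorPacked
	UpdateReplica,         // SectorReplicaUpdate
	ProveReplicaUpdate,    // SectorProveReplicaUpdate
	SubmitReplicaUpdate,   // SectorReplicaUpdateSubmitted
	ReplicaUpdateWait,     // SectorReplicaUpdateLanded
	FinalizeReplicaUpdate, // SectorFinalized
	Proving,
}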
// Sealing errors
AddPieceFailed: planOne(
@ -188,11 +226,37 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
onReturning(SectorUpdateDealIDs{}),
),
// Snap Deals Errors
SnapDealsAddPieceFailed: planOne(
on(SectorRetryWaitDeals{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}),
apply(SectorAddPiece{}),
),
SnapDealsDealsExpired: planOne(
on(SectorAbortUpgrade{}, AbortUpgrade),
),
SnapDealsRecoverDealIDs: planOne(
on(SectorUpdateDealIDs{}, SubmitReplicaUpdate),
on(SectorAbortUpgrade{}, AbortUpgrade),
),
AbortUpgrade: planOneOrIgnore(
on(SectorRevertUpgradeToProving{}, Proving),
),
ReplicaUpdateFailed: planOne(
on(SectorRetrySubmitReplicaUpdateWait{}, ReplicaUpdateWait),
on(SectorRetrySubmitReplicaUpdate{}, SubmitReplicaUpdate),
on(SectorRetryReplicaUpdate{}, UpdateReplica),
on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
),
// Post-seal
Proving: planOne(
on(SectorFaultReported{}, FaultReported),
on(SectorFaulty{}, Faulty),
on(SectorStartCCUpdate{}, SnapDealsWaitDeals),
),
Terminating: planOne(
on(SectorTerminating{}, TerminateWait),
@ -209,7 +273,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
TerminateFailed: planOne(
// SectorTerminating (global)
),
Removing: planOne(
Removing: planOneOrIgnore(
on(SectorRemoved{}, Removed),
on(SectorRemoveFailed{}, RemoveFailed),
),
@ -355,13 +419,6 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
log.Errorw("update sector stats", "error", err)
}
// todo: drop this, use Context iface everywhere
wrapCtx := func(f func(Context, SectorInfo) error) func(statemachine.Context, SectorInfo) error {
return func(ctx statemachine.Context, info SectorInfo) error {
return f(&ctx, info)
}
}
switch state.State {
// Happy path
case Empty:
@ -403,6 +460,24 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
case FinalizeSector:
return m.handleFinalizeSector, processed, nil
// Snap deals updates
case SnapDealsWaitDeals:
return m.handleWaitDeals, processed, nil
case SnapDealsAddPiece:
return m.handleAddPiece, processed, nil
case SnapDealsPacking:
return m.handlePacking, processed, nil
case UpdateReplica:
return m.handleReplicaUpdate, processed, nil
case ProveReplicaUpdate:
return m.handleProveReplicaUpdate, processed, nil
case SubmitReplicaUpdate:
return m.handleSubmitReplicaUpdate, processed, nil
case ReplicaUpdateWait:
return m.handleReplicaUpdateWait, processed, nil
case FinalizeReplicaUpdate:
return m.handleFinalizeReplicaUpdate, processed, nil
// Handled failure modes
case AddPieceFailed:
return m.handleAddPieceFailed, processed, nil
@ -426,7 +501,20 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
case DealsExpired:
return m.handleDealsExpired, processed, nil
case RecoverDealIDs:
return wrapCtx(m.HandleRecoverDealIDs), processed, nil
return m.HandleRecoverDealIDs, processed, nil
// Snap Deals failure modes
case SnapDealsAddPieceFailed:
return m.handleAddPieceFailed, processed, nil
case SnapDealsDealsExpired:
return m.handleDealsExpiredSnapDeals, processed, nil
case SnapDealsRecoverDealIDs:
return m.handleSnapDealsRecoverDealIDs, processed, nil
case ReplicaUpdateFailed:
return m.handleSubmitReplicaUpdateFailed, processed, nil
case AbortUpgrade:
return m.handleAbortUpgrade, processed, nil
// Post-seal
case Proving:
@ -642,3 +730,16 @@ func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err e
return uint64(len(events)), nil
}
}
// planOneOrIgnore is like planOne, but logs and ignores events it has no handler for instead of
// erroring; this avoids having to handle every possible event when a state is forcibly overridden
func planOneOrIgnore(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
f := planOne(ts...)
return func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
cnt, err := f(events, state)
if err != nil {
log.Warnf("planOneOrIgnore: ignoring error from planOne: %s", err)
}
return cnt, nil
}
}
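The practical difference shows up only when a state receives an event it has no transition for: planOne surfaces the error and stalls the sector, while the wrapper logs it and moves on, which is what a forced override (such as Removing an upgrading sector) needs. A hedged sketch, assuming the event and state types from this package:

// Sketch: feed an unhandled event to both planners and observe that only
// the strict one propagates the error.
func examplePlannerLeniency() {
	strict := planOne(on(SectorRemoved{}, Removed))
	lenient := planOneOrIgnore(on(SectorRemoved{}, Removed))

	evts := []statemachine.Event{{User: SectorFaulty{}}}
	if _, err := strict(evts, &SectorInfo{}); err != nil {
		log.Warnf("strict planner rejected event: %s", err) // expected
	}
	if _, err := lenient(evts, &SectorInfo{}); err != nil {
		log.Warnf("never reached: the wrapper swallows planner errors")
	}
}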

View File

@ -295,6 +295,46 @@ type SectorFinalizeFailed struct{ error }
func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorFinalizeFailed) apply(*SectorInfo) {}
// Snap deals // CC update path
type SectorStartCCUpdate struct{}
func (evt SectorStartCCUpdate) apply(state *SectorInfo) {
state.CCUpdate = true
// Clear filler piece but remember in case of abort
state.CCPieces = state.Pieces
state.Pieces = nil
}
type SectorReplicaUpdate struct {
Out storage.ReplicaUpdateOut
}
func (evt SectorReplicaUpdate) apply(state *SectorInfo) {
state.UpdateSealed = &evt.Out.NewSealed
state.UpdateUnsealed = &evt.Out.NewUnsealed
}
type SectorProveReplicaUpdate struct {
Proof storage.ReplicaUpdateProof
}
func (evt SectorProveReplicaUpdate) apply(state *SectorInfo) {
state.ReplicaUpdateProof = evt.Proof
}
type SectorReplicaUpdateSubmitted struct {
Message cid.Cid
}
func (evt SectorReplicaUpdateSubmitted) apply(state *SectorInfo) {
state.ReplicaUpdateMessage = &evt.Message
}
type SectorReplicaUpdateLanded struct{}
func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {}
// Failed state recovery
type SectorRetrySealPreCommit1 struct{}
@ -351,6 +391,60 @@ func (evt SectorUpdateDealIDs) apply(state *SectorInfo) {
}
}
// Snap Deals failure and recovery
type SectorRetryReplicaUpdate struct{}
func (evt SectorRetryReplicaUpdate) apply(state *SectorInfo) {}
type SectorRetryProveReplicaUpdate struct{}
func (evt SectorRetryProveReplicaUpdate) apply(state *SectorInfo) {}
type SectorUpdateReplicaFailed struct{ error }
func (evt SectorUpdateReplicaFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorUpdateReplicaFailed) apply(state *SectorInfo) {}
type SectorProveReplicaUpdateFailed struct{ error }
func (evt SectorProveReplicaUpdateFailed) FormatError(xerrors.Printer) (next error) {
return evt.error
}
func (evt SectorProveReplicaUpdateFailed) apply(state *SectorInfo) {}
type SectorAbortUpgrade struct{ error }
func (evt SectorAbortUpgrade) apply(state *SectorInfo) {}
func (evt SectorAbortUpgrade) FormatError(xerrors.Printer) (next error) {
return evt.error
}
type SectorRevertUpgradeToProving struct{}
func (evt SectorRevertUpgradeToProving) apply(state *SectorInfo) {
// cleanup sector state so that it is back in proving
state.CCUpdate = false
state.UpdateSealed = nil
state.UpdateUnsealed = nil
state.ReplicaUpdateProof = nil
state.ReplicaUpdateMessage = nil
state.Pieces = state.CCPieces
state.CCPieces = nil
}
type SectorRetrySubmitReplicaUpdateWait struct{}
func (evt SectorRetrySubmitReplicaUpdateWait) apply(state *SectorInfo) {}
type SectorRetrySubmitReplicaUpdate struct{}
func (evt SectorRetrySubmitReplicaUpdate) apply(state *SectorInfo) {}
type SectorSubmitReplicaUpdateFailed struct{}
func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {}
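SectorStartCCUpdate and SectorRevertUpgradeToProving are written as inverses: entering the upgrade parks the existing filler pieces in CCPieces, and aborting restores them while zeroing every snap-deals field. A small sketch asserting that round trip, using the SectorInfo fields added later in this diff:

// Sketch: start-then-revert should leave the sector's piece list intact
// and all CC-update state cleared.
func exampleUpgradeRoundTrip(si *SectorInfo) {
	pieces := len(si.Pieces)

	SectorStartCCUpdate{}.apply(si)          // Pieces -> CCPieces
	SectorRevertUpgradeToProving{}.apply(si) // CCPieces -> Pieces

	if len(si.Pieces) != pieces || si.CCUpdate || si.CCPieces != nil {
		log.Errorf("revert did not restore sector %d", si.SectorNumber)
	}
}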
// Faults
type SectorFaulty struct{}

View File

@ -59,6 +59,8 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e
return ctx.Send(SectorAddPiece{})
},
number: sector.SectorNumber,
ccUpdate: sector.CCUpdate,
}
} else {
// make sure we're only accounting for pieces which were correctly added
@ -329,6 +331,17 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec
return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
}
func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error {
sp, err := m.currentSealProof(ctx)
if err != nil {
return xerrors.Errorf("failed to get current seal proof: %w", err)
}
log.Debug("pieces to sector matching waiting for lock")
m.inputLk.Lock()
defer m.inputLk.Unlock()
return m.updateInput(ctx, sp)
}
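This is the sealing-side implementation behind the SectorMatchPendingPiecesToOpenSectors RPC added earlier in this PR; after marking a sector for a snap upgrade, an operator or test can force a re-match of queued pieces. A sketch of the call from any StorageMiner client (client wiring assumed):

// triggerRematch asks the miner to retry matching pending deal pieces
// against open sectors, including ones newly opened by a snap upgrade.
func triggerRematch(ctx context.Context, miner api.StorageMiner) error {
	return miner.SectorMatchPendingPiecesToOpenSectors(ctx)
}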
// called with m.inputLk
func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error {
ssize, err := sp.SectorSize()
@ -356,8 +369,33 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
toAssign[proposalCid] = struct{}{}
memo := make(map[abi.SectorNumber]abi.ChainEpoch)
expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) {
if exp, ok := memo[sn]; ok {
return exp, nil
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{})
if err != nil {
return 0, err
}
memo[sn] = onChainInfo.Expiration
return onChainInfo.Expiration, nil
}
for id, sector := range m.openSectors {
avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used
// check that sector lifetime is long enough to fit deal using latest expiration from on chain
ok, err := sector.dealFitsInLifetime(piece.deal.DealProposal.EndEpoch, expF)
if err != nil {
log.Errorf("failed to check expiration for cc Update sector %d", sector.number)
continue
}
if !ok {
exp, _ := expF(sector.number)
log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch)
continue
}
if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding)
matches = append(matches, match{
@ -416,6 +454,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
}
if len(toAssign) > 0 {
log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors)
if err := m.tryCreateDealSector(ctx, sp); err != nil {
log.Errorw("Failed to create a new sector for deals", "error", err)
}

View File

@ -213,6 +213,21 @@ func (mr *MockSealingAPIMockRecorder) StateMarketStorageDealProposal(arg0, arg1,
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDealProposal", reflect.TypeOf((*MockSealingAPI)(nil).StateMarketStorageDealProposal), arg0, arg1, arg2)
}
// StateMinerActiveSectors mocks base method.
func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2)
ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors.
func (mr *MockSealingAPIMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockSealingAPI)(nil).StateMinerActiveSectors), arg0, arg1, arg2)
}
// StateMinerAvailableBalance mocks base method.
func (m *MockSealingAPI) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) {
m.ctrl.T.Helper()

View File

@ -63,6 +63,7 @@ type SealingAPI interface {
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
StateMinerActiveSectors(context.Context, address.Address, TipSetToken) ([]*miner.SectorOnChainInfo, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error)
StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
@ -121,11 +122,24 @@ type Sealing struct {
}
type openSector struct {
used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
number abi.SectorNumber
ccUpdate bool
maybeAccept func(cid.Cid) error // called with inputLk
}
func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF func(sn abi.SectorNumber) (abi.ChainEpoch, error)) (bool, error) {
if !o.ccUpdate {
return true, nil
}
expiration, err := expF(o.number)
if err != nil {
return false, err
}
return expiration >= dealEnd, nil
}
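updateInput (earlier in this diff) pairs this predicate with a memoized expiration lookup so that matching many pieces against the same sector costs one chain query per sector. A condensed sketch of that pairing, with the StateSectorGetInfo call stubbed behind a lookup function:

// Sketch: memoize on-chain expirations, then ask whether a deal ending at
// dealEnd fits any open sector's remaining lifetime.
func anySectorFits(sectors []*openSector, dealEnd abi.ChainEpoch,
	lookup func(abi.SectorNumber) (abi.ChainEpoch, error)) bool {

	memo := map[abi.SectorNumber]abi.ChainEpoch{}
	expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) {
		if exp, ok := memo[sn]; ok {
			return exp, nil
		}
		exp, err := lookup(sn) // real code: StateSectorGetInfo(...).Expiration
		if err != nil {
			return 0, err
		}
		memo[sn] = exp
		return exp, nil
	}

	for _, s := range sectors {
		if ok, err := s.dealFitsInLifetime(dealEnd, expF); err == nil && ok {
			return true
		}
	}
	return false
}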
type pendingPiece struct {
size abi.UnpaddedPieceSize
deal api.PieceDealInfo

View File

@ -3,50 +3,65 @@ package sealing
type SectorState string
var ExistSectorStateList = map[SectorState]struct{}{
Empty: {},
WaitDeals: {},
Packing: {},
AddPiece: {},
AddPieceFailed: {},
GetTicket: {},
PreCommit1: {},
PreCommit2: {},
PreCommitting: {},
PreCommitWait: {},
SubmitPreCommitBatch: {},
PreCommitBatchWait: {},
WaitSeed: {},
Committing: {},
CommitFinalize: {},
CommitFinalizeFailed: {},
SubmitCommit: {},
CommitWait: {},
SubmitCommitAggregate: {},
CommitAggregateWait: {},
FinalizeSector: {},
Proving: {},
FailedUnrecoverable: {},
SealPreCommit1Failed: {},
SealPreCommit2Failed: {},
PreCommitFailed: {},
ComputeProofFailed: {},
CommitFailed: {},
PackingFailed: {},
FinalizeFailed: {},
DealsExpired: {},
RecoverDealIDs: {},
Faulty: {},
FaultReported: {},
FaultedFinal: {},
Terminating: {},
TerminateWait: {},
TerminateFinality: {},
TerminateFailed: {},
Removing: {},
RemoveFailed: {},
Removed: {},
Empty: {},
WaitDeals: {},
Packing: {},
AddPiece: {},
AddPieceFailed: {},
GetTicket: {},
PreCommit1: {},
PreCommit2: {},
PreCommitting: {},
PreCommitWait: {},
SubmitPreCommitBatch: {},
PreCommitBatchWait: {},
WaitSeed: {},
Committing: {},
CommitFinalize: {},
CommitFinalizeFailed: {},
SubmitCommit: {},
CommitWait: {},
SubmitCommitAggregate: {},
CommitAggregateWait: {},
FinalizeSector: {},
Proving: {},
FailedUnrecoverable: {},
SealPreCommit1Failed: {},
SealPreCommit2Failed: {},
PreCommitFailed: {},
ComputeProofFailed: {},
CommitFailed: {},
PackingFailed: {},
FinalizeFailed: {},
DealsExpired: {},
RecoverDealIDs: {},
Faulty: {},
FaultReported: {},
FaultedFinal: {},
Terminating: {},
TerminateWait: {},
TerminateFinality: {},
TerminateFailed: {},
Removing: {},
RemoveFailed: {},
Removed: {},
SnapDealsWaitDeals: {},
SnapDealsAddPiece: {},
SnapDealsPacking: {},
UpdateReplica: {},
ProveReplicaUpdate: {},
SubmitReplicaUpdate: {},
ReplicaUpdateWait: {},
FinalizeReplicaUpdate: {},
SnapDealsAddPieceFailed: {},
SnapDealsDealsExpired: {},
SnapDealsRecoverDealIDs: {},
ReplicaUpdateFailed: {},
AbortUpgrade: {},
}
// cmd/lotus-miner/info.go defines CLI colors corresponding to these states
// update that file when adding new states
const (
UndefinedSectorState SectorState = ""
@ -79,6 +94,17 @@ const (
FinalizeSector SectorState = "FinalizeSector"
Proving SectorState = "Proving"
// snap deals / cc update
SnapDealsWaitDeals SectorState = "SnapDealsWaitDeals"
SnapDealsAddPiece SectorState = "SnapDealsAddPiece"
SnapDealsPacking SectorState = "SnapDealsPacking"
UpdateReplica SectorState = "UpdateReplica"
ProveReplicaUpdate SectorState = "ProveReplicaUpdate"
SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate"
ReplicaUpdateWait SectorState = "ReplicaUpdateWait"
FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate"
// error modes
FailedUnrecoverable SectorState = "FailedUnrecoverable"
AddPieceFailed SectorState = "AddPieceFailed"
@ -92,6 +118,13 @@ const (
DealsExpired SectorState = "DealsExpired"
RecoverDealIDs SectorState = "RecoverDealIDs"
// snap deals error modes
SnapDealsAddPieceFailed SectorState = "SnapDealsAddPieceFailed"
SnapDealsDealsExpired SectorState = "SnapDealsDealsExpired"
SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs"
AbortUpgrade SectorState = "AbortUpgrade"
ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed"
Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
FaultedFinal SectorState = "FaultedFinal" // fault declared on chain
@ -108,11 +141,11 @@ const (
func toStatState(st SectorState, finEarly bool) statSectorState {
switch st {
case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed:
case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed, SnapDealsWaitDeals, SnapDealsAddPiece:
return sstStaging
case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector:
case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector, SnapDealsPacking, UpdateReplica, ProveReplicaUpdate, FinalizeReplicaUpdate:
return sstSealing
case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait:
case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, SubmitReplicaUpdate, ReplicaUpdateWait:
if finEarly {
// we use statSectorState for throttling storage use. With FinalizeEarly
// we can consider sectors in states after CommitFinalize as finalized, so

View File

@ -1,11 +1,13 @@
package sealing
import (
"context"
"time"
"github.com/hashicorp/go-multierror"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -181,6 +183,67 @@ func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector Sect
return ctx.Send(SectorRetryComputeProof{})
}
func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sector SectorInfo) error {
if sector.ReplicaUpdateMessage != nil {
mw, err := m.Api.StateSearchMsg(ctx.Context(), *sector.ReplicaUpdateMessage)
if err != nil {
// API error
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
}
if mw == nil {
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
case exitcode.SysErrOutOfGas:
return ctx.Send(SectorRetrySubmitReplicaUpdate{})
default:
// something else went wrong
}
}
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
return nil
}
if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err)
return nil
case *ErrBadRU:
log.Errorf("bad replica update: %+v", err)
return ctx.Send(SectorRetryReplicaUpdate{})
case *ErrBadPR:
log.Errorf("bad PR1: +%v", err)
return ctx.Send(SectorRetryProveReplicaUpdate{})
case *ErrInvalidDeals:
return ctx.Send(SectorInvalidDealIDs{})
case *ErrExpiredDeals:
return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
default:
log.Errorf("sanity check error, not proceeding: +%v", err)
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
}
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetrySubmitReplicaUpdate{})
}
func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
@ -319,61 +382,40 @@ func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo
return ctx.Send(SectorRemove{})
}
func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
tok, height, err := m.Api.ChainHead(ctx.Context())
func (m *Sealing) handleDealsExpiredSnapDeals(ctx statemachine.Context, sector SectorInfo) error {
if !sector.CCUpdate {
// Should be impossible
return xerrors.Errorf("should never reach SnapDealsDealsExpired as a non-CCUpdate sector")
}
return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("one of the upgrade deals expired")})
}
func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo) error {
if !sector.CCUpdate {
return xerrors.Errorf("should never reach AbortUpgrade as a non-CCUpdate sector")
}
// Remove snap deals replica if any
if err := m.sealer.ReleaseReplicaUpgrade(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
return xerrors.Errorf("removing CC update files from sector storage")
}
return ctx.Send(SectorRevertUpgradeToProving{})
}
// failWith is a mutator or global mutator
func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error {
toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
return err
}
var toFix []int
paddingPieces := 0
for i, p := range sector.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID)
}
paddingPieces++
continue
}
proposal, err := m.Api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok)
if err != nil {
log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
toFix = append(toFix, i)
continue
}
if proposal.Provider != m.maddr {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, m.maddr)
toFix = append(toFix, i)
continue
}
if proposal.PieceCID != p.Piece.PieceCID {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)
toFix = append(toFix, i)
continue
}
if p.Piece.Size != proposal.PieceSize {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)
toFix = append(toFix, i)
continue
}
if height >= proposal.StartEpoch {
// TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces
// (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval)
return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)
}
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
return err
}
failed := map[int]error{}
updates := map[int]abi.DealID{}
for _, i := range toFix {
p := sector.Pieces[i]
@ -381,7 +423,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
// TODO: check if we are in an early enough state try to remove this piece
log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID)
// Not much to do here (and this can only happen for old spacerace sectors)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
var dp *market.DealProposal
@ -416,7 +458,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
if len(failed)+paddingPieces == len(sector.Pieces) {
log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
// todo: try to remove bad pieces (hard; see the todo above)
@ -424,9 +466,73 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
// for now removing sectors is probably better than having them stuck in RecoverDealIDs
// and expire anyways
log.Errorf("removing sector %d: deals expired or unrecoverable: %+v", sector.SectorNumber, merr)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
// Not much to do here, we can't go back in time to commit this sector
return ctx.Send(SectorUpdateDealIDs{Updates: updates})
}
func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorRemove{})
}
func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{})
}
func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return nil, 0, xerrors.Errorf("getting chain head: %w", err)
}
var toFix []int
paddingPieces := 0
for i, p := range sector.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID)
}
paddingPieces++
continue
}
proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok)
if err != nil {
log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
toFix = append(toFix, i)
continue
}
if proposal.Provider != maddr {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, maddr)
toFix = append(toFix, i)
continue
}
if proposal.PieceCID != p.Piece.PieceCID {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)
toFix = append(toFix, i)
continue
}
if p.Piece.Size != proposal.PieceSize {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)
toFix = append(toFix, i)
continue
}
if height >= proposal.StartEpoch {
// TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces
// (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval)
return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)
}
}
return toFix, paddingPieces, nil
}

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/filecoin-project/go-state-types/network"
statemachine "github.com/filecoin-project/go-statemachine"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
@ -25,6 +26,7 @@ import (
)
func TestStateRecoverDealIDs(t *testing.T) {
t.Skip("Bring this back when we can correctly mock a state machine context: Issue #7867")
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@ -40,7 +42,7 @@ func TestStateRecoverDealIDs(t *testing.T) {
sctx := mocks.NewMockContext(mockCtrl)
sctx.EXPECT().Context().AnyTimes().Return(ctx)
api.EXPECT().ChainHead(ctx).Times(1).Return(nil, abi.ChainEpoch(10), nil)
api.EXPECT().ChainHead(ctx).Times(2).Return(nil, abi.ChainEpoch(10), nil)
var dealId abi.DealID = 12
dealProposal := market.DealProposal{
@ -70,7 +72,9 @@ func TestStateRecoverDealIDs(t *testing.T) {
sctx.EXPECT().Send(sealing.SectorRemove{}).Return(nil)
err := fakeSealing.HandleRecoverDealIDs(sctx, sealing.SectorInfo{
// TODO sctx should satisfy an interface so it can be usable for mocking. This will fail because we are passing in an empty context now to get this to build.
// https://github.com/filecoin-project/lotus/issues/7867
err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, sealing.SectorInfo{
Pieces: []sealing.Piece{
{
DealInfo: &api2.PieceDealInfo{

View File

@ -0,0 +1,209 @@
package sealing
import (
"bytes"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
statemachine "github.com/filecoin-project/go-statemachine"
api "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"golang.org/x/xerrors"
)
func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
switch err.(type) {
case *ErrApi:
log.Errorf("handleReplicaUpdate: api error, not proceeding: %+v", err)
return nil
case *ErrInvalidDeals:
log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
return ctx.Send(SectorInvalidDealIDs{})
case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
default:
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
}
out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos())
if err != nil {
return ctx.Send(SectorUpdateReplicaFailed{xerrors.Errorf("replica update failed: %w", err)})
}
return ctx.Send(SectorReplicaUpdate{
Out: out,
})
}
func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
if sector.UpdateSealed == nil || sector.UpdateUnsealed == nil {
return xerrors.Errorf("invalid sector %d with nil UpdateSealed or UpdateUnsealed output", sector.SectorNumber)
}
if sector.CommR == nil {
return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber)
}
vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed)
if err != nil {
return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)})
}
proof, err := m.sealer.ProveReplicaUpdate2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed, vanillaProofs)
if err != nil {
return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (2) failed: %w", err)})
}
return ctx.Send(SectorProveReplicaUpdate{
Proof: proof,
})
}
func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
sl, err := m.Api.StateSectorPartition(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
updateProof, err := sector.SectorType.RegisteredUpdateProof()
if err != nil {
log.Errorf("failed to get update proof type from seal proof: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
enc := new(bytes.Buffer)
params := &miner.ProveReplicaUpdatesParams{
Updates: []miner.ReplicaUpdate{
{
SectorID: sector.SectorNumber,
Deadline: sl.Deadline,
Partition: sl.Partition,
NewSealedSectorCID: *sector.UpdateSealed,
Deals: sector.dealIDs(),
UpdateProofType: updateProof,
ReplicaProof: sector.ReplicaUpdateProof,
},
},
}
if err := params.MarshalCBOR(enc); err != nil {
log.Errorf("failed to serialize update replica params: %w", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting config: %w", err)
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
sp, err := m.currentSealProof(ctx.Context())
if err != nil {
log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err)
return nil
}
virtualPCI := miner.SectorPreCommitInfo{
SealProof: sp,
SectorNumber: sector.SectorNumber,
SealedCID: *sector.UpdateSealed,
//SealRandEpoch: 0,
DealIDs: sector.dealIDs(),
Expiration: onChainInfo.Expiration,
//ReplaceCapacity: false,
//ReplaceSectorDeadline: 0,
//ReplaceSectorPartition: 0,
//ReplaceSectorNumber: 0,
}
collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, tok)
if err != nil {
return xerrors.Errorf("getting initial pledge collateral: %w", err)
}
collateral = big.Sub(collateral, onChainInfo.InitialPledge)
if collateral.LessThan(big.Zero()) {
collateral = big.Zero()
}
collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral)
if err != nil {
log.Errorf("collateral send amount failed not proceeding: %+v", err)
return nil
}
goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
if err != nil {
log.Errorf("no good address to send replica update message from: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid})
}
func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector SectorInfo) error {
if sector.ReplicaUpdateMessage == nil {
log.Errorf("handleReplicaUpdateWait: no replica update message cid recorded")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage)
if err != nil {
log.Errorf("handleReplicaUpdateWait: failed to wait for message: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
//expected
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas:
log.Errorf("gas estimator was wrong or out of funds")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
default:
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok)
if err != nil {
log.Errorf("api err failed to get sector info: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
if si == nil {
log.Errorf("api err sector not found")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
if !si.SealedCID.Equals(*sector.UpdateSealed) {
log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)
return ctx.Send(SectorAbortUpgrade{})
}
return ctx.Send(SectorReplicaUpdateLanded{})
}
func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
return ctx.Send(SectorFinalized{})
}
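The collateral arithmetic in handleSubmitReplicaUpdate above sends only the pledge delta: what a fresh sector with these deals would have to pledge, minus what the CC sector already locked, floored at zero (for example, a required pledge of 300 against 120 already locked tops up 180). As a worked sketch:

// pledgeDelta computes the top-up pledge for a snap upgrade; never negative.
func pledgeDelta(required, alreadyLocked big.Int) big.Int {
	delta := big.Sub(required, alreadyLocked)
	if delta.LessThan(big.Zero()) {
		return big.Zero()
	}
	return delta
}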

View File

@ -280,8 +280,8 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo)
}
// TODO: We should probably invoke this method in most (if not all) state transition failures after handlePreCommitting
func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) {
err := m.MarkForUpgrade(sid)
func (m *Sealing) remarkForUpgrade(ctx context.Context, sid abi.SectorNumber) {
err := m.MarkForUpgrade(ctx, sid)
if err != nil {
log.Errorf("error re-marking sector %d as for upgrade: %+v", sid, err)
}
@ -424,7 +424,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
if err != nil {
if params.ReplaceCapacity {
m.remarkForUpgrade(params.ReplaceSectorNumber)
m.remarkForUpgrade(ctx.Context(), params.ReplaceSectorNumber)
}
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
}

View File

@ -73,7 +73,7 @@ type SectorInfo struct {
// PreCommit2
CommD *cid.Cid
CommR *cid.Cid
CommR *cid.Cid // SectorKey
Proof []byte
PreCommitInfo *miner.SectorPreCommitInfo
@ -91,6 +91,14 @@ type SectorInfo struct {
CommitMessage *cid.Cid
InvalidProofs uint64 // failed proof computations (doesn't validate with proof inputs; can't compute)
// CCUpdate
CCUpdate bool
CCPieces []Piece
UpdateSealed *cid.Cid
UpdateUnsealed *cid.Cid
ReplicaUpdateProof storage.ReplicaUpdateProof
ReplicaUpdateMessage *cid.Cid
// Faults
FaultReportMsg *cid.Cid

View File

@ -4,6 +4,7 @@ import (
"context"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
"golang.org/x/xerrors"
@ -18,7 +19,8 @@ func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool {
return found
}
func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
m.upgradeLk.Lock()
defer m.upgradeLk.Unlock()
@ -27,6 +29,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
return xerrors.Errorf("sector %d already marked for upgrade", id)
}
si, err := m.GetSectorInfo(id)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
}
if si.State != Proving {
return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade")
}
if len(si.Pieces) != 1 {
return xerrors.Errorf("not a committed-capacity sector, expected 1 piece")
}
if si.Pieces[0].DealInfo != nil {
return xerrors.Errorf("not a committed-capacity sector, has deals")
}
m.toUpgrade[id] = struct{}{}
return nil
}
func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) error {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting storage config: %w", err)
}
curStaging := m.stats.curStaging()
if cfg.MaxWaitDealsSectors > 0 && curStaging >= cfg.MaxWaitDealsSectors {
return xerrors.Errorf("already waiting for deals in %d >= %d (cfg.MaxWaitDealsSectors) sectors, no free resources to wait for deals in another",
curStaging, cfg.MaxWaitDealsSectors)
}
si, err := m.GetSectorInfo(id)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
@ -44,11 +77,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
return xerrors.Errorf("not a committed-capacity sector, has deals")
}
// TODO: more checks to match actor constraints
tok, head, err := m.Api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("couldnt get chain head: %w", err)
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, id, tok)
if err != nil {
return xerrors.Errorf("failed to read sector on chain info: %w", err)
}
m.toUpgrade[id] = struct{}{}
active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok)
if err != nil {
return xerrors.Errorf("failed to check active sectors: %w", err)
}
// Ensure the upgraded sector is active
var found bool
for _, si := range active {
if si.SectorNumber == id {
found = true
break
}
}
if !found {
return xerrors.Errorf("cannot mark inactive sector for upgrade")
}
return nil
if onChainInfo.Expiration-head < market7.DealMinDuration {
return xerrors.Errorf("pointless to upgrade sector %d, expiration %d is less than a min deal duration away from current epoch."+
"Upgrade expiration before marking for upgrade", id, onChainInfo.Expiration)
}
return m.sectors.Send(uint64(id), SectorStartCCUpdate{})
}
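MarkForSnapUpgrade thus gates on four things before sending SectorStartCCUpdate: free staging capacity, a single deal-free piece, membership in the miner's active sector set, and enough remaining lifetime to host a minimum-duration deal. The lifetime test reduces to a one-line predicate, sketched here with epochs taken from ChainHead and StateSectorGetInfo:

// upgradeWorthwhile: a CC sector is only worth upgrading if a deal of the
// minimum on-chain duration could still run before the sector expires.
func upgradeWorthwhile(expiration, head abi.ChainEpoch) bool {
	return expiration-head >= market7.DealMinDuration
}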
func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int {

8
go.mod
View File

@ -50,8 +50,8 @@ require (
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.4
github.com/filecoin-project/specs-actors/v6 v6.0.1
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
@ -173,3 +173,7 @@ require (
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
//replace github.com/filecoin-project/specs-actors/v7 => /Users/zenground0/pl/repos/specs-actors
// replace github.com/filecoin-project/specs-storage => /Users/zenground0/pl/repos/specs-storage

12
go.sum
View File

@ -341,7 +341,6 @@ github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MU
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0=
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts=
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@ -357,7 +356,6 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd
github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo=
github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus=
github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8=
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY=
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
@ -376,10 +374,11 @@ github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVi
github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew=
github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec h1:KV9vE+Sl2Y3qKsrpba4HcE7wHwK7v6O5U/S0xHbje6A=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff h1:JO62nquOGhjoDf9+JkAcV+wsD5yhoyIKOMj70ZNdD3Q=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a h1:MS1mtAhZh0iSE7OxP1bb6+UNyYKsxg8n51FpHlX1d54=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -722,7 +721,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE=
github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4=
github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE=

View File

@ -6,14 +6,17 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TODO: This needs to be repurposed into a SnapDeals test suite
func TestCCUpgrade(t *testing.T) {
kit.QuietMiningLogs()
@ -29,37 +32,43 @@ func TestCCUpgrade(t *testing.T) {
}
}
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode {
ctx := context.Background()
blockTime := 5 * time.Millisecond
blockTime := 1 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeHeight))
ens.InterconnectAll().BeginMining(blockTime)
client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMiningMustPost(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
Upgraded := CC + 1
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
// wait for deadline 0 to pass so that committing starts after post on preseals.
// This gives post the maximum time to complete, minimizing the chance of a timeout.
// waitForDeadline(ctx, t, 1, client, maddr)
miner.PledgeSectors(ctx, 1, 0, nil)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
require.Equal(t, CC, sl[0], "unexpected sector number")
require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(si.Expiration))
}
waitForSectorActive(ctx, t, CCUpgrade, client, maddr)
err = miner.SectorMarkForUpgrade(ctx, sl[0])
err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
require.NoError(t, err)
sl, err = miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
dh := kit.NewDealHarness(t, client, miner, miner)
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
Rseed: 6,
@ -68,37 +77,112 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath)
// Validate upgrade
{
exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
if err != nil {
require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up
} else {
require.NoError(t, err)
require.NotNil(t, exp)
require.Greater(t, 50000, int(exp.OnTime))
}
}
{
exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(exp.OnTime))
}
status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
require.NoError(t, err)
assert.Equal(t, 1, len(status.Deals))
return client
}
func waitForDeadline(ctx context.Context, t *testing.T, waitIdx uint64, node *kit.TestFullNode, maddr address.Address) {
for {
ts, err := node.ChainHead(ctx)
require.NoError(t, err)
dl, err := node.StateMinerProvingDeadline(ctx, maddr, ts.Key())
require.NoError(t, err)
if dl.Index == waitIdx {
return
}
}
}
func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, node *kit.TestFullNode, maddr address.Address) {
for {
active, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for _, si := range active {
if si.SectorNumber == sn {
fmt.Printf("ACTIVE\n")
return
}
}
time.Sleep(time.Second)
}
}
func waitForSectorStartUpgrade(ctx context.Context, t *testing.T, sn abi.SectorNumber, miner *kit.TestMiner) {
for {
si, err := miner.StorageMiner.SectorsStatus(ctx, sn, false)
require.NoError(t, err)
if si.State != api.SectorState("Proving") {
t.Logf("Done proving sector in state: %s", si.State)
return
}
}
}
func TestCCUpgradeAndPoSt(t *testing.T) {
kit.QuietMiningLogs()
t.Run("upgrade and then post", func(t *testing.T) {
ctx := context.Background()
n := runTestCCUpgrade(t, 100)
ts, err := n.ChainHead(ctx)
require.NoError(t, err)
start := ts.Height()
// wait for a full proving period
t.Log("waiting for chain")
n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
if ts.Height() > start+abi.ChainEpoch(2880) {
return true
}
return false
})
})
}
func TestTooManyMarkedForUpgrade(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
blockTime := 1 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMiningMustPost(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
waitForDeadline(ctx, t, 1, client, maddr)
miner.PledgeSectors(ctx, 3, 0, nil)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 3, "expected 3 sectors")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(si.Expiration))
}
waitForSectorActive(ctx, t, CCUpgrade, client, maddr)
waitForSectorActive(ctx, t, CCUpgrade+1, client, maddr)
waitForSectorActive(ctx, t, CCUpgrade+2, client, maddr)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade, true)
require.NoError(t, err)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+1, true)
require.NoError(t, err)
waitForSectorStartUpgrade(ctx, t, CCUpgrade, miner)
waitForSectorStartUpgrade(ctx, t, CCUpgrade+1, miner)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+2, true)
require.Error(t, err)
assert.Contains(t, err.Error(), "no free resources to wait for deals")
}

View File

@ -1,13 +1,18 @@
package kit
import (
"bytes"
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
aminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/stretchr/testify/require"
)
@ -30,6 +35,176 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
}
}
type partitionTracker struct {
partitions []api.Partition
posted bitfield.BitField
}
func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
require.NoError(bm.t, err)
dl := dlines[dlIdx]
parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
require.NoError(bm.t, err)
return &partitionTracker{
partitions: parts,
posted: dl.PostSubmissions,
}
}
func (p *partitionTracker) count(t *testing.T) uint64 {
pCnt, err := p.posted.Count()
require.NoError(t, err)
return pCnt
}
func (p *partitionTracker) done(t *testing.T) bool {
return uint64(len(p.partitions)) == p.count(t)
}
func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, smsg *types.SignedMessage) (ret bool) {
defer func() {
ret = p.done(t)
}()
msg := smsg.Message
if !(msg.To == bm.miner.ActorAddr) {
return
}
if msg.Method != aminer.Methods.SubmitWindowedPoSt {
return
}
params := aminer.SubmitWindowedPoStParams{}
require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params)))
for _, part := range params.Partitions {
p.posted.Set(part.Index)
}
return
}
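Usage sketch (assuming a running BlockMiner bm and a deadline index dlIdx; this mirrors the loop inside MineBlocksMustPost below): point the tracker at the pending mpool to see whether every partition in the deadline already has a window post submitted or queued.

tracker := newPartitionTracker(ctx, dlIdx, bm)
msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
require.NoError(bm.t, err)
for _, msg := range msgs {
tracker.recordIfPost(bm.t, bm, msg)
}
bm.t.Logf("posted %d of %d partitions", tracker.count(bm.t), len(tracker.partitions))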
// Like MineBlocks, but refuses to mine until the window PoSt scheduler has wdpost messages in the mempool,
// and shuts everything down if a post fails. It also enforces that every block mined succeeds.
func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
time.Sleep(3 * time.Second)
// wrap context in a cancellable context.
ctx, bm.cancel = context.WithCancel(ctx)
bm.wg.Add(1)
go func() {
defer bm.wg.Done()
activeDeadlines := make(map[int]struct{})
_ = activeDeadlines
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
wait := make(chan bool)
chg, err := bm.miner.FullNode.ChainNotify(ctx)
require.NoError(bm.t, err)
// read current out
curr := <-chg
require.Equal(bm.t, ts.Height(), curr[0].Val.Height())
for {
select {
case <-time.After(blocktime):
case <-ctx.Done():
return
}
nulls := atomic.SwapInt64(&bm.nextNulls, 0)
require.Equal(bm.t, int64(0), nulls, "Injecting > 0 null blocks while `MustPost` mining is currently unsupported")
// Wake up and figure out if we are at the end of an active deadline
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
tsk := ts.Key()
dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, tsk)
require.NoError(bm.t, err)
if ts.Height()+1 == dlinfo.Last() { // Last epoch in dline, we need to check that miner has posted
tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
if !tracker.done(bm.t) { // need to wait for post
bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
poolEvts, err := bm.miner.FullNode.MpoolSub(ctx)
require.NoError(bm.t, err)
// First check pending messages we'll mine this epoch
msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
require.NoError(bm.t, err)
for _, msg := range msgs {
tracker.recordIfPost(bm.t, bm, msg)
}
// post not yet in mpool, wait for it
if !tracker.done(bm.t) {
bm.t.Logf("post missing from mpool, block mining suspended until it arrives")
POOL:
for {
bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key())
select {
case <-ctx.Done():
return
case evt := <-poolEvts:
bm.t.Logf("pool event: %d", evt.Type)
if evt.Type == api.MpoolAdd {
bm.t.Logf("incoming message %v", evt.Message)
if tracker.recordIfPost(bm.t, bm, evt.Message) {
break POOL
}
}
}
}
bm.t.Logf("done waiting on mpool")
}
}
}
var target abi.ChainEpoch
reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
require.NoError(bm.t, err)
target = epoch
wait <- success
}
var success bool
for i := int64(0); !success; i++ {
err = bm.miner.MineOne(ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls + i),
Done: reportSuccessFn,
})
success = <-wait
}
// Wait until the block shows up on the given full node's ChainHead.
// TODO this replicates a flaky condition from MineUntil;
// it would be better to use the api to wait for sync,
// but currently that is a bit difficult,
// and the flaky failure is easy to debug and retry.
nloops := 200
for i := 0; i < nloops; i++ {
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
if ts.Height() == target {
break
}
require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
time.Sleep(time.Millisecond * 10)
}
switch {
case err == nil: // wrap around
case ctx.Err() != nil: // context fired.
return
default: // log error
bm.t.Error(err)
}
}
}()
}
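Typical test wiring for the must-post miner, as a sketch (assuming a *testing.T t and a *TestMiner miner; BeginMiningMustPost below wraps exactly this):

ctx := context.Background()
bm := NewBlockMiner(t, miner)
bm.MineBlocksMustPost(ctx, 5*time.Millisecond)
t.Cleanup(bm.Stop)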
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
time.Sleep(time.Second)

View File

@ -104,8 +104,9 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
fmt.Printf("WAIT DEAL SEALEDS START\n")
dh.WaitDealSealed(ctx, deal, false, false, nil)
fmt.Printf("WAIT DEAL SEALEDS END\n")
return deal, res, path
}
@ -176,6 +177,7 @@ loop:
cb()
}
}
fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n")
}
// WaitDealSealedQuiet waits until the deal is sealed, without logging anything.
@ -290,12 +292,11 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
snums, err := dh.main.SectorsList(ctx)
require.NoError(dh.t, err)
for _, snum := range snums {
si, err := dh.main.SectorsStatus(ctx, snum, false)
require.NoError(dh.t, err)
dh.t.Logf("Sector state: %s", si.State)
dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum))
}

View File

@ -675,6 +675,43 @@ func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
return n
}
func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
ctx := context.Background()
// wait one second to make sure that nodes are connected and have handshaken.
// TODO make this deterministic by listening to identify events on the
// libp2p eventbus instead (or something else).
time.Sleep(1 * time.Second)
var bms []*BlockMiner
if len(miners) == 0 {
// no miners have been provided explicitly, instantiate block miners
// for all active miners that aren't already mining.
for _, m := range n.active.miners {
if _, ok := n.active.bms[m]; ok {
continue // skip, already have a block miner
}
miners = append(miners, m)
}
}
if len(miners) > 1 {
n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners))
}
for _, m := range miners {
bm := NewBlockMiner(n.t, m)
bm.MineBlocksMustPost(ctx, blocktime)
n.t.Cleanup(bm.Stop)
bms = append(bms, bm)
n.active.bms[m] = bm
}
return bms
}
// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.

View File

@ -5,6 +5,7 @@ import (
"context"
"sync"
"github.com/filecoin-project/go-bitfield"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -110,7 +111,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
// Watch for a pre-commit message to the provider.
matchEvent := func(msg *types.Message) (bool, error) {
matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch)
matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch || msg.Method == miner.Methods.ProveReplicaUpdates)
return matched, nil
}
@ -145,6 +146,20 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
return false, err
}
// If this is a replica update method that succeeded the deal is active
if msg.Method == miner.Methods.ProveReplicaUpdates {
sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res)
if err != nil {
return false, err
}
if sn != nil {
cb(*sn, true, nil)
return false, nil
}
// Didn't find the deal ID in this message, so keep looking
return true, nil
}
// Extract the message parameters
sn, err := dealSectorInPreCommitMsg(msg, res)
if err != nil {
@ -264,6 +279,42 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr
return nil
}
func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) {
var params miner.ProveReplicaUpdatesParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return nil, xerrors.Errorf("unmarshal prove replica update: %w", err)
}
var seekUpdate miner.ReplicaUpdate
var found bool
for _, update := range params.Updates {
for _, did := range update.Deals {
if did == res.DealID {
seekUpdate = update
found = true
break
}
}
}
if !found {
return nil, nil
}
// check that this update passed validation steps
var successBf bitfield.BitField
if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil {
return nil, xerrors.Errorf("unmarshal return value: %w", err)
}
success, err := successBf.IsSet(uint64(seekUpdate.SectorID))
if err != nil {
return nil, xerrors.Errorf("failed to check success of replica update: %w", err)
}
if !success {
return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID)
}
return &seekUpdate.SectorID, nil
}
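A worked example of the success-bitfield convention decoded above (sector numbers are hypothetical): ProveReplicaUpdates returns a CBOR-encoded bitfield whose set bits are the sector numbers that passed validation.

bf := bitfield.NewFromSet([]uint64{5, 7}) // sectors 5 and 7 updated successfully
ok, err := bf.IsSet(6) // ok == false, err == nil: sector 6 did not pass validation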
// dealSectorInPreCommitMsg tries to find a sector containing the specified deal
func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) {
switch msg.Method {

View File

@ -535,8 +535,12 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
prand := abi.PoStRandomness(rand)
tSeed := build.Clock.Now()
nv, err := m.api.StateNetworkVersion(ctx, base.TipSet.Key())
if err != nil {
return nil, err
}
postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand)
postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand, round, nv)
if err != nil {
err = xerrors.Errorf("failed to compute winning post proof: %w", err)
return nil, err

View File

@ -10,8 +10,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/chain/types"
)
@ -61,13 +60,22 @@ out:
return xerrors.Errorf("getting sector info: %w", err)
}
_, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{
ts, err := m.api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
nv, err := m.api.StateNetworkVersion(ctx, ts.Key())
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
_, err = m.epp.ComputeProof(ctx, []proof7.ExtendedSectorInfo{
{
SealProof: si.SealProof,
SectorNumber: sector,
SealedCID: si.SealedCID,
},
}, r)
}, r, ts.Height(), nv)
if err != nil {
return xerrors.Errorf("failed to compute proof: %w", err)
}

View File

@ -109,10 +109,11 @@ func DefaultStorageMiner() *StorageMiner {
AvailableBalanceBuffer: types.FIL(big.Zero()),
DisableCollateralFallback: false,
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
// XXX snap deals wait deals slack if first
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)),
@ -131,11 +132,13 @@ func DefaultStorageMiner() *StorageMiner {
},
Storage: sectorstorage.SealerConfig{
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
AllowReplicaUpdate: true,
AllowProveReplicaUpdate2: true,
// Default to 10 - tcp should still be able to figure this out, and
// it's the ratio between 10gbit / 1gbit

View File

@ -37,6 +37,7 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@ -386,8 +387,8 @@ func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.Se
return sm.Miner.SectorPreCommitPending(ctx)
}
func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
return sm.Miner.MarkForUpgrade(id)
func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error {
return sm.Miner.MarkForUpgrade(ctx, id, snap)
}
func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) {
@ -398,6 +399,10 @@ func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.Secto
return sm.Miner.CommitPending(ctx)
}
func (sm *StorageMinerAPI) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error {
return sm.Miner.SectorMatchPendingPiecesToOpenSectors(ctx)
}
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := connectRemoteWorker(ctx, sm, url)
if err != nil {
@ -1155,8 +1160,8 @@ func (sm *StorageMinerAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocume
return build.OpenRPCDiscoverJSON_Miner(), nil
}
func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) {
return sm.Epp.ComputeProof(ctx, ssi, rand)
func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) {
return sm.Epp.ComputeProof(ctx, ssi, rand, poStEpoch, nv)
}
func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) {

View File

@ -112,6 +112,15 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr
return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk)
}
func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("faile dto unmarshal TipSetToken to TipSetKey: %w", err)
}
return s.delegate.StateMinerActiveSectors(ctx, maddr, tsk)
}
func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) {
wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {

View File

@ -86,6 +86,7 @@ type fullNodeFilteredAPI interface {
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error)
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error)
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error)
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
@ -282,7 +283,7 @@ func (wpp *StorageWpp) GenerateCandidates(ctx context.Context, randomness abi.Po
return cds, nil
}
func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) {
func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, currEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) {
if build.InsecurePoStValidation {
return []builtin.PoStProof{{ProofBytes: []byte("valid proof")}}, nil
}

View File

@ -71,8 +71,15 @@ func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) {
return m.sealing.CommitPending(ctx)
}
func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error {
return m.sealing.MarkForUpgrade(id)
func (m *Miner) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error {
return m.sealing.MatchPendingPiecesToOpenSectors(ctx)
}
func (m *Miner) MarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error {
if snap {
return m.sealing.MarkForSnapUpgrade(ctx, id)
}
return m.sealing.MarkForUpgrade(ctx, id)
}
func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool {

View File

@ -117,7 +117,7 @@ func (m *mockAPI) startGeneratePoST(
completeGeneratePoST CompleteGeneratePoSTCb,
) context.CancelFunc {
ctx, cancel := context.WithCancel(ctx)
log.Errorf("mock posting\n")
m.statesLk.Lock()
defer m.statesLk.Unlock()
m.postStates[deadline.Open] = postStatusProving

View File

@ -19,8 +19,8 @@ import (
"go.opencensus.io/trace"
"golang.org/x/xerrors"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@ -568,7 +568,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
for retries := 0; ; retries++ {
skipCount := uint64(0)
var partitions []miner.PoStPartition
var sinfos []proof2.SectorInfo
var xsinfos []proof7.ExtendedSectorInfo
for partIdx, partition := range batch {
// TODO: Can do this in parallel
toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
@ -611,14 +611,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
continue
}
sinfos = append(sinfos, ssi...)
xsinfos = append(xsinfos, ssi...)
partitions = append(partitions, miner.PoStPartition{
Index: uint64(batchPartitionStartIdx + partIdx),
Skipped: skipped,
})
}
if len(sinfos) == 0 {
if len(xsinfos) == 0 {
// nothing to prove for this batch
break
}
@ -637,14 +637,22 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
return nil, err
}
postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, append(abi.PoStRandomness{}, rand...))
defer func() {
if r := recover(); r != nil {
log.Errorf("recover: %s", r)
}
}()
postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...))
elapsed := time.Since(tsStart)
log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed)
if err != nil {
log.Errorf("error generating window post: %s", err)
}
if err == nil {
// If we proved nothing, something is very wrong.
if len(postOut) == 0 {
log.Errorf("len(postOut) == 0")
return nil, xerrors.Errorf("received no proofs back from generate window post")
}
@ -665,6 +673,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
}
// If we generated an incorrect proof, try again.
sinfos := make([]proof7.SectorInfo, len(xsinfos))
for i, xsi := range xsinfos {
sinfos[i] = proof7.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
if correct, err := s.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{
Randomness: abi.PoStRandomness(checkRand),
Proofs: postOut,
@ -687,7 +703,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
}
// Proof generation failed, so retry
log.Debugf("Proof generation failed, retry")
if len(ps) == 0 {
// If we didn't skip any new sectors, we failed
// for some other reason and we need to abort.
@ -715,10 +731,8 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
if !somethingToProve {
continue
}
posts = append(posts, params)
}
return posts, nil
}
@ -767,7 +781,7 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
return batches, nil
}
func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof2.SectorInfo, error) {
func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof7.ExtendedSectorInfo, error) {
sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key())
if err != nil {
return nil, err
@ -777,22 +791,24 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors,
return nil, nil
}
substitute := proof2.SectorInfo{
substitute := proof7.ExtendedSectorInfo{
SectorNumber: sset[0].SectorNumber,
SealedCID: sset[0].SealedCID,
SealProof: sset[0].SealProof,
SectorKey: sset[0].SectorKeyCID,
}
sectorByID := make(map[uint64]proof2.SectorInfo, len(sset))
sectorByID := make(map[uint64]proof7.ExtendedSectorInfo, len(sset))
for _, sector := range sset {
sectorByID[uint64(sector.SectorNumber)] = proof2.SectorInfo{
sectorByID[uint64(sector.SectorNumber)] = proof7.ExtendedSectorInfo{
SectorNumber: sector.SectorNumber,
SealedCID: sector.SealedCID,
SealProof: sector.SealProof,
SectorKey: sector.SectorKeyCID,
}
}
proofSectors := make([]proof2.SectorInfo, 0, len(sset))
proofSectors := make([]proof7.ExtendedSectorInfo, 0, len(sset))
if err := allSectors.ForEach(func(sectorNo uint64) error {
if info, found := sectorByID[sectorNo]; found {
proofSectors = append(proofSectors, info)

View File

@ -116,11 +116,11 @@ func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message,
type mockProver struct {
}
func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof7.ExtendedSectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
panic("implement me")
}
func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof2.SectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) {
func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof7.ExtendedSectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) {
return []proof2.PoStProof{
{
PoStProof: abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
@ -132,7 +132,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si
type mockVerif struct {
}
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
panic("implement me")
}

View File

@ -432,7 +432,6 @@ github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MU
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0=
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts=
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@ -448,7 +447,6 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd
github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo=
github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus=
github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8=
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY=
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
@ -467,10 +465,11 @@ github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVi
github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew=
github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec h1:KV9vE+Sl2Y3qKsrpba4HcE7wHwK7v6O5U/S0xHbje6A=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff h1:JO62nquOGhjoDf9+JkAcV+wsD5yhoyIKOMj70ZNdD3Q=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a h1:MS1mtAhZh0iSE7OxP1bb6+UNyYKsxg8n51FpHlX1d54=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
@ -943,7 +942,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE=
github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4=
github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE=