Merge pull request #4873 from filecoin-project/feat/sdr-upgrade
Support seal proof type switching
commit 693bbee953
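At the core of the change, every sealing API that used to take a bare abi.SectorID now takes a storage.SectorRef, which carries the sector's registered seal proof alongside its identity. For reference, a minimal sketch of the type as it is used throughout this diff (the canonical definition lives in filecoin-project/specs-storage):

// SectorRef pairs a sector's identity with the seal proof it was (or will
// be) sealed with, so workers no longer assume one proof type per process.
type SectorRef struct {
	ID        abi.SectorID            // miner actor ID + sector number
	ProofType abi.RegisteredSealProof // e.g. StackedDrg32GiBV1_1 after the upgrade
}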
@@ -373,17 +373,17 @@ type WorkerStruct struct {
 	Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
 	Info  func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`

-	AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
-	SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
-	SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
-	FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
-	UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
-	ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
-	Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+	AddPiece func(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit2 func(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
+	SealCommit1 func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
+	SealCommit2 func(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
+	FinalizeSector func(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	ReleaseUnsealed func(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	MoveStorage func(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
+	UnsealPiece func(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
+	ReadPiece func(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
+	Fetch func(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`

 	Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
 	StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
@@ -1513,47 +1513,47 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
 	return w.Internal.Info(ctx)
 }

-func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
+func (w *WorkerStruct) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
 	return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
 }

-func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
 }

-func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit2(ctx, sector, pc1o)
 }

-func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
 }

-func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
 	return w.Internal.SealCommit2(ctx, sector, c1o)
 }

-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
 }

-func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
 	return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
 }

-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
+func (w *WorkerStruct) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
 	return w.Internal.MoveStorage(ctx, sector, types)
 }

-func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
+func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
 	return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c)
 }

-func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
+func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	return w.Internal.ReadPiece(ctx, sink, sector, offset, size)
 }

-func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+func (w *WorkerStruct) Fetch(ctx context.Context, id storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
 	return w.Internal.Fetch(ctx, id, fileType, ptype, am)
 }
@@ -90,7 +90,7 @@ func init() {
 	addExample(&pid)

 	addExample(bitfield.NewFromSet([]uint64{5}))
-	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
 	addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
 	addExample(abi.ChainEpoch(10101))
 	addExample(crypto.SigTypeBLS)
@@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {

 func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
 	ctx := context.Background()
-	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -109,7 +109,7 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
 var OneFull = DefaultFullOpts(1)
 var TwoFull = DefaultFullOpts(2)

-var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
+var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	return FullNodeOpts{
 		Opts: func(nodes []TestNode) node.Option {
 			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
@@ -122,6 +122,25 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	}
 }

+var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
+	return FullNodeOpts{
+		Opts: func(nodes []TestNode) node.Option {
+			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
+				Network:   network.Version6,
+				Height:    1,
+				Migration: stmgr.UpgradeActorsV2,
+			}, {
+				Network:   network.Version7,
+				Height:    calico,
+				Migration: stmgr.UpgradeCalico,
+			}, {
+				Network: network.Version8,
+				Height:  persian,
+			}})
+		},
+	}
+}
+
 var MineNext = miner.MineReq{
 	InjectNulls: 0,
 	Done:        func(bool, abi.ChainEpoch, error) {},
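A typical use of this hook (TestSDRUpgrade below uses it verbatim) starts a node on actors v2 at epoch 1 and then crosses the two upgrades at the chosen heights:

// Calico (nv7) at epoch 500, Persian (nv8) at epoch 1000; both heights are
// arbitrary test values.
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)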
@@ -3,18 +3,22 @@ package test
 import (
 	"context"
 	"fmt"
+	"sort"
 	"sync/atomic"

 	"strings"
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/network"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
@@ -23,6 +27,90 @@ import (
 	"github.com/filecoin-project/lotus/node/impl"
 )

+func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
+	client := n[0].FullNode.(*impl.FullNodeAPI)
+	miner := sn[0]
+
+	addrinfo, err := client.NetAddrsListen(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := miner.NetConnect(ctx, addrinfo); err != nil {
+		t.Fatal(err)
+	}
+	build.Clock.Sleep(time.Second)
+
+	pledge := make(chan struct{})
+	mine := int64(1)
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		round := 0
+		for atomic.LoadInt64(&mine) != 0 {
+			build.Clock.Sleep(blocktime)
+			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
+
+			}}); err != nil {
+				t.Error(err)
+			}
+
+			// 3 sealing rounds: before, during, and after the upgrade.
+			if round >= 3 {
+				continue
+			}
+
+			head, err := client.ChainHead(ctx)
+			assert.NoError(t, err)
+
+			// rounds happen every 500 blocks, with a 50 block offset.
+			if head.Height() >= abi.ChainEpoch(round*500+50) {
+				round++
+				pledge <- struct{}{}
+
+				ver, err := client.StateNetworkVersion(ctx, head.Key())
+				assert.NoError(t, err)
+				switch round {
+				case 1:
+					assert.Equal(t, network.Version6, ver)
+				case 2:
+					assert.Equal(t, network.Version7, ver)
+				case 3:
+					assert.Equal(t, network.Version8, ver)
+				}
+			}
+		}
+	}()
+
+	// before.
+	pledgeSectors(t, ctx, miner, 9, 0, pledge)
+
+	s, err := miner.SectorsList(ctx)
+	require.NoError(t, err)
+	sort.Slice(s, func(i, j int) bool {
+		return s[i] < s[j]
+	})
+
+	for i, id := range s {
+		info, err := miner.SectorsStatus(ctx, id, true)
+		require.NoError(t, err)
+		expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
+		if i >= 3 {
+			// after the upgrade, new sectors use the V1_1 proof
+			expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
+		}
+		assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
+	}
+
+	atomic.StoreInt64(&mine, 0)
+	<-done
+}
+
 func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -63,11 +151,13 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect

 func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
 	for i := 0; i < n; i++ {
-		err := miner.PledgeSector(ctx)
-		require.NoError(t, err)
 		if i%3 == 0 && blockNotif != nil {
 			<-blockNotif
+			log.Errorf("WAIT")
 		}
+		log.Errorf("PLEDGING %d", i)
+		err := miner.PledgeSector(ctx)
+		require.NoError(t, err)
 	}

 	for {
@@ -126,7 +216,7 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)

 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -209,15 +299,17 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,

 		// Drop the partition
 		err = secs.ForEach(func(sid uint64) error {
-			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
-				Miner:  abi.ActorID(mid),
-				Number: abi.SectorNumber(sid),
+			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{
+				ID: abi.SectorID{
+					Miner:  abi.ActorID(mid),
+					Number: abi.SectorNumber(sid),
+				},
 			}, true)
 		})
 		require.NoError(t, err)
 	}

-	var s abi.SectorID
+	var s storage.SectorRef

 	// Drop 1 sector from deadline 3 partition 0
 	{
@@ -238,9 +330,11 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
 		require.NoError(t, err)
 		fmt.Println("the sectors", all)

-		s = abi.SectorID{
-			Miner:  abi.ActorID(mid),
-			Number: abi.SectorNumber(sn),
+		s = storage.SectorRef{
+			ID: abi.SectorID{
+				Miner:  abi.ActorID(mid),
+				Number: abi.SectorNumber(sn),
+			},
 		}

 		err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
@@ -23,6 +23,7 @@ var UpgradeLiftoffHeight = abi.ChainEpoch(-5)

 const UpgradeKumquatHeight = 15
 const UpgradeCalicoHeight = 20
+const UpgradePersianHeight = 25

 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 	0: DrandMainnet,
@@ -40,13 +40,10 @@ const UpgradeKumquatHeight = 170000

 // TODO: Height??
 const UpgradeCalicoHeight = 999999
+const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInDay * 2)

 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
-	policy.SetSupportedProofTypes(
-		abi.RegisteredSealProof_StackedDrg32GiBV1,
-		abi.RegisteredSealProof_StackedDrg64GiBV1,
-	)

 	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
 		SetAddressNetwork(address.Mainnet)
@@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
 // Consensus / Network

 const AllowableClockDriftSecs = uint64(1)
-const NewestNetworkVersion = network.Version6
+const NewestNetworkVersion = network.Version8
 const ActorUpgradeNetworkVersion = network.Version4

 // Epochs
@@ -89,12 +89,13 @@ var (
 	UpgradeLiftoffHeight abi.ChainEpoch = -5
 	UpgradeKumquatHeight abi.ChainEpoch = -6
 	UpgradeCalicoHeight  abi.ChainEpoch = -7
+	UpgradePersianHeight abi.ChainEpoch = -8

 	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 		0: DrandMainnet,
 	}

-	NewestNetworkVersion       = network.Version5
+	NewestNetworkVersion       = network.Version8
 	ActorUpgradeNetworkVersion = network.Version4

 	Devnet = true
@@ -84,8 +84,8 @@ func VersionForType(nodeType NodeType) (Version, error) {
 // semver versions of the rpc api exposed
 var (
 	FullAPIVersion   = newVer(0, 17, 0)
-	MinerAPIVersion  = newVer(0, 17, 0)
-	WorkerAPIVersion = newVer(0, 16, 0)
+	MinerAPIVersion  = newVer(0, 18, 0)
+	WorkerAPIVersion = newVer(0, 17, 0)
 )

 //nolint:varcheck,deadcode
@@ -4,6 +4,8 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-bitfield"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/network"
 )

 func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
@@ -26,3 +28,42 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))

 	return bitfield.MultiMerge(parts...)
 }
+
+// SealProofTypeFromSectorSize returns preferred seal proof type for creating
+// new miner actors and new sectors
+func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) {
+	switch {
+	case nv < network.Version7:
+		switch ssize {
+		case 2 << 10:
+			return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
+		case 8 << 20:
+			return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
+		case 512 << 20:
+			return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
+		case 32 << 30:
+			return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
+		case 64 << 30:
+			return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
+		default:
+			return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+		}
+	case nv >= network.Version7:
+		switch ssize {
+		case 2 << 10:
+			return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
+		case 8 << 20:
+			return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
+		case 512 << 20:
+			return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
+		case 32 << 30:
+			return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
+		case 64 << 30:
+			return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
+		default:
+			return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+		}
+	}
+
+	return 0, xerrors.Errorf("unsupported network version")
+}
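The rest of the diff migrates callers to this selector. A hedged usage sketch, with results that follow directly from the switch above:

// A 32GiB miner picks its seal proof before and after nv7 (Calico):
before, _ := miner.SealProofTypeFromSectorSize(abi.SectorSize(32<<30), network.Version6)
after, _ := miner.SealProofTypeFromSectorSize(abi.SectorSize(32<<30), network.Version7)
// before == abi.RegisteredSealProof_StackedDrg32GiBV1   (enum value 3)
// after  == abi.RegisteredSealProof_StackedDrg32GiBV1_1 (enum value 8)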
@@ -26,26 +26,29 @@ const (
 // SetSupportedProofTypes sets supported proof types, across all actor versions.
 // This should only be used for testing.
 func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
-	newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types))
-	for _, t := range types {
-		newTypes[t] = struct{}{}
-	}
-	// Set for all miner versions.
-	miner0.SupportedProofTypes = newTypes
-	miner2.PreCommitSealProofTypesV0 = newTypes
-	miner2.PreCommitSealProofTypesV7 = newTypes
-	miner2.PreCommitSealProofTypesV8 = newTypes
+	miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+	AddSupportedProofTypes(types...)
 }

 // AddSupportedProofTypes adds supported proof types, across all actor versions.
 // This should only be used for testing.
 func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
 	for _, t := range types {
+		if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
+			panic("must specify v1 proof types only")
+		}
 		// Set for all miner versions.
 		miner0.SupportedProofTypes[t] = struct{}{}
 		miner2.PreCommitSealProofTypesV0[t] = struct{}{}

 		miner2.PreCommitSealProofTypesV7[t] = struct{}{}
-		miner2.PreCommitSealProofTypesV8[t] = struct{}{}
+		miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+
+		miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
 	}
 }
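The t + abi.RegisteredSealProof_StackedDrg2KiBV1_1 arithmetic relies on the enum layout in go-state-types: the five V1_1 proofs follow the five V1 proofs in the same size order, so adding the first V1_1 value shifts a V1 proof to its V1_1 counterpart. A sketch of that assumption:

// Registry layout: V1 proofs occupy values 0..4, V1_1 proofs occupy 5..9,
// both ordered 2KiB, 8MiB, 512MiB, 32GiB, 64GiB.
v1 := abi.RegisteredSealProof_StackedDrg32GiBV1      // 3
offset := abi.RegisteredSealProof_StackedDrg2KiBV1_1 // 5
fmt.Println(v1+offset == abi.RegisteredSealProof_StackedDrg32GiBV1_1) // true (8)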
@@ -18,7 +18,7 @@ func VersionForNetwork(version network.Version) Version {
 	switch version {
 	case network.Version0, network.Version1, network.Version2, network.Version3:
 		return Version0
-	case network.Version4, network.Version5, network.Version6:
+	case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8:
 		return Version2
 	default:
 		panic(fmt.Sprintf("unsupported network version %d", version))
@@ -23,8 +23,6 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/crypto"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-
 	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
 	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
@@ -101,7 +99,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
 		i := i
 		m := m

-		spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
+		spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, GenesisNetworkVersion)
 		if err != nil {
 			return cid.Undef, err
 		}
@@ -97,6 +97,10 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 		Height:    build.UpgradeCalicoHeight,
 		Network:   network.Version7,
 		Migration: UpgradeCalico,
+	}, {
+		Height:    build.UpgradePersianHeight,
+		Network:   network.Version8,
+		Migration: nil,
 	}}

 	if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
@@ -661,20 +665,27 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root

 func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 	store := sm.cs.Store(ctx)
-	info, err := store.Put(ctx, new(types.StateInfo0))
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
 	}

-	newHamtRoot, err := nv7.MigrateStateTree(ctx, store, root, epoch, nv7.DefaultConfig())
+	if stateRoot.Version != types.StateTreeVersion1 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 1 for calico upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
 	}

 	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: types.StateTreeVersion1,
+		Version: stateRoot.Version,
 		Actors:  newHamtRoot,
-		Info:    info,
+		Info:    stateRoot.Info,
 	})
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
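Rather than fabricating a fresh StateInfo0, the migration now decodes the versioned state root it is handed, rewrites only the inner actors HAMT, and re-wraps the result. A sketch of the wrapper being round-tripped, with fields as used above (the real definition lives in chain/types):

// Versioned state-root wrapper introduced with actors v2. UpgradeCalico
// rewrites Actors via nv7.MigrateStateTree and carries Version and Info over.
type StateRoot struct {
	Version StateTreeVersion // must be StateTreeVersion1 at the Calico upgrade
	Actors  cid.Cid          // root of the actors HAMT (what the migration rewrites)
	Info    cid.Cid          // auxiliary state info, preserved unchanged
}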
@@ -207,12 +207,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
 		return nil, xerrors.Errorf("getting miner info: %w", err)
 	}

-	spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
-	if err != nil {
-		return nil, xerrors.Errorf("getting seal proof type: %w", err)
-	}
-
-	wpt, err := spt.RegisteredWinningPoStProof()
+	wpt, err := info.SealProofType.RegisteredWinningPoStProof()
 	if err != nil {
 		return nil, xerrors.Errorf("getting window proof type: %w", err)
 	}
@@ -252,7 +247,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
 	out := make([]builtin.SectorInfo, len(sectors))
 	for i, sinfo := range sectors {
 		out[i] = builtin.SectorInfo{
-			SealProof:    spt,
+			SealProof:    sinfo.SealProof,
 			SectorNumber: sinfo.SectorNumber,
 			SealedCID:    sinfo.SealedCID,
 		}
@@ -31,6 +31,7 @@ import (

 	lapi "github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/actors/policy"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/genesis"
@@ -211,15 +212,6 @@ var sealBenchCmd = &cli.Command{
 		}
 		sectorSize := abi.SectorSize(sectorSizeInt)

-		spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
-		if err != nil {
-			return err
-		}
-
-		cfg := &ffiwrapper.Config{
-			SealProofType: spt,
-		}
-
 		// Only fetch parameters if actually needed
 		if !c.Bool("skip-commit2") {
 			if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
@@ -231,7 +223,7 @@ var sealBenchCmd = &cli.Command{
 			Root: sbdir,
 		}

-		sb, err := ffiwrapper.New(sbfs, cfg)
+		sb, err := ffiwrapper.New(sbfs)
 		if err != nil {
 			return err
 		}
@@ -295,7 +287,7 @@ var sealBenchCmd = &cli.Command{

 		if !c.Bool("skip-commit2") {
 			log.Info("generating winning post candidates")
-			wipt, err := spt.RegisteredWinningPoStProof()
+			wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
 			if err != nil {
 				return err
 			}
@@ -428,7 +420,7 @@ var sealBenchCmd = &cli.Command{

 			fmt.Println(string(data))
 		} else {
-			fmt.Printf("----\nresults (v27) (%d)\n", sectorSize)
+			fmt.Printf("----\nresults (v28) (%d)\n", sectorSize)
 			if robench == "" {
 				fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings
 				fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1))
@@ -477,9 +469,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 	}

 	for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
-		sid := abi.SectorID{
-			Miner:  mid,
-			Number: i,
+		sid := storage.SectorRef{
+			ID: abi.SectorID{
+				Miner:  mid,
+				Number: i,
+			},
+			ProofType: spt(sectorSize),
 		}

 		start := time.Now()
@@ -507,9 +502,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 		end := start + sectorsPerWorker
 		for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
 			ix := int(i - 1)
-			sid := abi.SectorID{
-				Miner:  mid,
-				Number: i,
+			sid := storage.SectorRef{
+				ID: abi.SectorID{
+					Miner:  mid,
+					Number: i,
+				},
+				ProofType: spt(sectorSize),
 			}

 			start := time.Now()
@@ -538,7 +536,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 			<-preCommit2Sema

 			sealedSectors[ix] = saproof2.SectorInfo{
-				SealProof:    sb.SealProofType(),
+				SealProof:    sid.ProofType,
 				SectorNumber: i,
 				SealedCID:    cids.Sealed,
 			}
@@ -592,7 +590,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 			svi := saproof2.SealVerifyInfo{
 				SectorID:   abi.SectorID{Miner: mid, Number: i},
 				SealedCID:  cids.Sealed,
-				SealProof:  sb.SealProofType(),
+				SealProof:  sid.ProofType,
 				Proof:      proof,
 				DealIDs:    nil,
 				Randomness: ticket,
@@ -614,7 +612,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 			if !skipunseal {
 				log.Infof("[%d] Unsealing sector", i)
 				{
-					p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
+					p, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
 					if err != nil {
 						return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
 					}
@@ -625,7 +623,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
 				}
 			}

-			err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
+			err := sb.UnsealPiece(context.TODO(), sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
 			if err != nil {
 				return err
 			}
@@ -708,23 +706,22 @@ var proveCmd = &cli.Command{
 			return err
 		}

-		spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize))
-		if err != nil {
-			return err
-		}
-
-		cfg := &ffiwrapper.Config{
-			SealProofType: spt,
-		}
-
-		sb, err := ffiwrapper.New(nil, cfg)
+		sb, err := ffiwrapper.New(nil)
 		if err != nil {
 			return err
 		}

 		start := time.Now()

-		proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out)
+		ref := storage.SectorRef{
+			ID: abi.SectorID{
+				Miner:  abi.ActorID(mid),
+				Number: abi.SectorNumber(c2in.SectorNum),
+			},
+			ProofType: spt(abi.SectorSize(c2in.SectorSize)),
+		}
+
+		proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
 		if err != nil {
 			return err
 		}
@@ -733,7 +730,7 @@ var proveCmd = &cli.Command{

 		fmt.Printf("proof: %x\n", proof)

-		fmt.Printf("----\nresults (v27) (%d)\n", c2in.SectorSize)
+		fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize)
 		dur := sealCommit2.Sub(start)

 		fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur))
@@ -747,3 +744,12 @@ func bps(data abi.SectorSize, d time.Duration) string {
 	bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
 	return types.SizeStr(types.BigInt{Int: bps}) + "/s"
 }
+
+func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
+	spt, err := miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion)
+	if err != nil {
+		panic(err)
+	}
+
+	return spt
+}
@@ -32,7 +32,6 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	lcli "github.com/filecoin-project/lotus/cli"
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/lib/lotuslog"
|
||||
}
|
||||
|
||||
// Setup remote sector store
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting proof type: %w", err)
|
||||
}
|
||||
|
||||
sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not get api info: %w", err)
|
||||
@@ -374,7 +368,6 @@ var runCmd = &cli.Command{

 		workerApi := &worker{
 			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
-				SealProof: spt,
 				TaskTypes: taskTypes,
 				NoSwap:    cctx.Bool("no-swap"),
 			}, remote, localStore, nodeApi, nodeApi, wsts),
@@ -8,8 +8,6 @@ import (
 	"os"

 	"github.com/docker/go-units"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/mitchellh/go-homedir"
 	"github.com/urfave/cli/v2"
@@ -19,6 +17,7 @@ import (
 	"github.com/filecoin-project/go-state-types/big"

 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
 	"github.com/filecoin-project/lotus/genesis"
@@ -128,12 +127,12 @@ var preSealCmd = &cli.Command{
 		}
 		sectorSize := abi.SectorSize(sectorSizeInt)

-		rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
+		spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion)
 		if err != nil {
 			return err
 		}

-		gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
+		gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
 		if err != nil {
 			return err
 		}
@@ -22,6 +22,7 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
+	"github.com/filecoin-project/specs-storage/storage"

 	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
@@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
 		return nil, nil, err
 	}

-	cfg := &ffiwrapper.Config{
-		SealProofType: spt,
-	}
-
 	if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec
 		return nil, nil, err
 	}
@@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
 		Root: sbroot,
 	}

-	sb, err := ffiwrapper.New(sbfs, cfg)
+	sb, err := ffiwrapper.New(sbfs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
 	var sealedSectors []*genesis.PreSeal
 	for i := 0; i < sectors; i++ {
 		sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next}
+		ref := storage.SectorRef{ID: sid, ProofType: spt}
 		next++

 		var preseal *genesis.PreSeal
 		if !fakeSectors {
-			preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage)
+			preseal, err = presealSector(sb, sbfs, ref, ssize, preimage)
 			if err != nil {
 				return nil, nil, err
 			}
 		} else {
-			preseal, err = presealSectorFake(sbfs, sid, spt, ssize)
+			preseal, err = presealSectorFake(sbfs, ref, ssize)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -148,7 +146,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
 	return miner, &minerAddr.KeyInfo, nil
 }

-func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
+func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
 	pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader)
 	if err != nil {
 		return nil, err
@@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
 	return &genesis.PreSeal{
 		CommR:     cids.Sealed,
 		CommD:     cids.Unsealed,
-		SectorID:  sid.Number,
-		ProofType: spt,
+		SectorID:  sid.ID.Number,
+		ProofType: sid.ProofType,
 	}, nil
 }

-func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
+func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) {
 	paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
@@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
 		return nil, xerrors.Errorf("mkdir cache: %w", err)
 	}

-	commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed)
+	commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed)
 	if err != nil {
 		return nil, xerrors.Errorf("fauxrep: %w", err)
 	}
@@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
 	return &genesis.PreSeal{
 		CommR:     commr,
 		CommD:     zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()),
-		SectorID:  sid.Number,
-		ProofType: spt,
+		SectorID:  sid.ID.Number,
+		ProofType: sid.ProofType,
 	}, nil
 }

-func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error {
-	paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
+func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error {
+	paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
 	if err != nil {
 		return err
 	}
@@ -50,7 +50,7 @@ func TestWorkerKeyChange(t *testing.T) {

 	blocktime := 1 * time.Millisecond

-	n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner)
+	n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV2At(1), test.FullNodeWithActorsV2At(1)}, test.OneMiner)

 	client1 := n[0]
 	client2 := n[1]
@@ -433,11 +433,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 		return err
 	}

-	spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
-	if err != nil {
-		return err
-	}
-
 	mid, err := address.IDFromAddress(a)
 	if err != nil {
 		return xerrors.Errorf("getting id address: %w", err)
@@ -451,9 +446,7 @@
 		wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
 		smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))

-		smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{
-			SealProofType: spt,
-		}, sectorstorage.SealerConfig{
+		smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), sectorstorage.SealerConfig{
 			ParallelFetchLimit: 10,
 			AllowAddPiece:      true,
 			AllowPreCommit1:    true,
@@ -657,9 +650,14 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
 		}
 	}

-	spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize))
+	nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
 	if err != nil {
-		return address.Undef, err
+		return address.Undef, xerrors.Errorf("getting network version: %w", err)
+	}
+
+	spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv)
+	if err != nil {
+		return address.Undef, xerrors.Errorf("getting seal proof type: %w", err)
 	}

 	params, err := actors.SerializeParams(&power2.CreateMinerParams{
@@ -1422,7 +1422,7 @@ Response:
   "ToUpgrade": true,
   "LastErr": "string value",
   "Log": null,
-  "SealProof": 3,
+  "SealProof": 8,
   "Activation": 10101,
   "Expiration": 10101,
   "DealWeight": "0",
@@ -55,8 +55,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   1,
   "sealing",
@@ -155,8 +158,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null,
   1024,
@@ -187,8 +193,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null
 ]
@@ -217,8 +226,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   1
 ]
@@ -262,8 +274,11 @@ Inputs:
 [
   {},
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   1040384,
   1024
@@ -293,8 +308,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null
 ]
@@ -323,8 +341,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null,
   null,
@@ -360,8 +381,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null
 ]
@@ -387,8 +411,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null,
   null
@@ -415,8 +442,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   null
 ]
@@ -499,8 +529,11 @@ Inputs:
 ```json
 [
   {
-    "Miner": 1000,
-    "Number": 9
+    "ID": {
+      "Miner": 1000,
+      "Number": 9
+    },
+    "ProofType": 8
   },
   1040384,
   1024,
@@ -3874,7 +3874,7 @@ Response:
   "WorkerChangeEpoch": 10101,
   "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
   "Multiaddrs": null,
-  "SealProofType": 3,
+  "SealProofType": 8,
   "SectorSize": 34359738368,
   "WindowPoStPartitionSectors": 42,
   "ConsensusFaultElapsed": 10101
@@ -3892,7 +3892,7 @@ Inputs:
 [
   "f01234",
   {
-    "SealProof": 3,
+    "SealProof": 8,
     "SectorNumber": 9,
     "SealedCID": {
       "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3989,7 +3989,7 @@ Inputs:
 [
   "f01234",
   {
-    "SealProof": 3,
+    "SealProof": 8,
     "SectorNumber": 9,
     "SealedCID": {
       "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -4194,7 +4194,7 @@ Inputs:
 ]
 ```

-Response: `6`
+Response: `8`

 ### StateReadState
 StateReadState returns the indicated actor's state.
@@ -4415,7 +4415,7 @@ Response:
 ```json
 {
   "SectorNumber": 9,
-  "SealProof": 3,
+  "SealProof": 8,
   "SealedCID": {
     "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
   },
@@ -4486,7 +4486,7 @@ Response:
 ```json
 {
   "Info": {
-    "SealProof": 3,
+    "SealProof": 8,
     "SectorNumber": 9,
     "SealedCID": {
       "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
extern/filecoin-ffi (vendored submodule)
@@ -1 +1 @@
-Subproject commit 1985275547f222e8c97a8ab70b5cc26bc1fa50b1
+Subproject commit 1d9cb3e8ff53f51f9318fc57e5d00bc79bdc0128
extern/sector-storage/faults.go (vendored)
@@ -9,17 +9,18 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 // FaultTracker TODO: Track things more actively
 type FaultTracker interface {
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error)
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error)
 }

 // CheckProvable returns unprovable sectors
-func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
+func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) {
 	var bad []abi.SectorID

 	ssize, err := pp.SectorSize()
@@ -33,27 +34,27 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 		ctx, cancel := context.WithCancel(ctx)
 		defer cancel()

-		locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
+		locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
 		if err != nil {
 			return xerrors.Errorf("acquiring sector lock: %w", err)
 		}

 		if !locked {
 			log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed")
-			bad = append(bad, sector)
+			bad = append(bad, sector.ID)
 			return nil
 		}

-		lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+		lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 		if err != nil {
 			log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
-			bad = append(bad, sector)
+			bad = append(bad, sector.ID)
 			return nil
 		}

 		if lp.Sealed == "" || lp.Cache == "" {
 			log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
-			bad = append(bad, sector)
+			bad = append(bad, sector.ID)
 			return nil
 		}
@@ -69,14 +70,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 			st, err := os.Stat(p)
 			if err != nil {
 				log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
-				bad = append(bad, sector)
+				bad = append(bad, sector.ID)
 				return nil
 			}

 			if sz != 0 {
 				if st.Size() != int64(ssize)*sz {
 					log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
-					bad = append(bad, sector)
+					bad = append(bad, sector.ID)
 					return nil
 				}
 			}
extern/sector-storage/ffiwrapper/basicfs/fs.go (vendored)
@@ -7,6 +7,7 @@ import (
 	"sync"

 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
@@ -23,7 +24,7 @@ type Provider struct {
 	waitSector map[sectorFile]chan struct{}
 }

-func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
 	if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
 		return storiface.SectorPaths{}, nil, err
 	}
@@ -37,7 +38,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
 	done := func() {}

 	out := storiface.SectorPaths{
-		ID: id,
+		ID: id.ID,
 	}

 	for _, fileType := range storiface.PathTypes {
@@ -49,10 +50,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
 		if b.waitSector == nil {
 			b.waitSector = map[sectorFile]chan struct{}{}
 		}
-		ch, found := b.waitSector[sectorFile{id, fileType}]
+		ch, found := b.waitSector[sectorFile{id.ID, fileType}]
 		if !found {
 			ch = make(chan struct{}, 1)
-			b.waitSector[sectorFile{id, fileType}] = ch
+			b.waitSector[sectorFile{id.ID, fileType}] = ch
 		}
 		b.lk.Unlock()
@@ -63,7 +64,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
 			return storiface.SectorPaths{}, nil, ctx.Err()
 		}

-		path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id))
+		path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID))

 		prevDone := done
 		done = func() {
extern/sector-storage/ffiwrapper/config.go (vendored, deleted)
@@ -1,34 +0,0 @@
-package ffiwrapper
-
-import (
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/go-state-types/abi"
-)
-
-type Config struct {
-	SealProofType abi.RegisteredSealProof
-
-	_ struct{} // guard against nameless init
-}
-
-func sizeFromConfig(cfg Config) (abi.SectorSize, error) {
-	return cfg.SealProofType.SectorSize()
-}
-
-func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) {
-	switch ssize {
-	case 2 << 10:
-		return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
-	case 8 << 20:
-		return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
-	case 512 << 20:
-		return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
-	case 32 << 30:
-		return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
-	case 64 << 30:
-		return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
-	default:
-		return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
-	}
-}
extern/sector-storage/ffiwrapper/sealer.go (vendored)
@@ -1,16 +1,12 @@
 package ffiwrapper

 import (
-	"github.com/filecoin-project/go-state-types/abi"
 	logging "github.com/ipfs/go-log/v2"
 )

 var log = logging.Logger("ffiwrapper")

 type Sealer struct {
-	sealProofType abi.RegisteredSealProof
-	ssize         abi.SectorSize // a function of sealProofType and postProofType
-
 	sectors  SectorProvider
 	stopping chan struct{}
 }
@@ -18,11 +14,3 @@ type Sealer struct {
 func (sb *Sealer) Stop() {
 	close(sb.stopping)
 }
-
-func (sb *Sealer) SectorSize() abi.SectorSize {
-	return sb.ssize
-}
-
-func (sb *Sealer) SealProofType() abi.RegisteredSealProof {
-	return sb.sealProofType
-}
extern/sector-storage/ffiwrapper/sealer_cgo.go (vendored)
@@ -27,16 +27,8 @@ import (

 var _ Storage = &Sealer{}

-func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
-	sectorSize, err := sizeFromConfig(*cfg)
-	if err != nil {
-		return nil, err
-	}
-
+func New(sectors SectorProvider) (*Sealer, error) {
 	sb := &Sealer{
-		sealProofType: cfg.SealProofType,
-		ssize:         sectorSize,
-
 		sectors: sectors,

 		stopping: make(chan struct{}),
@@ -45,25 +37,29 @@ func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
 	return sb, nil
 }

-func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error {
+func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error {
 	// TODO: Allocate the sector here instead of in addpiece

 	return nil
 }

-func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
+func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
 	var offset abi.UnpaddedPieceSize
 	for _, size := range existingPieceSizes {
 		offset += size
 	}

-	maxPieceSize := abi.PaddedPieceSize(sb.ssize)
+	ssize, err := sector.ProofType.SectorSize()
+	if err != nil {
+		return abi.PieceInfo{}, err
+	}
+
+	maxPieceSize := abi.PaddedPieceSize(ssize)

 	if offset.Padded()+pieceSize.Padded() > maxPieceSize {
 		return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset)
 	}

-	var err error
 	var done func()
 	var stagedFile *partialFile
@ -135,7 +131,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
|
||||
break
|
||||
}
|
||||
|
||||
c, err := sb.pieceCid(buf[:read])
|
||||
c, err := sb.pieceCid(sector.ProofType, buf[:read])
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err)
|
||||
}
|
||||
@ -162,7 +158,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
|
||||
return pieceCids[0], nil
|
||||
}
|
||||
|
||||
pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids)
|
||||
pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids)
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
|
||||
}
|
||||
@ -178,13 +174,13 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) {
|
||||
func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) {
|
||||
prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in)))
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
|
||||
}
|
||||
|
||||
pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in)))
|
||||
pieceCID, err := ffi.GeneratePieceCIDFromFile(spt, prf, abi.UnpaddedPieceSize(len(in)))
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err)
|
||||
}
|
||||
@ -194,8 +190,12 @@ func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) {
|
||||
return pieceCID, werr()
|
||||
}
|
||||
|
||||
func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
|
||||
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
|
||||
func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
|
||||
ssize, err := sector.ProofType.SectorSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxPieceSize := abi.PaddedPieceSize(ssize)
|
||||
|
||||
// try finding existing
|
||||
unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
|
||||
@ -317,12 +317,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
|
||||
// </eww>
|
||||
|
||||
// TODO: This may be possible to do in parallel
|
||||
err = ffi.UnsealRange(sb.sealProofType,
|
||||
err = ffi.UnsealRange(sector.ProofType,
|
||||
srcPaths.Cache,
|
||||
sealed,
|
||||
opw,
|
||||
sector.Number,
|
||||
sector.Miner,
|
||||
sector.ID.Number,
|
||||
sector.ID.Miner,
|
||||
randomness,
|
||||
commd,
|
||||
uint64(at.Unpadded()),
|
||||
@ -356,14 +356,18 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
|
||||
func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
|
||||
path, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
|
||||
ssize, err := sector.ProofType.SectorSize()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
maxPieceSize := abi.PaddedPieceSize(ssize)
|
||||
|
||||
pf, err := openPartialFile(maxPieceSize, path.Unsealed)
|
||||
if err != nil {
|
||||
@ -408,7 +412,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
|
||||
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
|
||||
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("acquiring sector paths: %w", err)
|
||||
@ -443,29 +447,33 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
|
||||
for _, piece := range pieces {
|
||||
sum += piece.Size.Unpadded()
|
||||
}
|
||||
ussize := abi.PaddedPieceSize(sb.ssize).Unpadded()
|
||||
ssize, err := sector.ProofType.SectorSize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ussize := abi.PaddedPieceSize(ssize).Unpadded()
|
||||
if sum != ussize {
|
||||
return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
|
||||
}
|
||||
|
||||
// TODO: context cancellation respect
|
||||
p1o, err := ffi.SealPreCommitPhase1(
|
||||
sb.sealProofType,
|
||||
sector.ProofType,
|
||||
paths.Cache,
|
||||
paths.Unsealed,
|
||||
paths.Sealed,
|
||||
sector.Number,
|
||||
sector.Miner,
|
||||
sector.ID.Number,
|
||||
sector.ID.Miner,
|
||||
ticket,
|
||||
pieces,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
|
||||
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
|
||||
}
|
||||
return p1o, nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
|
||||
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
|
||||
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
|
||||
@ -474,7 +482,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
|
||||
|
||||
sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed)
|
||||
if err != nil {
|
||||
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
|
||||
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
|
||||
}
|
||||
|
||||
return storage.SectorCids{
|
||||
@ -483,40 +491,45 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
|
||||
func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
|
||||
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("acquire sector paths: %w", err)
|
||||
}
|
||||
defer done()
|
||||
output, err := ffi.SealCommitPhase1(
|
||||
sb.sealProofType,
|
||||
sector.ProofType,
|
||||
cids.Sealed,
|
||||
cids.Unsealed,
|
||||
paths.Cache,
|
||||
paths.Sealed,
|
||||
sector.Number,
|
||||
sector.Miner,
|
||||
sector.ID.Number,
|
||||
sector.ID.Miner,
|
||||
ticket,
|
||||
seed,
|
||||
pieces,
|
||||
)
|
||||
if err != nil {
|
||||
log.Warn("StandaloneSealCommit error: ", err)
|
||||
log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)
|
||||
log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)
|
||||
|
||||
return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) {
|
||||
return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner)
|
||||
func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storage.Proof, error) {
|
||||
return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner)
|
||||
}
|
||||
|
||||
func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
|
||||
func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
|
||||
ssize, err := sector.ProofType.SectorSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxPieceSize := abi.PaddedPieceSize(ssize)
|
||||
|
||||
if len(keepUnsealed) > 0 {
|
||||
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
|
||||
|
||||
sr := pieceRun(0, maxPieceSize)
|
||||
|
||||
@ -580,10 +593,10 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
|
||||
}
|
||||
defer done()
|
||||
|
||||
return ffi.ClearCache(uint64(sb.ssize), paths.Cache)
|
||||
return ffi.ClearCache(uint64(ssize), paths.Cache)
|
||||
}
|
||||
|
||||
func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
||||
func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
|
||||
// This call is meant to mark storage as 'freeable'. Given that unsealing is
|
||||
// very expensive, we don't remove data as soon as we can - instead we only
|
||||
// do that when we don't have free space for data that really needs it
|
||||
@ -593,7 +606,7 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safe
|
||||
return xerrors.Errorf("not supported at this layer")
|
||||
}
|
||||
|
||||
func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error {
|
||||
func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
|
||||
return xerrors.Errorf("not supported at this layer") // happens in localworker
|
||||
}
|
||||
|
||||
|
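Taken together, these changes make Sealer stateless with respect to proof type: New no longer takes a Config, and every method reads the proof from its SectorRef argument, so one Sealer can process v1 and v1_1 sectors side by side. A hedged sketch of the new call pattern (the root path is illustrative; the 2KiB sizes mirror the tests below):

	sb, err := ffiwrapper.New(&basicfs.Provider{Root: "/tmp/sectors"})
	if err != nil {
		panic(err)
	}

	// Two sectors with different seal proof types, handled by the same Sealer.
	refs := []storage.SectorRef{
		{ID: abi.SectorID{Miner: 1000, Number: 1}, ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1},
		{ID: abi.SectorID{Miner: 1000, Number: 2}, ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1},
	}
	for _, ref := range refs {
		// 1016 unpadded bytes exactly fill a 2KiB (padded) sector.
		if _, err := sb.AddPiece(context.TODO(), ref, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))); err != nil {
			panic(err)
		}
	}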
83  extern/sector-storage/ffiwrapper/sealer_test.go  vendored
@@ -43,7 +43,7 @@ var sectorSize, _ = sealProofType.SectorSize()
 var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}

 type seal struct {
-	id     abi.SectorID
+	ref    storage.SectorRef
 	cids   storage.SectorCids
 	pi     abi.PieceInfo
 	ticket abi.SealRandomness
@@ -56,12 +56,12 @@ func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
 	)
 }

-func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) {
+func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done func()) {
 	defer done()
 	dlen := abi.PaddedPieceSize(sectorSize).Unpadded()

 	var err error
-	r := data(id.Number, dlen)
+	r := data(id.ID.Number, dlen)
 	s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
 	if err != nil {
 		t.Fatalf("%+v", err)
@@ -84,19 +84,19 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
 	defer done()
 	seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}

-	pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
+	pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
 	if err != nil {
 		t.Fatalf("%+v", err)
 	}
-	proof, err := sb.SealCommit2(context.TODO(), s.id, pc1)
+	proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1)
 	if err != nil {
 		t.Fatalf("%+v", err)
 	}

 	ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{
-		SectorID:              s.id,
+		SectorID:              s.ref.ID,
 		SealedCID:             s.cids.Sealed,
-		SealProof:             sealProofType,
+		SealProof:             s.ref.ProofType,
 		Proof:                 proof,
 		Randomness:            s.ticket,
 		InteractiveRandomness: seed,
@@ -111,7 +111,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
 	}
 }

-func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) {
+func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) {
 	defer done()

 	var b bytes.Buffer
@@ -120,7 +120,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 		t.Fatal(err)
 	}

-	expect, _ := ioutil.ReadAll(data(si.Number, 1016))
+	expect, _ := ioutil.ReadAll(data(si.ID.Number, 1016))
 	if !bytes.Equal(b.Bytes(), expect) {
 		t.Fatal("read wrong bytes")
 	}
@@ -150,7 +150,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 		t.Fatal(err)
 	}

-	expect, _ = ioutil.ReadAll(data(si.Number, 1016))
+	expect, _ = ioutil.ReadAll(data(si.ID.Number, 1016))
 	require.Equal(t, expect, b.Bytes())

 	b.Reset()
@@ -174,13 +174,13 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
 	sis := make([]proof2.SectorInfo, len(seals))
 	for i, s := range seals {
 		sis[i] = proof2.SectorInfo{
-			SealProof:    sealProofType,
-			SectorNumber: s.id.Number,
+			SealProof:    s.ref.ProofType,
+			SectorNumber: s.ref.ID.Number,
 			SealedCID:    s.cids.Sealed,
 		}
 	}

-	proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].id.Miner, sis, randomness)
+	proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness)
 	if len(skipped) > 0 {
 		require.Error(t, err)
 		require.EqualValues(t, skipped, skp)
@@ -195,7 +195,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
 		Randomness:        randomness,
 		Proofs:            proofs,
 		ChallengedSectors: sis,
-		Prover:            seals[0].id.Miner,
+		Prover:            seals[0].ref.ID.Miner,
 	})
 	if err != nil {
 		t.Fatalf("%+v", err)
@@ -205,7 +205,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
 	}
 }

-func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) {
+func corrupt(t *testing.T, sealer *Sealer, id storage.SectorRef) {
 	paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage)
 	require.NoError(t, err)
 	defer done()
@@ -264,14 +264,10 @@ func TestSealAndVerify(t *testing.T) {
 	}
 	miner := abi.ActorID(123)

-	cfg := &Config{
-		SealProofType: sealProofType,
-	}
-
 	sp := &basicfs.Provider{
 		Root: cdir,
 	}
-	sb, err := New(sp, cfg)
+	sb, err := New(sp)
 	if err != nil {
 		t.Fatalf("%+v", err)
 	}
@@ -286,9 +282,12 @@ func TestSealAndVerify(t *testing.T) {
 	}
 	defer cleanup()

-	si := abi.SectorID{Miner: miner, Number: 1}
+	si := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 1},
+		ProofType: sealProofType,
+	}

-	s := seal{id: si}
+	s := seal{ref: si}

 	start := time.Now()

@@ -338,13 +337,10 @@ func TestSealPoStNoCommit(t *testing.T) {

 	miner := abi.ActorID(123)

-	cfg := &Config{
-		SealProofType: sealProofType,
-	}
 	sp := &basicfs.Provider{
 		Root: dir,
 	}
-	sb, err := New(sp, cfg)
+	sb, err := New(sp)
 	if err != nil {
 		t.Fatalf("%+v", err)
 	}
@@ -360,9 +356,12 @@ func TestSealPoStNoCommit(t *testing.T) {
 	}
 	defer cleanup()

-	si := abi.SectorID{Miner: miner, Number: 1}
+	si := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 1},
+		ProofType: sealProofType,
+	}

-	s := seal{id: si}
+	s := seal{ref: si}

 	start := time.Now()

@@ -403,13 +402,10 @@ func TestSealAndVerify3(t *testing.T) {

 	miner := abi.ActorID(123)

-	cfg := &Config{
-		SealProofType: sealProofType,
-	}
 	sp := &basicfs.Provider{
 		Root: dir,
 	}
-	sb, err := New(sp, cfg)
+	sb, err := New(sp)
 	if err != nil {
 		t.Fatalf("%+v", err)
 	}
@@ -424,13 +420,22 @@ func TestSealAndVerify3(t *testing.T) {

 	var wg sync.WaitGroup

-	si1 := abi.SectorID{Miner: miner, Number: 1}
-	si2 := abi.SectorID{Miner: miner, Number: 2}
-	si3 := abi.SectorID{Miner: miner, Number: 3}
+	si1 := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 1},
+		ProofType: sealProofType,
+	}
+	si2 := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 2},
+		ProofType: sealProofType,
+	}
+	si3 := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 3},
+		ProofType: sealProofType,
+	}

-	s1 := seal{id: si1}
-	s2 := seal{id: si2}
-	s3 := seal{id: si3}
+	s1 := seal{ref: si1}
+	s2 := seal{ref: si2}
+	s3 := seal{ref: si3}

 	wg.Add(3)
 	go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
@@ -451,7 +456,7 @@ func TestSealAndVerify3(t *testing.T) {
 	corrupt(t, sb, si1)
 	corrupt(t, sb, si2)

-	post(t, sb, []abi.SectorID{si1, si2}, s1, s2, s3)
+	post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3)
 }

 func BenchmarkWriteWithAlignment(b *testing.B) {
6  extern/sector-storage/ffiwrapper/types.go  vendored
@@ -29,8 +29,8 @@ type Storage interface {
 	storage.Prover
 	StorageSealer

-	UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
-	ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
+	UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
+	ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
 }

 type Verifier interface {
@@ -44,7 +44,7 @@ type Verifier interface {
 type SectorProvider interface {
 	// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
 	// * returns an error when allocate is set, and existing isn't, and the sector exists
-	AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
+	AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
 }

 var _ SectorProvider = &basicfs.Provider{}
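Any SectorProvider implementation now receives the full SectorRef rather than just the ID. A minimal stub satisfying the new interface, for illustration only (nullProvider is hypothetical, not part of the PR):

	// nullProvider allocates nothing; it only demonstrates the new
	// SectorRef-based AcquireSector signature.
	type nullProvider struct{}

	func (nullProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
		if existing != storiface.FTNone {
			// Per the interface contract above: requested-but-missing sectors
			// report ErrSectorNotFound.
			return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
		}
		return storiface.SectorPaths{ID: id.ID}, func() {}, nil
	}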
10  extern/sector-storage/ffiwrapper/verifier_cgo.go  vendored
@@ -11,6 +11,7 @@ import (
 	ffi "github.com/filecoin-project/filecoin-ffi"
 	"github.com/filecoin-project/go-state-types/abi"
 	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
@@ -74,12 +75,15 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
 			continue
 		}

-		sid := abi.SectorID{Miner: mid, Number: s.SectorNumber}
+		sid := storage.SectorRef{
+			ID:        abi.SectorID{Miner: mid, Number: s.SectorNumber},
+			ProofType: s.SealProof,
+		}

 		paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
 		if err != nil {
-			log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err)
-			skipped = append(skipped, sid)
+			log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
+			skipped = append(skipped, sid.ID)
 			continue
 		}
 		doneFuncs = append(doneFuncs, d)
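The proof type used for PoSt path lookups now comes from each on-chain SectorInfo rather than from the Sealer, which is what lets a post-upgrade miner prove a mix of v1 and v1_1 sectors in one deadline. A sketch of the per-sector mapping performed above (refFor is an illustrative helper, not in the PR):

	// refFor mirrors the sid construction in pubSectorToPriv: each sector's
	// seal proof travels with it, so mixed proof types are handled uniformly.
	func refFor(mid abi.ActorID, s proof2.SectorInfo) storage.SectorRef {
		return storage.SectorRef{
			ID:        abi.SectorID{Miner: mid, Number: s.SectorNumber},
			ProofType: s.SealProof,
		}
	}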
84  extern/sector-storage/manager.go  vendored
@@ -47,9 +47,7 @@ type Worker interface {
 }

 type SectorManager interface {
-	SectorSize() abi.SectorSize
-
-	ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
+	ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error

 	ffiwrapper.StorageSealer
 	storage.Prover
@@ -61,8 +59,6 @@ type WorkerID uuid.UUID // worker session UUID
 var ClosedWorkerID = uuid.UUID{}

 type Manager struct {
-	scfg *ffiwrapper.Config
-
 	ls         stores.LocalStorage
 	storage    *stores.Remote
 	localStore *stores.Local
@@ -105,13 +101,13 @@ type StorageAuth http.Header
 type WorkerStateStore *statestore.StateStore
 type ManagerStateStore *statestore.StateStore

-func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
+func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
 	lstor, err := stores.NewLocal(ctx, ls, si, urls)
 	if err != nil {
 		return nil, err
 	}

-	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si, spt: cfg.SealProofType}, cfg)
+	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
 	if err != nil {
 		return nil, xerrors.Errorf("creating prover instance: %w", err)
 	}
@@ -119,15 +115,13 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
 	stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit)

 	m := &Manager{
-		scfg: cfg,
-
 		ls:         ls,
 		storage:    stor,
 		localStore: lstor,
 		remoteHnd:  &stores.FetchHandler{Local: lstor},
 		index:      si,

-		sched: newScheduler(cfg.SealProofType),
+		sched: newScheduler(),

 		Prover: prover,

@@ -162,7 +156,6 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
 	}

 	err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
-		SealProof: cfg.SealProofType,
 		TaskTypes: localTasks,
 	}, stor, lstor, si, m, wss))
 	if err != nil {
@@ -198,23 +191,18 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	m.remoteHnd.ServeHTTP(w, r)
 }

-func (m *Manager) SectorSize() abi.SectorSize {
-	sz, _ := m.scfg.SealProofType.SectorSize()
-	return sz
-}
-
 func schedNop(context.Context, Worker) error {
 	return nil
 }

-func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
+func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
 	return func(ctx context.Context, worker Worker) error {
 		_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
 		return err
 	}
 }

-func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
+func (m *Manager) readPiece(sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
 	return func(ctx context.Context, w Worker) error {
 		r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size))
 		if err != nil {
@@ -227,19 +215,19 @@ func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storifac
 	}
 }

-func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {
+func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {

 	// acquire a lock purely for reading unsealed sectors
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
 		returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
 		return
 	}

 	// passing 0 spt because we only need it when allowFetch is true
-	best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
+	best, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
 	if err != nil {
 		returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
 		return
@@ -249,7 +237,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
 	if foundUnsealed { // append to existing
 		// There is unsealed sector, see if we can read from it

-		selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
+		selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)

 		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
 			m.readPiece(sink, sector, offset, size, &readOk))
@@ -262,7 +250,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
 	return
 }

-func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
+func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
 	foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size)
 	if err != nil {
 		return err
@@ -273,7 +261,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
 		return xerrors.Errorf("acquiring unseal sector lock: %w", err)
 	}

@@ -302,7 +290,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 		return err
 	}

-	selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
+	selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
 		m.readPiece(sink, sector, offset, size, &readOk))
@@ -317,16 +305,16 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 	return nil
 }

-func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
+func (m *Manager) NewSector(ctx context.Context, sector storage.SectorRef) error {
 	log.Warnf("stub NewSector")
 	return nil
 }

-func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
+func (m *Manager) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUnsealed); err != nil {
 		return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

@@ -335,7 +323,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 	if len(existingPieces) == 0 { // new
 		selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
 	} else { // use existing
-		selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
+		selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)
 	}

 	var out abi.PieceInfo
@@ -353,7 +341,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 	return out, err
 }

-func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
+func (m *Manager) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

@@ -380,7 +368,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
 		return out, waitErr
 	}

-	if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil {
 		return nil, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

@@ -404,7 +392,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
 	return out, waitErr
 }

-func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
+func (m *Manager) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

@@ -431,11 +419,11 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 		return out, waitErr
 	}

-	if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil {
 		return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

-	selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true)
+	selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, true)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
 		err := m.startWork(ctx, w, wk)(w.SealPreCommit2(ctx, sector, phase1Out))
@@ -453,7 +441,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 	return out, waitErr
 }

-func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
+func (m *Manager) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

@@ -480,14 +468,14 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
 		return out, waitErr
 	}

-	if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil {
 		return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

 	// NOTE: We set allowFetch to false in so that we always execute on a worker
 	// with direct access to the data. We want to do that because this step is
 	// generally very cheap / fast, and transferring data is not worth the effort
-	selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
+	selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
 		err := m.startWork(ctx, w, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
@@ -505,7 +493,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
 	return out, waitErr
 }

-func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
+func (m *Manager) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
 	wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out)
 	if err != nil {
 		return storage.Proof{}, xerrors.Errorf("getWork: %w", err)
@@ -548,17 +536,17 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou
 	return out, waitErr
 }

-func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
 		return xerrors.Errorf("acquiring sector lock: %w", err)
 	}

 	unsealed := storiface.FTUnsealed
 	{
-		unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
+		unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
 		if err != nil {
 			return xerrors.Errorf("finding unsealed sector: %w", err)
 		}
@@ -568,7 +556,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 		}
 	}

-	selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
+	selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false)

 	err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
 		m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove),
@@ -601,28 +589,28 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 	return nil
 }

-func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
 	log.Warnw("ReleaseUnsealed todo")
 	return nil
 }

-func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
+func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
 		return xerrors.Errorf("acquiring sector lock: %w", err)
 	}

 	var err error

-	if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil {
+	if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); rerr != nil {
 		err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
 	}
-	if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil {
+	if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTCache, true); rerr != nil {
 		err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
 	}
-	if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil {
+	if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); rerr != nil {
 		err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
 	}
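Manager construction drops the *ffiwrapper.Config parameter along with the SectorSize accessor; node setup passes only the storage and scheduling plumbing. A hedged sketch of the new call (the surrounding variables, the sectorstorage import alias, and the SealerConfig fields shown are assumed from the node setup code, not introduced by this PR):

	// ls, si, urls, auth, wss, mss come from the node's own wiring.
	mgr, err := sectorstorage.New(ctx, ls, si, sectorstorage.SealerConfig{
		ParallelFetchLimit: 10,
		AllowAddPiece:      true,
		AllowPreCommit1:    true,
	}, urls, auth, wss, mss)
	if err != nil {
		return err
	}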
36  extern/sector-storage/manager_test.go  vendored
@@ -21,6 +21,7 @@ import (

 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-statestore"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@@ -90,28 +91,23 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man
 	st := newTestStorage(t)

 	si := stores.NewIndex()
-	cfg := &ffiwrapper.Config{
-		SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
-	}

 	lstor, err := stores.NewLocal(ctx, st, si, nil)
 	require.NoError(t, err)

-	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, spt: cfg.SealProofType}, cfg)
+	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
 	require.NoError(t, err)

 	stor := stores.NewRemote(lstor, si, nil, 6000)

 	m := &Manager{
-		scfg: cfg,
-
 		ls:         st,
 		storage:    stor,
 		localStore: lstor,
 		remoteHnd:  &stores.FetchHandler{Local: lstor},
 		index:      si,

-		sched: newScheduler(cfg.SealProofType),
+		sched: newScheduler(),

 		Prover: prover,

@@ -141,12 +137,14 @@ func TestSimple(t *testing.T) {
 	}

 	err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m))
 	require.NoError(t, err)

-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}

 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -176,14 +174,16 @@ func TestRedoPC1(t *testing.T) {
 	}

 	tw := newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m)

 	err := m.AddWorker(ctx, tw)
 	require.NoError(t, err)

-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}

 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -228,14 +228,16 @@ func TestRestartManager(t *testing.T) {
 	}

 	tw := newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m)

 	err := m.AddWorker(ctx, tw)
 	require.NoError(t, err)

-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}

 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -329,14 +331,16 @@ func TestRestartWorker(t *testing.T) {
 	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))

 	err := m.AddWorker(ctx, w)
 	require.NoError(t, err)

-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}

 	apDone := make(chan struct{})

@@ -363,7 +367,6 @@ func TestRestartWorker(t *testing.T) {
 	w = newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))

@@ -400,7 +403,6 @@ func TestReenableWorker(t *testing.T) {
 	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))

109  extern/sector-storage/mock/mock.go  vendored
@@ -27,21 +27,14 @@ var log = logging.Logger("sbmock")
 type SectorMgr struct {
 	sectors      map[abi.SectorID]*sectorState
 	pieces       map[cid.Cid][]byte
-	sectorSize   abi.SectorSize
 	nextSectorID abi.SectorNumber
-	proofType    abi.RegisteredSealProof

 	lk sync.Mutex
 }

 type mockVerif struct{}

-func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr {
-	rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
-	if err != nil {
-		panic(err)
-	}
-
+func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
 	sectors := make(map[abi.SectorID]*sectorState)
 	for _, sid := range genesisSectors {
 		sectors[sid] = &sectorState{
@@ -53,9 +46,7 @@ func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *Sect
 	return &SectorMgr{
 		sectors:      sectors,
 		pieces:       map[cid.Cid][]byte{},
-		sectorSize:   ssize,
 		nextSectorID: 5,
-		proofType:    rt,
 	}
 }

@@ -75,17 +66,17 @@ type sectorState struct {
 	lk sync.Mutex
 }

-func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error {
+func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) error {
 	return nil
 }

-func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
-	log.Warn("Add piece: ", sectorID, size, mgr.proofType)
+func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
+	log.Warn("Add piece: ", sectorID, size, sectorID.ProofType)

 	var b bytes.Buffer
 	tr := io.TeeReader(r, &b)

-	c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size)
+	c, err := ffiwrapper.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
 	if err != nil {
 		return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
 	}
@@ -95,12 +86,12 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist
 	mgr.lk.Lock()
 	mgr.pieces[c] = b.Bytes()

-	ss, ok := mgr.sectors[sectorID]
+	ss, ok := mgr.sectors[sectorID.ID]
 	if !ok {
 		ss = &sectorState{
 			state: statePacking,
 		}
-		mgr.sectors[sectorID] = ss
+		mgr.sectors[sectorID.ID] = ss
 	}
 	mgr.lk.Unlock()

@@ -115,10 +106,6 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist
 	}, nil
 }

-func (mgr *SectorMgr) SectorSize() abi.SectorSize {
-	return mgr.sectorSize
-}
-
 func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
@@ -127,9 +114,9 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 	return id, nil
 }

-func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error {
+func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return xerrors.Errorf("no sector with id %d in storage", sid)
@@ -140,18 +127,23 @@ func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error {
 	return nil
 }

-func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
+func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return nil, xerrors.Errorf("no sector with id %d in storage", sid)
 	}

+	ssize, err := sid.ProofType.SectorSize()
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get proof sector size: %w", err)
+	}
+
 	ss.lk.Lock()
 	defer ss.lk.Unlock()

-	ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
+	ussize := abi.PaddedPieceSize(ssize).Unpadded()

 	// TODO: verify pieces in sinfo.pieces match passed in pieces

@@ -180,7 +172,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick
 		}
 	}

-	commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis)
+	commd, err := MockVerifier.GenerateDataCommitment(sid.ProofType, pis)
 	if err != nil {
 		return nil, err
 	}
@@ -195,7 +187,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick
 	return cc, nil
 }

-func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
+func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
 	db := []byte(string(phase1Out))
 	db[0] ^= 'd'

@@ -214,9 +206,9 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas
 	}, nil
 }

-func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
+func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return nil, xerrors.Errorf("no such sector %d", sid)
@@ -236,16 +228,16 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket

 	var out [32]byte
 	for i := range out {
-		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff)
+		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.ID.Number&0xff)
 	}

 	return out[:], nil
 }

-func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
+func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
 	var out [1920]byte
 	for i := range out[:len(phase1Out)] {
-		out[i] = phase1Out[i] ^ byte(sid.Number&0xff)
+		out[i] = phase1Out[i] ^ byte(sid.ID.Number&0xff)
 	}

 	return out[:], nil
@@ -253,10 +245,10 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O

 // Test Instrumentation Methods

-func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
+func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	if !ok {
 		return fmt.Errorf("no such sector in storage")
 	}
@@ -265,10 +257,10 @@ func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
 	return nil
 }

-func (mgr *SectorMgr) MarkCorrupted(sid abi.SectorID, corrupted bool) error {
+func (mgr *SectorMgr) MarkCorrupted(sid storage.SectorRef, corrupted bool) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	if !ok {
 		return fmt.Errorf("no such sector in storage")
 	}
@@ -353,66 +345,73 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea
 	}
 }

-func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
-	if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 {
+func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
+	if len(mgr.sectors[sectorID.ID].pieces) > 1 || offset != 0 {
 		panic("implme")
 	}

-	_, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size))
+	_, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID.ID].pieces[0]]), int64(size))
 	return err
 }

-func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) {
-	usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
+func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) {
+	psize, err := spt.SectorSize()
+	if err != nil {
+		return storage.SectorRef{}, nil, err
+	}
+	usize := abi.PaddedPieceSize(psize).Unpadded()
 	sid, err := mgr.AcquireSectorNumber()
 	if err != nil {
-		return abi.SectorID{}, nil, err
+		return storage.SectorRef{}, nil, err
 	}

 	buf := make([]byte, usize)
 	_, _ = rand.Read(buf) // nolint:gosec

-	id := abi.SectorID{
-		Miner:  mid,
-		Number: sid,
+	id := storage.SectorRef{
+		ID: abi.SectorID{
+			Miner:  mid,
+			Number: sid,
+		},
+		ProofType: spt,
 	}

 	pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf))
 	if err != nil {
-		return abi.SectorID{}, nil, err
+		return storage.SectorRef{}, nil, err
 	}

 	return id, []abi.PieceInfo{pi}, nil
 }

-func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error {
+func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []storage.Range) error {
 	return nil
 }

-func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
 	return nil
 }

-func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
+func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()

-	if _, has := mgr.sectors[sector]; !has {
+	if _, has := mgr.sectors[sector.ID]; !has {
 		return xerrors.Errorf("sector not found")
 	}

-	delete(mgr.sectors, sector)
+	delete(mgr.sectors, sector.ID)
 	return nil
 }

-func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []abi.SectorID) ([]abi.SectorID, error) {
+func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef) ([]abi.SectorID, error) {
 	var bad []abi.SectorID

 	for _, sid := range ids {
-		_, found := mgr.sectors[sid]
+		_, found := mgr.sectors[sid.ID]

-		if !found || mgr.sectors[sid].failed {
-			bad = append(bad, sid)
+		if !found || mgr.sectors[sid.ID].failed {
+			bad = append(bad, sid.ID)
 		}
 	}

4 extern/sector-storage/mock/mock_test.go vendored
@ -9,9 +9,9 @@ import (
)

func TestOpFinish(t *testing.T) {
	sb := NewMockSectorMgr(2048, nil)
	sb := NewMockSectorMgr(nil)

	sid, pieces, err := sb.StageFakeData(123)
	sid, pieces, err := sb.StageFakeData(123, abi.RegisteredSealProof_StackedDrg2KiBV1_1)
	if err != nil {
		t.Fatal(err)
	}
2 extern/sector-storage/request_queue.go vendored
@ -20,7 +20,7 @@ func (q requestQueue) Less(i, j int) bool {
		return q[i].taskType.Less(q[j].taskType)
	}

	return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield
	return q[i].sector.ID.Number < q[j].sector.ID.Number // optimize minerActor.NewSectors bitfield
}

func (q requestQueue) Swap(i, j int) {
9 extern/sector-storage/resources.go vendored
@ -314,4 +314,13 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
func init() {
	ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately
	ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch]

	// V1_1 is the same as V1
	for _, m := range ResourceTable {
		m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1]
		m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1]
		m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1]
		m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1]
		m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1]
	}
}
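The init() above fills the V1_1 rows by aliasing: because ResourceTable's values are maps, one loop makes every task's V1_1 entry equal to its V1 entry, with no rows duplicated by hand. A toy version of the same trick, with simplified types and an illustrative memory figure (not the real table value):

```go
package main

import "fmt"

type resources struct{ MinMemory uint64 }

const (
	v1   = "StackedDrg32GiBV1"
	v1_1 = "StackedDrg32GiBV1_1"
)

func main() {
	table := map[string]map[string]resources{
		"seal/v0/precommit/1": {v1: {MinMemory: 112 << 30}}, // illustrative value
	}

	// Same shape as the init() above: every V1_1 entry inherits V1's row.
	for _, m := range table {
		m[v1_1] = m[v1]
	}

	fmt.Println(table["seal/v0/precommit/1"][v1_1].MinMemory)
}
```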
14 extern/sector-storage/roprov.go vendored
@ -5,7 +5,7 @@ import (

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -14,23 +14,17 @@ import (
type readonlyProvider struct {
	index stores.SectorIndex
	stor  *stores.Local
	spt   abi.RegisteredSealProof
}

func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
func (l *readonlyProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
	if allocate != storiface.FTNone {
		return storiface.SectorPaths{}, nil, xerrors.New("read-only storage")
	}

	ssize, err := l.spt.SectorSize()
	if err != nil {
		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err)
	}

	ctx, cancel := context.WithCancel(ctx)

	// use TryLock to avoid blocking
	locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone)
	locked, err := l.index.StorageTryLock(ctx, id.ID, existing, storiface.FTNone)
	if err != nil {
		cancel()
		return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err)
@ -40,7 +34,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e
		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock")
	}

	p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove)
	p, _, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing, storiface.AcquireMove)

	return p, cancel, err
}
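A pattern worth noting in this file: the storage index stays keyed by bare abi.SectorID (hence id.ID at the lock call), while storage-facing calls keep the full reference, since only they need the proof type. A self-contained sketch of that split, with a toy map standing in for the lotus index:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

// Toy index: keyed by sector identity only; the proof type is irrelevant here.
type index struct{ locked map[abi.SectorID]bool }

func (ix *index) tryLock(id abi.SectorID) bool {
	if ix.locked[id] {
		return false
	}
	ix.locked[id] = true
	return true
}

func acquireReadOnly(ix *index, ref storage.SectorRef) error {
	if !ix.tryLock(ref.ID) { // the index speaks abi.SectorID: unwrap the ref
		return fmt.Errorf("sector %v is locked", ref.ID)
	}
	// The proof type is only consulted where sizes matter.
	ssize, err := ref.ProofType.SectorSize()
	if err != nil {
		return err
	}
	fmt.Printf("acquired %v (%d bytes)\n", ref.ID, ssize)
	return nil
}

func main() {
	ix := &index{locked: map[abi.SectorID]bool{}}
	_ = acquireReadOnly(ix, storage.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 7},
		ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
	})
}
```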
23 extern/sector-storage/sched.go vendored
@ -11,6 +11,7 @@ import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -51,8 +52,6 @@ type WorkerSelector interface {
}

type scheduler struct {
	spt abi.RegisteredSealProof

	workersLk sync.RWMutex
	workers   map[WorkerID]*workerHandle

@ -122,7 +121,7 @@ type activeResources struct {
}

type workerRequest struct {
	sector   abi.SectorID
	sector   storage.SectorRef
	taskType sealtasks.TaskType
	priority int // larger values more important
	sel      WorkerSelector
@ -143,10 +142,8 @@ type workerResponse struct {
	err error
}

func newScheduler(spt abi.RegisteredSealProof) *scheduler {
func newScheduler() *scheduler {
	return &scheduler{
		spt: spt,

		workers: map[WorkerID]*workerHandle{},

		schedule: make(chan *workerRequest),
@ -168,7 +165,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
	}
}

func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
	ret := make(chan workerResponse)

	select {
@ -315,7 +312,7 @@ func (sh *scheduler) diag() SchedDiagInfo {
	task := (*sh.schedQueue)[sqi]

	out.Requests = append(out.Requests, SchedDiagRequestInfo{
		Sector:   task.sector,
		Sector:   task.sector.ID,
		TaskType: task.taskType,
		Priority: task.priority,
	})
@ -378,7 +375,7 @@ func (sh *scheduler) trySched() {
	}()

	task := (*sh.schedQueue)[sqi]
	needRes := ResourceTable[task.taskType][sh.spt]
	needRes := ResourceTable[task.taskType][task.sector.ProofType]

	task.indexHeap = sqi
	for wnd, windowRequest := range sh.openWindows {
@ -400,7 +397,7 @@ func (sh *scheduler) trySched() {
	}

	rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
	ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker)
	ok, err := task.sel.Ok(rpcCtx, task.taskType, task.sector.ProofType, worker)
	cancel()
	if err != nil {
		log.Errorf("trySched(1) req.sel.Ok error: %+v", err)
@ -456,21 +453,21 @@ func (sh *scheduler) trySched() {

	for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
		task := (*sh.schedQueue)[sqi]
		needRes := ResourceTable[task.taskType][sh.spt]
		needRes := ResourceTable[task.taskType][task.sector.ProofType]

		selectedWindow := -1
		for _, wnd := range acceptableWindows[task.indexHeap] {
			wid := sh.openWindows[wnd].worker
			wr := sh.workers[wid].info.Resources

			log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
			log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd)

			// TODO: allow bigger windows
			if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) {
				continue
			}

			log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.Number, task.taskType, wnd)
			log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd)

			windows[wnd].allocated.add(wr, needRes)
			// TODO: We probably want to re-sort acceptableWindows here based on new
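The scheduler edits are the heart of proof-type switching: resource needs were previously looked up against a single scheduler-wide sh.spt, and are now looked up per request via task.sector.ProofType, so V1 and V1_1 sectors can be in flight at once during the upgrade. A reduced model of that lookup change, with simplified types and illustrative numbers:

```go
package main

import "fmt"

type taskType string
type proof string
type resources struct{ minMemory uint64 }

// A request now carries its own proof type, as workerRequest does via its
// storage.SectorRef field in the diff above.
type request struct {
	task  taskType
	proof proof
}

var resourceTable = map[taskType]map[proof]resources{
	"seal/v0/precommit/1": {
		"v1":   {minMemory: 64 << 30}, // illustrative value
		"v1_1": {minMemory: 64 << 30}, // aliased from v1, as in resources.go
	},
}

func needRes(r request) resources {
	// was: resourceTable[r.task][schedulerWideProof]
	return resourceTable[r.task][r.proof]
}

func main() {
	fmt.Println(needRes(request{task: "seal/v0/precommit/1", proof: "v1_1"}).minMemory)
}
```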
59 extern/sector-storage/sched_test.go vendored
@ -47,55 +47,55 @@ type schedTestWorker struct {
	session uuid.UUID
}

func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
func (s *schedTestWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
func (s *schedTestWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
func (s *schedTestWorker) Remove(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
func (s *schedTestWorker) NewSector(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (s *schedTestWorker) Fetch(ctx context.Context, id storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
func (s *schedTestWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	panic("implement me")
}

@ -165,8 +165,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
}

func TestSchedStartStop(t *testing.T) {
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
	sched := newScheduler(spt)
	sched := newScheduler()
	go sched.runSched()

	addTestWorker(t, sched, stores.NewIndex(), "fred", nil)
@ -211,12 +210,15 @@ func TestSched(t *testing.T) {
	go func() {
		defer rm.wg.Done()

		sectorNum := abi.SectorID{
			Miner:  8,
			Number: sid,
		sectorRef := storage.SectorRef{
			ID: abi.SectorID{
				Miner:  8,
				Number: sid,
			},
			ProofType: spt,
		}

		err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error {
		err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error {
			wi, err := w.Info(ctx)
			require.NoError(t, err)

@ -286,7 +288,7 @@ func TestSched(t *testing.T) {
	return func(t *testing.T) {
		index := stores.NewIndex()

		sched := newScheduler(spt)
		sched := newScheduler()
		sched.testSync = make(chan struct{})

		go sched.runSched()
@ -518,7 +520,6 @@ func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b
var _ WorkerSelector = slowishSelector(true)

func BenchmarkTrySched(b *testing.B) {
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
	logging.SetAllLoggers(logging.LevelInfo)
	defer logging.SetAllLoggers(logging.LevelDebug)
	ctx := context.Background()
@ -528,7 +529,7 @@ func BenchmarkTrySched(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()

		sched := newScheduler(spt)
		sched := newScheduler()
		sched.workers[WorkerID{}] = &workerHandle{
			workerRpc: nil,
			info: storiface.WorkerInfo{
@ -568,9 +569,8 @@ func BenchmarkTrySched(b *testing.B) {
}

func TestWindowCompact(t *testing.T) {
	sh := scheduler{
		spt: abi.RegisteredSealProof_StackedDrg32GiBV1,
	}
	sh := scheduler{}
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1

	test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) {
		return func(t *testing.T) {
@ -584,8 +584,11 @@ func TestWindowCompact(t *testing.T) {
	window := &schedWindow{}

	for _, task := range windowTasks {
		window.todo = append(window.todo, &workerRequest{taskType: task})
		window.allocated.add(wh.info.Resources, ResourceTable[task][sh.spt])
		window.todo = append(window.todo, &workerRequest{
			taskType: task,
			sector:   storage.SectorRef{ProofType: spt},
		})
		window.allocated.add(wh.info.Resources, ResourceTable[task][spt])
	}

	wh.activeWindows = append(wh.activeWindows, window)
@ -604,7 +607,7 @@ func TestWindowCompact(t *testing.T) {

	for ti, task := range tasks {
		require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti)
		expectRes.add(wh.info.Resources, ResourceTable[task][sh.spt])
		expectRes.add(wh.info.Resources, ResourceTable[task][spt])
	}

	require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi)
8 extern/sector-storage/sched_worker.go vendored
@ -294,7 +294,7 @@ func (sw *schedWorker) workerCompactWindows() {
	var moved []int

	for ti, todo := range window.todo {
		needRes := ResourceTable[todo.taskType][sw.sched.spt]
		needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
		if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) {
			continue
		}
@ -350,7 +350,7 @@ assignLoop:

	worker.lk.Lock()
	for t, todo := range firstWindow.todo {
		needRes := ResourceTable[todo.taskType][sw.sched.spt]
		needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
		if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) {
			tidx = t
			break
@ -364,7 +364,7 @@ assignLoop:

	todo := firstWindow.todo[tidx]

	log.Debugf("assign worker sector %d", todo.sector.Number)
	log.Debugf("assign worker sector %d", todo.sector.ID.Number)
	err := sw.startProcessingTask(sw.taskDone, todo)

	if err != nil {
@ -389,7 +389,7 @@ assignLoop:
func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error {
	w, sh := sw.worker, sw.sched

	needRes := ResourceTable[req.taskType][sh.spt]
	needRes := ResourceTable[req.taskType][req.sector.ProofType]

	w.lk.Lock()
	w.preparing.add(w.info.Resources, needRes)
2 extern/sector-storage/stats.go vendored
@ -46,7 +46,7 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob {
	for _, request := range window.todo {
		out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{
			ID:      storiface.UndefCall,
			Sector:  request.sector,
			Sector:  request.sector.ID,
			Task:    request.taskType,
			RunWait: wi + 1,
			Start:   request.start,
9 extern/sector-storage/stores/http_handler.go vendored
@ -12,6 +12,8 @@ import (

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"

	"github.com/filecoin-project/specs-storage/storage"
)

var log = logging.Logger("stores")
@ -73,7 +75,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
	// The caller has a lock on this sector already, no need to get one here

	// passing 0 spt because we don't allocate anything
	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	si := storage.SectorRef{
		ID:        id,
		ProofType: 0,
	}

	paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
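The ProofType: 0 above is deliberate, per the in-diff comment: this handler only locates files that already exist (allocate is storiface.FTNone), so the proof type, which is otherwise used to size new allocations, never meaningfully matters on this path. A sketch of building such a fetch-only reference; the zero value is a placeholder, not a real proof choice:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func main() {
	// Fetch-only reference: real identity, placeholder proof type. This is
	// safe here only because nothing is allocated on the read path.
	si := storage.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 42},
		ProofType: 0,
	}
	fmt.Printf("%+v\n", si)
}
```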
6 extern/sector-storage/stores/interface.go vendored
@ -5,12 +5,14 @@ import (

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

type Store interface {
	AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
	AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
	Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error

	// like remove, but doesn't remove the primary sector copy, nor the last
@ -18,7 +20,7 @@ type Store interface {
	RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error

	// move sectors into storage
	MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error
	MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error

	FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
}
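The Store interface drops the ssize parameter because a SectorRef is self-describing. An implementation recovers the size with one call, as in this simplified stand-in (method set trimmed for illustration; the real interface also takes file types, path types and acquire modes):

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

// Simplified stand-in for the Store interface above: ssize is gone from the
// signature because the SectorRef carries enough to recompute it.
type store interface {
	AcquireSector(ctx context.Context, s storage.SectorRef) (string, error)
}

type memStore struct{}

func (memStore) AcquireSector(_ context.Context, s storage.SectorRef) (string, error) {
	ssize, err := s.ProofType.SectorSize() // was a parameter, now derived
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("/cache/s-t0%d-%d (%d bytes)", s.ID.Miner, s.ID.Number, ssize), nil
}

func main() {
	var st store = memStore{}
	p, _ := st.AcquireSector(context.Background(), storage.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	})
	fmt.Println(p)
}
```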
31 extern/sector-storage/stores/local.go vendored
@ -14,6 +14,7 @@ import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -325,7 +326,12 @@ func (st *Local) reportStorage(ctx context.Context) {
	}
}

func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) {
func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) {
	ssize, err := sid.ProofType.SectorSize()
	if err != nil {
		return nil, err
	}

	st.localLk.Lock()

	done := func() {}
@ -375,11 +381,16 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
	return done, nil
}

func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
	if existing|allocate != existing^allocate {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
	}

	ssize, err := sid.ProofType.SectorSize()
	if err != nil {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, err
	}

	st.localLk.RLock()
	defer st.localLk.RUnlock()

@ -391,7 +402,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
		continue
	}

	si, err := st.index.StorageFindSector(ctx, sid, fileType, ssize, false)
	si, err := st.index.StorageFindSector(ctx, sid.ID, fileType, ssize, false)
	if err != nil {
		log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err)
		continue
@ -407,7 +418,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
		continue
	}

	spath := p.sectorPath(sid, fileType)
	spath := p.sectorPath(sid.ID, fileType)
	storiface.SetPathByType(&out, fileType, spath)
	storiface.SetPathByType(&storageIDs, fileType, string(info.ID))

@ -449,7 +460,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.

	// TODO: Check free space

	best = p.sectorPath(sid, fileType)
	best = p.sectorPath(sid.ID, fileType)
	bestID = si.ID
	break
	}
@ -578,13 +589,13 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storifa
	return nil
}

func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
	dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove)
func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
	dest, destIds, err := st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove)
	if err != nil {
		return xerrors.Errorf("acquire dest storage: %w", err)
	}

	src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	src, srcIds, err := st.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	if err != nil {
		return xerrors.Errorf("acquire src storage: %w", err)
	}
@ -616,7 +627,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect

	log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)

	if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil {
	if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil {
		return xerrors.Errorf("dropping source sector from index: %w", err)
	}

@ -625,7 +636,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect
		return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
	}

	if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil {
	if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil {
		return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err)
	}
}
27 extern/sector-storage/stores/remote.go vendored
@ -19,6 +19,7 @@ import (
	"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/hashicorp/go-multierror"
	files "github.com/ipfs/go-ipfs-files"
@ -58,7 +59,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int
	}
}

func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
	if existing|allocate != existing^allocate {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
	}
@ -66,9 +67,9 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
	for {
		r.fetchLk.Lock()

		c, locked := r.fetching[s]
		c, locked := r.fetching[s.ID]
		if !locked {
			r.fetching[s] = make(chan struct{})
			r.fetching[s.ID] = make(chan struct{})
			r.fetchLk.Unlock()
			break
		}
@ -85,12 +86,12 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se

	defer func() {
		r.fetchLk.Lock()
		close(r.fetching[s])
		delete(r.fetching, s)
		close(r.fetching[s.ID])
		delete(r.fetching, s.ID)
		r.fetchLk.Unlock()
	}()

	paths, stores, err := r.local.AcquireSector(ctx, s, ssize, existing, allocate, pathType, op)
	paths, stores, err := r.local.AcquireSector(ctx, s, existing, allocate, pathType, op)
	if err != nil {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
	}
@ -106,7 +107,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
	}
}

	apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op)
	apaths, ids, err := r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op)
	if err != nil {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
	}
@ -116,7 +117,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
		odt = storiface.FsOverheadFinalized
	}

	releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt)
	releaseStorage, err := r.local.Reserve(ctx, s, toFetch, ids, odt)
	if err != nil {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
	}
@ -134,7 +135,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
	dest := storiface.PathByType(apaths, fileType)
	storageID := storiface.PathByType(ids, fileType)

	url, err := r.acquireFromRemote(ctx, s, fileType, dest)
	url, err := r.acquireFromRemote(ctx, s.ID, fileType, dest)
	if err != nil {
		return storiface.SectorPaths{}, storiface.SectorPaths{}, err
	}
@ -142,7 +143,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
	storiface.SetPathByType(&paths, fileType, dest)
	storiface.SetPathByType(&stores, fileType, storageID)

	if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil {
	if err := r.index.StorageDeclareSector(ctx, ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil {
		log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
		continue
	}
@ -281,14 +282,14 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
	}
}

func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
	// Make sure we have the data local
	_, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	_, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
	if err != nil {
		return xerrors.Errorf("acquire src storage (remote): %w", err)
	}

	return r.local.MoveStorage(ctx, s, ssize, types)
	return r.local.MoveStorage(ctx, s, types)
}

func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error {
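The fetching map in this file is a small single-flight guard: concurrent acquires of one sector wait for the first in-flight fetch to finish. It stays keyed by abi.SectorID (hence s.ID), since two refs differing only in proof type name the same on-disk data. A condensed, self-contained sketch of that pattern:

```go
package main

import (
	"sync"

	"github.com/filecoin-project/go-state-types/abi"
)

type fetcher struct {
	lk       sync.Mutex
	fetching map[abi.SectorID]chan struct{}
}

// acquire returns once the caller is the sole fetcher of id; the returned
// release func broadcasts completion to any goroutines waiting on the same id.
func (f *fetcher) acquire(id abi.SectorID) func() {
	for {
		f.lk.Lock()
		c, busy := f.fetching[id]
		if !busy {
			f.fetching[id] = make(chan struct{})
			f.lk.Unlock()
			return func() {
				f.lk.Lock()
				close(f.fetching[id])
				delete(f.fetching, id)
				f.lk.Unlock()
			}
		}
		f.lk.Unlock()
		<-c // wait for the in-flight fetch, then retry
	}
}

func main() {
	f := &fetcher{fetching: map[abi.SectorID]chan struct{}{}}
	release := f.acquire(abi.SectorID{Miner: 1000, Number: 1})
	release()
}
```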
22 extern/sector-storage/storiface/worker.go vendored
@ -77,17 +77,17 @@ var _ fmt.Stringer = &CallID{}
var UndefCall CallID

type WorkerCalls interface {
	AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
	SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
	SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error)
	SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
	SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error)
	FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error)
	ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error)
	MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error)
	UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
	ReadPiece(context.Context, io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error)
	Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error)
	AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
	SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
	SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error)
	SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
	SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error)
	FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error)
	ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error)
	MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error)
	UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
	ReadPiece(context.Context, io.Writer, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error)
	Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error)
}

type WorkerReturn interface {
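WorkerCalls methods now identify work by a full SectorRef but still return a CallID that records only the bare sector ID plus a UUID; results arrive later through WorkerReturn. A toy model of that call/return split (names simplified; the uuid dependency is the one lotus already uses):

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
	"github.com/google/uuid"
)

// Mirrors storiface.CallID: the bare sector ID plus a unique call ID.
type callID struct {
	Sector abi.SectorID
	ID     uuid.UUID
}

// An async worker call: returns immediately; the result would be pushed back
// through a WorkerReturn-style callback keyed by the CallID.
func addPiece(_ context.Context, sector storage.SectorRef) (callID, error) {
	ci := callID{Sector: sector.ID, ID: uuid.New()}
	go func() {
		// ... do the work, then report the result against ci ...
	}()
	return ci, nil
}

func main() {
	ci, _ := addPiece(context.Background(), storage.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 3},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	})
	fmt.Println(ci.Sector, ci.ID)
}
```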
22 extern/sector-storage/teststorage_test.go vendored
@ -31,50 +31,50 @@ func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID,
	panic("implement me")
}

func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
func (t *testExec) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
	panic("implement me")
}

func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
func (t *testExec) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
	panic("implement me")
}

func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
func (t *testExec) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
	panic("implement me")
}

func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
func (t *testExec) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storage.Proof, error) {
	panic("implement me")
}

func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
func (t *testExec) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
	panic("implement me")
}

func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
	panic("implement me")
}

func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error {
func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error {
	panic("implement me")
}

func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error {
func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error {
	panic("implement me")
}

func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
func (t *testExec) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
	resp := make(chan apres)
	t.apch <- resp
	ar := <-resp
	return ar.pi, ar.err
}

func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
func (t *testExec) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
	panic("implement me")
}

func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
	panic("implement me")
}
17 extern/sector-storage/testworker_test.go vendored
@ -31,11 +31,6 @@ type testWorker struct {
}

func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker {
	ssize, err := wcfg.SealProof.SectorSize()
	if err != nil {
		panic(err)
	}

	acceptTasks := map[sealtasks.TaskType]struct{}{}
	for _, taskType := range wcfg.TaskTypes {
		acceptTasks[taskType] = struct{}{}
@ -46,15 +41,15 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerR
		lstor: lstor,
		ret:   ret,

		mockSeal: mock.NewMockSectorMgr(ssize, nil),
		mockSeal: mock.NewMockSectorMgr(nil),

		session: uuid.New(),
	}
}

func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) {
func (t *testWorker) asyncCall(sector storage.SectorRef, work func(ci storiface.CallID)) (storiface.CallID, error) {
	ci := storiface.CallID{
		Sector: sector,
		Sector: sector.ID,
		ID:     uuid.New(),
	}

@ -63,7 +58,7 @@ func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallI
	return ci, nil
}

func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
	return t.asyncCall(sector, func(ci storiface.CallID) {
		p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
		if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil {
@ -72,7 +67,7 @@ func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSiz
	})
}

func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	return t.asyncCall(sector, func(ci storiface.CallID) {
		t.pc1s++

@ -90,7 +85,7 @@ func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ti
	})
}

func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (t *testWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	return t.asyncCall(sector, func(ci storiface.CallID) {
		if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil {
			log.Error(err)
67 extern/sector-storage/worker_local.go vendored
@ -20,7 +20,7 @@ import (
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-statestore"
|
||||
storage2 "github.com/filecoin-project/specs-storage/storage"
|
||||
storage "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
@ -31,7 +31,6 @@ import (
|
||||
var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache}
|
||||
|
||||
type WorkerConfig struct {
|
||||
SealProof abi.RegisteredSealProof
|
||||
TaskTypes []sealtasks.TaskType
|
||||
NoSwap bool
|
||||
}
|
||||
@ -40,7 +39,6 @@ type WorkerConfig struct {
|
||||
type ExecutorFunc func() (ffiwrapper.Storage, error)
|
||||
|
||||
type LocalWorker struct {
|
||||
scfg *ffiwrapper.Config
|
||||
storage stores.Store
|
||||
localStore *stores.Local
|
||||
sindex stores.SectorIndex
|
||||
@ -64,9 +62,6 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store
|
||||
}
|
||||
|
||||
w := &LocalWorker{
|
||||
scfg: &ffiwrapper.Config{
|
||||
SealProofType: wcfg.SealProof,
|
||||
},
|
||||
storage: store,
|
||||
localStore: local,
|
||||
sindex: sindex,
|
||||
@ -119,18 +114,13 @@ type localWorkerPathProvider struct {
|
||||
op storiface.AcquireMode
|
||||
}
|
||||
|
||||
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
|
||||
ssize, err := l.w.scfg.SealProofType.SectorSize()
|
||||
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
|
||||
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op)
|
||||
if err != nil {
|
||||
return storiface.SectorPaths{}, nil, err
|
||||
}
|
||||
|
||||
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op)
|
||||
if err != nil {
|
||||
return storiface.SectorPaths{}, nil, err
|
||||
}
|
||||
|
||||
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal)
|
||||
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal)
|
||||
if err != nil {
|
||||
return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
|
||||
}
|
||||
@ -147,7 +137,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.
|
||||
|
||||
sid := storiface.PathByType(storageIDs, fileType)
|
||||
|
||||
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil {
|
||||
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil {
|
||||
log.Errorf("declare sector error: %+v", err)
|
||||
}
|
||||
}
|
||||
@ -155,7 +145,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.
|
||||
}
|
||||
|
||||
func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) {
|
||||
return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
|
||||
return ffiwrapper.New(&localWorkerPathProvider{w: l})
|
||||
}
|
||||
|
||||
type ReturnType string
|
||||
@ -222,9 +212,9 @@ var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storifac
|
||||
Fetch: rfunc(storiface.WorkerReturn.ReturnFetch),
|
||||
}
|
||||
|
||||
func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
|
||||
ci := storiface.CallID{
|
||||
Sector: sector,
|
||||
Sector: sector.ID,
|
||||
ID: uuid.New(),
|
||||
}
|
||||
|
||||
@ -297,7 +287,7 @@ func errstr(err error) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
|
||||
func (l *LocalWorker) NewSector(ctx context.Context, sector storage.SectorRef) error {
|
||||
sb, err := l.executor()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -306,7 +296,7 @@ func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error
|
||||
return sb.NewSector(ctx, sector)
|
||||
}
|
||||
|
||||
func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) AddPiece(ctx context.Context, sector storage.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
|
||||
sb, err := l.executor()
|
||||
if err != nil {
|
||||
return storiface.UndefCall, err
|
||||
@ -317,7 +307,7 @@ func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
|
||||
return l.asyncCall(ctx, sector, Fetch, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
|
||||
_, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype)
|
||||
if err == nil {
|
||||
@ -328,16 +318,16 @@ func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType s
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
|
||||
return l.asyncCall(ctx, sector, SealPreCommit1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
|
||||
|
||||
{
|
||||
// cleanup previous failed attempts if they exist
|
||||
if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil {
|
||||
if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); err != nil {
|
||||
return nil, xerrors.Errorf("cleaning up sealed data: %w", err)
|
||||
}
|
||||
|
||||
if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil {
|
||||
if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true); err != nil {
|
||||
return nil, xerrors.Errorf("cleaning up cache data: %w", err)
|
||||
}
|
||||
}
|
||||
@ -351,7 +341,7 @@ func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, t
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storiface.CallID, error) {
|
||||
sb, err := l.executor()
|
||||
if err != nil {
|
||||
return storiface.UndefCall, err
|
||||
@ -362,7 +352,7 @@ func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, p
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
|
||||
sb, err := l.executor()
|
||||
if err != nil {
|
||||
return storiface.UndefCall, err
|
||||
@ -373,7 +363,7 @@ func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, tick
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) {
|
||||
func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storiface.CallID, error) {
|
||||
sb, err := l.executor()
|
||||
if err != nil {
|
||||
return storiface.UndefCall, err
|
||||
@ -384,7 +374,7 @@ func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phas
|
||||
})
|
||||
}
|
||||
|
||||
func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) {
|
func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
@ -396,7 +386,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k
		}

		if len(keepUnsealed) == 0 {
			if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil {
			if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); err != nil {
				return nil, xerrors.Errorf("removing unsealed data: %w", err)
			}
		}
@ -405,7 +395,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k
	})
}

func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) (storiface.CallID, error) {
func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
	return storiface.UndefCall, xerrors.Errorf("implement me")
}

@ -425,18 +415,13 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
	return err
}

func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
func (l *LocalWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
	return l.asyncCall(ctx, sector, MoveStorage, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		ssize, err := l.scfg.SealProofType.SectorSize()
		if err != nil {
			return nil, err
		}

		return nil, l.storage.MoveStorage(ctx, sector, ssize, types)
		return nil, l.storage.MoveStorage(ctx, sector, types)
	})
}

func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
@ -447,11 +432,11 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde
			return nil, xerrors.Errorf("unsealing sector: %w", err)
		}

		if err = l.storage.RemoveCopies(ctx, sector, storiface.FTSealed); err != nil {
		if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil {
			return nil, xerrors.Errorf("removing source data: %w", err)
		}

		if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil {
		if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil {
			return nil, xerrors.Errorf("removing source data: %w", err)
		}

@ -459,7 +444,7 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde
	})
}

func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
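The type these signatures converge on comes from `github.com/filecoin-project/specs-storage` (bumped in go.mod further down). A minimal sketch of its shape, with the comments being editorial rather than from the library:

```go
package storage

import "github.com/filecoin-project/go-state-types/abi"

// SectorRef bundles a sector's identity with the seal proof it uses, so a
// worker no longer needs the process-wide SealProofType config (l.scfg in
// the lines removed above) to know how big a sector is or how to seal it.
type SectorRef struct {
	ID        abi.SectorID            // miner actor ID + sector number
	ProofType abi.RegisteredSealProof // seal proof this sector is sealed with
}
```

Note the split visible throughout this file: storage operations keyed purely by on-disk identity (Remove, RemoveCopies) now pass `sector.ID`, while operations that need proof parameters keep the full ref.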
22  extern/sector-storage/worker_tracked.go  vendored
@ -42,7 +42,7 @@ func (wt *workTracker) onDone(callID storiface.CallID) {
	delete(wt.running, callID)
}

func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) {
func (wt *workTracker) track(wid WorkerID, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) {
	return func(callID storiface.CallID, err error) (storiface.CallID, error) {
		if err != nil {
			return callID, err
@ -60,7 +60,7 @@ func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.Task
		wt.running[callID] = trackedWork{
			job: storiface.WorkerJob{
				ID:     callID,
				Sector: sid,
				Sector: sid.ID,
				Task:   task,
				Start:  time.Now(),
			},
@ -99,39 +99,39 @@ type trackedWorker struct {
	tracker *workTracker
}

func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces))
}

func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o))
}

func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
}

func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o))
}

func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed))
}

func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData))
}

func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (t *trackedWorker) Fetch(ctx context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am))
}

func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid))
}

func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size))
}
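The `track(...)( ... )` idiom used in every method above is worth spelling out: `track` returns a closure that is applied directly to the multi-value result of the real worker call, so a job enters the running set only if the call was actually issued. A self-contained sketch of the same pattern with illustrative types (not Lotus APIs):

```go
package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu      sync.Mutex
	running map[string]string // callID -> task label
}

// track returns a closure applied directly to the real call's results,
// mirroring the track(...)(Worker.SealPreCommit1(...)) shape above.
func (t *tracker) track(task string) func(id string, err error) (string, error) {
	return func(id string, err error) (string, error) {
		if err != nil {
			return id, err // failed calls never enter the running set
		}
		t.mu.Lock()
		defer t.mu.Unlock()
		t.running[id] = task
		return id, err
	}
}

func main() {
	t := &tracker{running: map[string]string{}}
	issue := func() (string, error) { return "call-1", nil } // stands in for a Worker method
	id, err := t.track("TTPreCommit1")(issue())
	fmt.Println(id, err, t.running[id]) // call-1 <nil> TTPreCommit1
}
```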
14  extern/storage-sealing/checks.go  vendored
@ -14,7 +14,6 @@ import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)

@ -166,23 +165,14 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
		return &ErrBadSeed{xerrors.Errorf("seed has changed")}
	}

	ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok)
	if err != nil {
		return &ErrApi{err}
	}
	spt, err := ffiwrapper.SealProofTypeFromSectorSize(ss)
	if err != nil {
		return err
	}

	if *si.CommR != pci.Info.SealedCID {
		log.Warn("on-chain sealed CID doesn't match!")
	}

	ok, err := m.verif.VerifySeal(proof2.SealVerifyInfo{
		SectorID:              m.minerSector(si.SectorNumber),
		SectorID:              m.minerSectorID(si.SectorNumber),
		SealedCID:             pci.Info.SealedCID,
		SealProof:             spt,
		SealProof:             pci.Info.SealProof,
		Proof:                 proof,
		Randomness:            si.TicketValue,
		InteractiveRandomness: si.SeedValue,
15  extern/storage-sealing/fsm.go  vendored
@ -267,7 +267,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta

	*/

	m.stats.updateSector(m.minerSector(state.SectorNumber), state.State)
	m.stats.updateSector(m.minerSectorID(state.SectorNumber), state.State)

	switch state.State {
	// Happy path
@ -394,6 +394,15 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
		return xerrors.Errorf("getting the sealing delay: %w", err)
	}

	spt, err := m.currentSealProof(ctx)
	if err != nil {
		return xerrors.Errorf("getting current seal proof: %w", err)
	}
	ssize, err := spt.SectorSize()
	if err != nil {
		return err
	}

	m.unsealedInfoMap.lk.Lock()
	defer m.unsealedInfoMap.lk.Unlock()
	for _, sector := range trackedSectors {
@ -408,7 +417,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
			// something's funky here, but probably safe to move on
			log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber)
		} else {
			ui := UnsealedSectorInfo{}
			ui := UnsealedSectorInfo{
				ssize: ssize,
			}
			for _, p := range sector.Pieces {
				if p.DealInfo != nil {
					ui.numDeals++
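Storing `ssize` per open sector (rather than asking the sealer for one global sector size) lets the piece-placement check in getSectorAndPadding, further down, compare against each sector's own capacity. During the V1 to V1_1 switch the sizes happen to be equal, but the bookkeeping no longer assumes a single size. A toy illustration of why per-sector capacity matters, under the assumption that differently sized sectors could be open at once:

```go
package main

import "fmt"

// Illustrative types only, not Lotus APIs: each open sector carries its
// own capacity, like the new ssize field on UnsealedSectorInfo.
type openSector struct {
	stored uint64 // padded bytes already placed
	ssize  uint64 // this sector's padded capacity
}

func fits(s openSector, padded uint64) bool {
	return s.stored+padded <= s.ssize
}

func main() {
	sectors := []openSector{
		{stored: 30 << 30, ssize: 32 << 30},
		{stored: 30 << 30, ssize: 64 << 30},
	}
	piece := uint64(4 << 30)
	for i, s := range sectors {
		fmt.Printf("sector %d fits 4GiB piece: %v\n", i, fits(s, piece))
	}
	// sector 0 fits 4GiB piece: false
	// sector 1 fits 4GiB piece: true
}
```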
21  extern/storage-sealing/garbage.go  vendored
@ -6,9 +6,10 @@ import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
func (m *Sealing) pledgeSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
	if len(sizes) == 0 {
		return nil, nil
	}
@ -47,21 +48,31 @@ func (m *Sealing) PledgeSector() error {
		// this, as we run everything here async, and it's cancelled when the
		// command exits

		size := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
		spt, err := m.currentSealProof(ctx)
		if err != nil {
			log.Errorf("%+v", err)
			return
		}

		size, err := spt.SectorSize()
		if err != nil {
			log.Errorf("%+v", err)
			return
		}

		sid, err := m.sc.Next()
		if err != nil {
			log.Errorf("%+v", err)
			return
		}
		sectorID := m.minerSector(sid)
		sectorID := m.minerSector(spt, sid)
		err = m.sealer.NewSector(ctx, sectorID)
		if err != nil {
			log.Errorf("%+v", err)
			return
		}

		pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, size)
		pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, abi.PaddedPieceSize(size).Unpadded())
		if err != nil {
			log.Errorf("%+v", err)
			return
@ -75,7 +86,7 @@ func (m *Sealing) PledgeSector() error {
			}
		}

		if err := m.newSectorCC(sid, ps); err != nil {
		if err := m.newSectorCC(ctx, sid, ps); err != nil {
			log.Errorf("%+v", err)
			return
		}
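The `abi.PaddedPieceSize(size).Unpadded()` conversion threaded through PledgeSector rests on Filecoin's Fr32 padding: every 127 payload bytes occupy 128 bytes on disk, so the maximum unpadded payload of a sector is its padded size minus 1/128 of it. A quick self-contained check of that arithmetic (plain uint64s stand in for the abi types, which compute the same thing):

```go
package main

import "fmt"

// unpadded mirrors abi.PaddedPieceSize.Unpadded(): a sector of padded
// size S holds S - S/128 bytes of payload.
func unpadded(padded uint64) uint64 { return padded - padded/128 }

func main() {
	for _, ssize := range []uint64{2 << 10, 512 << 20, 32 << 30, 64 << 30} {
		fmt.Printf("sector %d bytes -> max piece payload %d bytes\n", ssize, unpadded(ssize))
	}
	// e.g. a 32 GiB sector holds 34091302912 unpadded bytes (127/128 of 34359738368)
}
```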
101  extern/storage-sealing/sealing.go  vendored
@ -8,13 +8,15 @@ import (
	"sync"
	"time"

	"github.com/filecoin-project/go-state-types/network"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/go-address"
	padreader "github.com/filecoin-project/go-padreader"
	"github.com/filecoin-project/go-state-types/abi"
@ -53,6 +55,7 @@ type SealingAPI interface {
	StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
	StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
	StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
	StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
	StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
@ -105,6 +108,7 @@ type UnsealedSectorInfo struct {
	// stored should always equal sum of pieceSizes.Padded()
	stored     abi.PaddedPieceSize
	pieceSizes []abi.UnpaddedPieceSize
	ssize      abi.SectorSize
}

func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing {
@ -151,19 +155,30 @@ func (m *Sealing) Run(ctx context.Context) error {
func (m *Sealing) Stop(ctx context.Context) error {
	return m.sectors.Stop(ctx)
}

func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
	log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid)
	if (padreader.PaddedSize(uint64(size))) != size {
		return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
	}

	if size > abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() {
	sp, err := m.currentSealProof(ctx)
	if err != nil {
		return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err)
	}

	ssize, err := sp.SectorSize()
	if err != nil {
		return 0, 0, err
	}

	if size > abi.PaddedPieceSize(ssize).Unpadded() {
		return 0, 0, xerrors.Errorf("piece cannot fit into a sector")
	}

	m.unsealedInfoMap.lk.Lock()

	sid, pads, err := m.getSectorAndPadding(size)
	sid, pads, err := m.getSectorAndPadding(ctx, size)
	if err != nil {
		m.unsealedInfoMap.lk.Unlock()
		return 0, 0, xerrors.Errorf("getting available sector: %w", err)
@ -185,7 +200,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
		return 0, 0, xerrors.Errorf("adding piece to sector: %w", err)
	}

	startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(m.sealer.SectorSize())
	startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(ssize)

	m.unsealedInfoMap.lk.Unlock()

@ -201,7 +216,16 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error {
	log.Infof("Adding piece to sector %d", sectorID)
	ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r)
	sp, err := m.currentSealProof(ctx)
	if err != nil {
		return xerrors.Errorf("getting current seal proof type: %w", err)
	}
	ssize, err := sp.SectorSize()
	if err != nil {
		return err
	}

	ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sp, sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r)
	if err != nil {
		return xerrors.Errorf("writing piece: %w", err)
	}
@ -224,6 +248,7 @@ func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size
		numDeals:   num,
		stored:     ui.stored + piece.Piece.Size,
		pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()),
		ssize:      ssize,
	}

	return nil
@ -257,16 +282,16 @@ func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
}

// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
	ss := abi.PaddedPieceSize(m.sealer.SectorSize())
func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
	for k, v := range m.unsealedInfoMap.infos {
		pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded())
		if v.stored+size.Padded()+padLength <= ss {

		if v.stored+size.Padded()+padLength <= abi.PaddedPieceSize(v.ssize) {
			return k, pads, nil
		}
	}

	ns, err := m.newDealSector()
	ns, ssize, err := m.newDealSector(ctx)
	if err != nil {
		return 0, nil, err
	}
@ -275,23 +300,24 @@ func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNum
		numDeals:   0,
		stored:     0,
		pieceSizes: nil,
		ssize:      ssize,
	}

	return ns, nil, nil
}

// newDealSector creates a new sector for deal storage
func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) {
	// First make sure we don't have too many 'open' sectors

	cfg, err := m.getConfig()
	if err != nil {
		return 0, xerrors.Errorf("getting config: %w", err)
		return 0, 0, xerrors.Errorf("getting config: %w", err)
	}

	if cfg.MaxSealingSectorsForDeals > 0 {
		if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals {
			return 0, ErrTooManySectorsSealing
			return 0, 0, ErrTooManySectorsSealing
		}
	}

@ -338,36 +364,36 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
		}
	}

	spt, err := m.currentSealProof(ctx)
	if err != nil {
		return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err)
	}

	// Now actually create a new sector

	sid, err := m.sc.Next()
	if err != nil {
		return 0, xerrors.Errorf("getting sector number: %w", err)
		return 0, 0, xerrors.Errorf("getting sector number: %w", err)
	}

	err = m.sealer.NewSector(context.TODO(), m.minerSector(sid))
	err = m.sealer.NewSector(context.TODO(), m.minerSector(spt, sid))
	if err != nil {
		return 0, xerrors.Errorf("initializing sector: %w", err)
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
	if err != nil {
		return 0, xerrors.Errorf("bad sector size: %w", err)
		return 0, 0, xerrors.Errorf("initializing sector: %w", err)
	}

	log.Infof("Creating sector %d", sid)
	err = m.sectors.Send(uint64(sid), SectorStart{
		ID:         sid,
		SectorType: rt,
		SectorType: spt,
	})

	if err != nil {
		return 0, xerrors.Errorf("starting the sector fsm: %w", err)
		return 0, 0, xerrors.Errorf("starting the sector fsm: %w", err)
	}

	cf, err := m.getConfig()
	if err != nil {
		return 0, xerrors.Errorf("getting the sealing delay: %w", err)
		return 0, 0, xerrors.Errorf("getting the sealing delay: %w", err)
	}

	if cf.WaitDealsDelay > 0 {
@ -380,25 +406,42 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
		}()
	}

	return sid, nil
	ssize, err := spt.SectorSize()
	return sid, ssize, err
}

// newSectorCC accepts a slice of pieces with no deal (junk data)
func (m *Sealing) newSectorCC(sid abi.SectorNumber, pieces []Piece) error {
	rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
func (m *Sealing) newSectorCC(ctx context.Context, sid abi.SectorNumber, pieces []Piece) error {
	spt, err := m.currentSealProof(ctx)
	if err != nil {
		return xerrors.Errorf("bad sector size: %w", err)
		return xerrors.Errorf("getting current seal proof type: %w", err)
	}

	log.Infof("Creating CC sector %d", sid)
	return m.sectors.Send(uint64(sid), SectorStartCC{
		ID:         sid,
		Pieces:     pieces,
		SectorType: rt,
		SectorType: spt,
	})
}

func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID {
func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) {
	mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil)
	if err != nil {
		return 0, err
	}

	return mi.SealProofType, nil
}

func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef {
	return storage.SectorRef{
		ID:        m.minerSectorID(num),
		ProofType: spt,
	}
}

func (m *Sealing) minerSectorID(num abi.SectorNumber) abi.SectorID {
	mid, err := address.IDFromAddress(m.maddr)
	if err != nil {
		panic(err)
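The two new helpers at the end of this file are the crux of the PR: `currentSealProof` reads the proof type from the miner's on-chain info at the moment a sector is created, and the FSM then pins it in the sector's own record (`SectorType`), so every later step builds its SectorRef from the pinned value rather than re-querying. A self-contained sketch of that lookup-then-pin behavior, with simplified stand-in types; the enum values 3 and 8 correspond to RegisteredSealProof_StackedDrg32GiBV1 and its V1_1 counterpart, assuming the standard enum layout:

```go
package main

import "fmt"

type sealProof int

const (
	stackedDrg32GiBV1   sealProof = 3
	stackedDrg32GiBV1_1 sealProof = 8
)

type minerState struct{ sealProofType sealProof } // stands in for StateMinerInfo

type sectorInfo struct {
	number     uint64
	sectorType sealProof // pinned at creation, like SectorInfo.SectorType
}

// newSector reads the current proof type once and pins it.
func newSector(chain minerState, num uint64) sectorInfo {
	return sectorInfo{number: num, sectorType: chain.sealProofType}
}

func main() {
	chain := minerState{sealProofType: stackedDrg32GiBV1}
	old := newSector(chain, 1)
	chain.sealProofType = stackedDrg32GiBV1_1 // miner switches proof type on chain
	upgraded := newSector(chain, 2)
	fmt.Println(old.sectorType, upgraded.sectorType) // 3 8 — in-flight sector keeps its proof
}
```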
2  extern/storage-sealing/states_proving.go  vendored
@ -32,7 +32,7 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf
}

func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error {
	if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorNumber)); err != nil {
	if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
		return ctx.Send(SectorRemoveFailed{err})
	}
21  extern/storage-sealing/states_sealing.go  vendored
@ -31,7 +31,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
		allocated += piece.Piece.Size.Unpadded()
	}

	ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
	ssize, err := sector.SectorType.SectorSize()
	if err != nil {
		return err
	}

	ubytes := abi.PaddedPieceSize(ssize).Unpadded()

	if allocated > ubytes {
		return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes)
@ -46,7 +51,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
		log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber)
	}

	fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
	fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
	if err != nil {
		return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
	}
@ -148,7 +153,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
		// process has just restarted and the worker had the result ready)
	}

	pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
	pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
	if err != nil {
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
	}
@ -159,7 +164,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
}

func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
	cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.PreCommit1Out)
	cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
	if err != nil {
		return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
	}
@ -386,12 +391,12 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
		Unsealed: *sector.CommD,
		Sealed:   *sector.CommR,
	}
	c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
	c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
	if err != nil {
		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(1): %w", err)})
	}

	proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), c2in)
	proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in)
	if err != nil {
		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
	}
@ -492,7 +497,7 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
	// TODO: Maybe wait for some finality

	if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil {
	if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil {
		return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
	}

@ -503,7 +508,7 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf
	// TODO: track sector health / expiration
	log.Infof("Proving sector %d", sector.SectorNumber)

	if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil {
	if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil {
		log.Error(err)
	}
10  go.mod
@ -30,7 +30,7 @@ require (
	github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
	github.com/filecoin-project/go-data-transfer v1.1.0
	github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
	github.com/filecoin-project/go-fil-markets v1.0.4
	github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335
	github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49
	github.com/filecoin-project/go-multistore v0.0.3
	github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
@ -41,7 +41,7 @@ require (
	github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
	github.com/filecoin-project/specs-actors v0.9.13
	github.com/filecoin-project/specs-actors/v2 v2.3.0
	github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796
	github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
	github.com/filecoin-project/test-vectors/schema v0.0.5
	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
	github.com/go-kit/kit v0.10.0
@ -136,9 +136,9 @@ require (
	go.uber.org/fx v1.9.0
	go.uber.org/multierr v1.5.0
	go.uber.org/zap v1.16.0
	golang.org/x/net v0.0.0-20200707034311-ab3426394381
	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
	golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c
	golang.org/x/net v0.0.0-20201021035429-f5854403a974
	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
	golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f
	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
	gopkg.in/cheggaaa/pb.v1 v1.0.28
29  go.sum
@ -255,8 +255,8 @@ github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.4 h1:OGEoNppGcAjzIznDHFb/yy7ypVgHMO2CQZg6E9nViWI=
github.com/filecoin-project/go-fil-markets v1.0.4/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 h1:DF8eu0WdEBnSVdu71+jfT4YMk6fO7AIJk2ZiWd3l15c=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
@ -291,8 +291,8 @@ github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
github.com/filecoin-project/specs-actors/v2 v2.3.0 h1:V7lHeF2ylfFi84F4y80u5FE4BpPHYGvB71kLrhXkJto=
github.com/filecoin-project/specs-actors/v2 v2.3.0/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk=
github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -1497,8 +1497,8 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s=
github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I=
github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM=
github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb/go.mod h1:pbNsDSxn1ICiNn9Ct4ZGNrwzfkkwYbx/lw8VuyutFIg=
github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo=
github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk=
github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
@ -1506,6 +1506,7 @@ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZD
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU=
@ -1596,6 +1597,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1670,6 +1672,8 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1687,6 +1691,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1753,11 +1759,15 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1803,6 +1813,8 @@ golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1931,8 +1943,13 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbc
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM=
modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=
modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk=
modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk=
modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
@ -15,6 +15,7 @@ import (
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-state-types/abi"
	specstorage "github.com/filecoin-project/specs-storage/storage"

	"github.com/ipfs/go-cid"
)
@ -52,9 +53,12 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
		return nil, err
	}

	sid := abi.SectorID{
		Miner:  abi.ActorID(mid),
		Number: sectorID,
	ref := specstorage.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: sectorID,
		},
		ProofType: si.SectorType,
	}

	r, w := io.Pipe()
@ -63,7 +67,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
		if si.CommD != nil {
			commD = *si.CommD
		}
		err := rpn.sealer.ReadPiece(ctx, w, sid, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD)
		err := rpn.sealer.ReadPiece(ctx, w, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD)
		_ = w.CloseWithError(err)
	}()
@ -167,6 +167,19 @@ func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner a
	return mi.Worker, nil
}

func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, miner address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return 0, err
	}

	mi, err := n.StateMinerInfo(ctx, miner, tsk)
	if err != nil {
		return 0, err
	}
	return mi.SealProofType, nil
}

func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) {
	signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK)
	if err != nil {
@ -8,6 +8,7 @@ import (

	metricsi "github.com/ipfs/go-metrics-interface"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/exchange"
	"github.com/filecoin-project/lotus/chain/store"
@ -339,7 +340,7 @@ func Online() Option {
			Override(new(stores.SectorIndex), From(new(*stores.Index))),
			Override(new(dtypes.MinerID), modules.MinerID),
			Override(new(dtypes.MinerAddress), modules.MinerAddress),
			Override(new(*ffiwrapper.Config), modules.ProofsConfig),
			Override(new(abi.RegisteredSealProof), modules.SealProofType),
			Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
			Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),
			Override(new(*sectorstorage.Manager), modules.SectorStorage),
@ -43,7 +43,6 @@ import (
	"github.com/filecoin-project/go-multistore"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"

	"github.com/filecoin-project/lotus/api"
@ -141,11 +140,6 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
		return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err)
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) {
		return nil, xerrors.New("data doesn't fit in a sector")
	}
@ -171,7 +165,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
		EndEpoch:      calcDealExpiration(params.MinBlocksDuration, md, dealStart),
		Price:         params.EpochPrice,
		Collateral:    params.ProviderCollateral,
		Rt:            rt,
		Rt:            mi.SealProofType,
		FastRetrieval: params.FastRetrieval,
		VerifiedDeal:  params.VerifiedDeal,
		StoreID:       storeID,
@ -647,7 +641,7 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre

func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {

	// Hard-code the sector size to 32GiB, because:
	// Hard-code the sector type to 32GiBV1_1, because:
	// - pieceio.GeneratePieceCommitment requires a RegisteredSealProof
	// - commP itself is sector-size independent, with rather low probability of that changing
	//   ( note how the final rust call is identical for every RegSP type )
@ -655,12 +649,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
	//
	// IF/WHEN this changes in the future we will have to be able to calculate
	// "old style" commP, and thus will need to introduce a version switch or similar
	arbitrarySectorSize := abi.SectorSize(32 << 30)

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(arbitrarySectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}
	arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1

	rdr, err := os.Open(inpath)
	if err != nil {
@ -673,7 +662,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
		return nil, err
	}

	commP, pieceSize, err := pieceio.GeneratePieceCommitment(rt, rdr, uint64(stat.Size()))
	commP, pieceSize, err := pieceio.GeneratePieceCommitment(arbitraryProofType, rdr, uint64(stat.Size()))

	if err != nil {
		return nil, xerrors.Errorf("computing commP failed: %w", err)
@ -123,7 +123,12 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad
}

func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
	act, err := m.StateManager.LoadActorTsk(ctx, actor, tsk)
	ts, err := m.Chain.GetTipSetFromKey(tsk)
	if err != nil {
		return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err)
	}

	act, err := m.StateManager.LoadActor(ctx, actor, ts)
	if err != nil {
		return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err)
	}
@ -133,7 +138,16 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address,
		return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err)
	}

	return mas.Info()
	// TODO: You know, this is terrible.
	// I mean, we _really_ shouldn't do this. Maybe we should convert somewhere else?
	info, err := mas.Info()
	if err != nil {
		return miner.MinerInfo{}, err
	}
	if m.StateManager.GetNtwkVersion(ctx, ts.Height()) >= network.Version7 && info.SealProofType < abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
		info.SealProofType += abi.RegisteredSealProof_StackedDrg2KiBV1_1
	}
	return info, nil
}

func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
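The shim above leans on the layout of the RegisteredSealProof enum: values 0 through 4 are the V1 proofs and 5 through 9 the matching V1_1 proofs in the same size order, so adding the numeric value of StackedDrg2KiBV1_1 (5) translates a V1 proof to its V1_1 counterpart. A minimal reproduction of the mapping, assuming that enum layout:

```go
package main

import "fmt"

// stackedDrg2KiBV1_1 is the first V1_1 enum value; adding it to a V1
// value yields the V1_1 proof of the same sector size.
const stackedDrg2KiBV1_1 = 5

func translate(networkVersion int, sealProofType int) int {
	if networkVersion >= 7 && sealProofType < stackedDrg2KiBV1_1 {
		return sealProofType + stackedDrg2KiBV1_1
	}
	return sealProofType
}

func main() {
	fmt.Println(translate(7, 3)) // 8: StackedDrg32GiBV1 -> StackedDrg32GiBV1_1
	fmt.Println(translate(6, 3)) // 3: pre-upgrade networks keep V1
	fmt.Println(translate(7, 8)) // 8: already V1_1, left alone
}
```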
@ -24,7 +24,6 @@ import (
	"github.com/filecoin-project/go-state-types/big"

	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -43,7 +42,6 @@ import (
type StorageMinerAPI struct {
	common.CommonAPI

	ProofsConfig *ffiwrapper.Config
	SectorBlocks *sectorblocks.SectorBlocks

	PieceStore dtypes.ProviderPieceStore
@ -83,8 +83,8 @@ func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
	return address.NewFromBytes(maddrb)
}

func GetParams(sbc *ffiwrapper.Config) error {
	ssize, err := sbc.SealProofType.SectorSize()
func GetParams(spt abi.RegisteredSealProof) error {
	ssize, err := spt.SectorSize()
	if err != nil {
		return err
	}
@ -95,6 +95,7 @@ func GetParams(sbc *ffiwrapper.Config) error {
		return nil
	}

	// TODO: We should fetch the params for the actual proof type, not just based on the size.
	if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
		return xerrors.Errorf("fetching proof parameters: %w", err)
	}
@ -119,22 +120,13 @@ func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.Network
	return a.StateNetworkName(ctx)
}

func ProofsConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*ffiwrapper.Config, error) {
func SealProofType(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (abi.RegisteredSealProof, error) {
	mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK)
	if err != nil {
		return nil, err
		return 0, err
	}

	spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	sb := &ffiwrapper.Config{
		SealProofType: spt,
	}

	return sb, nil
	return mi.SealProofType, nil
}

type sidsc struct {
@ -512,7 +504,6 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside
}

func StorageProvider(minerAddress dtypes.MinerAddress,
	ffiConfig *ffiwrapper.Config,
	storedAsk *storedask.StoredAsk,
	h host.Host, ds dtypes.MetadataDS,
	mds dtypes.StagingMultiDstore,
@ -530,7 +521,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress,

	opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df))

	return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), ffiConfig.SealProofType, storedAsk, opt)
	return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), storedAsk, opt)
}

func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
@ -595,13 +586,13 @@ func RetrievalProvider(h host.Host,
var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")

func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)

	wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix))
	smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix))

	sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts)
	sst, err := sectorstorage.New(ctx, ls, si, sc, urls, sa, wsts, smsts)
	if err != nil {
		return nil, err
	}
@ -93,6 +93,22 @@ func TestDealMining(t *testing.T) {
	test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false)
}

func TestSDRUpgrade(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	oldDelay := policy.GetPreCommitChallengeDelay()
	policy.SetPreCommitChallengeDelay(5)
	t.Cleanup(func() {
		policy.SetPreCommitChallengeDelay(oldDelay)
	})

	test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond)
}

func TestPledgeSectors(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
@ -26,7 +26,6 @@ import (
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/gen"
	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
	"github.com/filecoin-project/lotus/chain/messagepool"
@ -356,7 +355,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
		preseals = test.GenesisPreseals
	}

	genm, k, err := mockstorage.PreSeal(2048, maddr, preseals)
	genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
	if err != nil {
		t.Fatal(err)
	}
@ -458,7 +457,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes

		storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
			node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) {
				return mock.NewMockSectorMgr(policy.GetDefaultSectorSize(), sectors), nil
				return mock.NewMockSectorMgr(sectors), nil
			}),
			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
			node.Unset(new(*sectorstorage.Manager)),
@ -39,13 +39,8 @@ func NewSealingAPIAdapter(api storageMinerApi) SealingAPIAdapter {
}

func (s SealingAPIAdapter) StateMinerSectorSize(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (abi.SectorSize, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return 0, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
	}

	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk)
	mi, err := s.StateMinerInfo(ctx, maddr, tok)
	if err != nil {
		return 0, err
	}
@ -70,14 +65,19 @@ func (s SealingAPIAdapter) StateMinerInitialPledgeCollateral(ctx context.Context
	return s.delegate.StateMinerInitialPledgeCollateral(ctx, a, pci, tsk)
}

func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) {
func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (miner.MinerInfo, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
		return miner.MinerInfo{}, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
	}

	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk)
	return s.delegate.StateMinerInfo(ctx, maddr, tsk)
}

func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) {
	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.StateMinerInfo(ctx, maddr, tok)
	if err != nil {
		return address.Undef, err
	}
@@ -214,12 +214,7 @@ func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiw
 		return nil, xerrors.Errorf("getting sector size: %w", err)
 	}

-	spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
-	if err != nil {
-		return nil, err
-	}
-
-	wpt, err := spt.RegisteredWinningPoStProof()
+	wpt, err := mi.SealProofType.RegisteredWinningPoStProof()
 	if err != nil {
 		return nil, err
 	}
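With MinerInfo now carrying SealProofType directly, the old round trip (sector size → seal proof → PoSt proof) collapses into one call. A small check of that mapping, again assuming only go-state-types:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	spt := abi.RegisteredSealProof_StackedDrg2KiBV1
	wpt, err := spt.RegisteredWinningPoStProof()
	if err != nil {
		panic(err)
	}
	fmt.Println(wpt == abi.RegisteredPoStProof_StackedDrgWinning2KiBV1) // true
}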
@@ -13,17 +13,21 @@ import (

 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
 	"github.com/filecoin-project/lotus/genesis"
 )

-func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) {
+func PreSeal(spt abi.RegisteredSealProof, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) {
 	k, err := wallet.GenerateKey(types.KTBLS)
 	if err != nil {
 		return nil, nil, err
 	}

+	ssize, err := spt.SectorSize()
+	if err != nil {
+		return nil, nil, err
+	}
+
 	genm := &genesis.Miner{
 		ID:    maddr,
 		Owner: k.Address,
@@ -34,15 +38,10 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis
 		Sectors: make([]*genesis.PreSeal, sectors),
 	}

-	st, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
-	if err != nil {
-		return nil, nil, err
-	}
-
 	for i := range genm.Sectors {
 		preseal := &genesis.PreSeal{}

-		preseal.ProofType = st
+		preseal.ProofType = spt
 		preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
 		d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD)
 		r := mock.CommDR(d)
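preseal.CommD above is the zero piece commitment over the sector's full unpadded size, and Unpadded() is pure arithmetic: 127 of every 128 padded bytes carry data. A quick check of that conversion for a 2 KiB sector, using only go-state-types:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	ssize := abi.SectorSize(2048)
	up := abi.PaddedPieceSize(ssize).Unpadded()
	fmt.Println(uint64(up)) // 2032 = 2048 - 2048/128
}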
@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/filecoin-project/go-bitfield"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -188,24 +189,30 @@ func (s *WindowPoStScheduler) runSubmitPoST(
 	return submitErr
 }

-func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) {
+func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) {
 	mid, err := address.IDFromAddress(s.actor)
 	if err != nil {
 		return bitfield.BitField{}, err
 	}

-	sectors := make(map[abi.SectorID]struct{})
-	var tocheck []abi.SectorID
-	err = check.ForEach(func(snum uint64) error {
-		s := abi.SectorID{
-			Miner:  abi.ActorID(mid),
-			Number: abi.SectorNumber(snum),
-		}
-
-		tocheck = append(tocheck, s)
-		sectors[s] = struct{}{}
-		return nil
-	})
-	if err != nil {
-		return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err)
-	}
+	sectorInfos, err := s.api.StateMinerSectors(ctx, s.actor, &check, tsk)
+	if err != nil {
+		return bitfield.BitField{}, err
+	}
+
+	sectors := make(map[abi.SectorNumber]struct{})
+	var tocheck []storage.SectorRef
+	for _, info := range sectorInfos {
+		sectors[info.SectorNumber] = struct{}{}
+		tocheck = append(tocheck, storage.SectorRef{
+			ProofType: info.SealProof,
+			ID: abi.SectorID{
+				Miner:  abi.ActorID(mid),
+				Number: info.SectorNumber,
+			},
+		})
+	}
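storage.SectorRef is the type doing the heavy lifting in this PR: it pairs a sector's identity with its registered seal proof, so each sector can carry its own proof type instead of the scheduler assuming one size per miner. A minimal construction mirroring the loop above (the values are illustrative, not from the diff):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func main() {
	ref := storage.SectorRef{
		ID:        abi.SectorID{Miner: abi.ActorID(1000), Number: abi.SectorNumber(1)},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}
	// The ref carries enough information to recover the sector size on its own.
	ssize, _ := ref.ProofType.SectorSize()
	fmt.Println(ref.ID.Number, uint64(ssize)) // 1 2048
}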
@@ -215,20 +222,20 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 		return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
 	}
 	for _, id := range bad {
-		delete(sectors, id)
+		delete(sectors, id.Number)
 	}

 	log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors))

 	sbf := bitfield.New()
 	for s := range sectors {
-		sbf.Set(uint64(s.Number))
+		sbf.Set(uint64(s))
 	}

 	return sbf, nil
 }

-func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
+func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
 	defer span.End()
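The surviving sector numbers are folded back into a bitfield for the caller. A self-contained round trip through go-bitfield, the only API assumed here:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

func main() {
	sbf := bitfield.New()
	for _, n := range []uint64{2, 5, 9} { // e.g. sector numbers that passed the check
		sbf.Set(n)
	}
	count, err := sbf.Count()
	if err != nil {
		panic(err)
	}
	fmt.Println(count) // 3
}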
@@ -254,7 +261,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 	faulty += uc

-	recovered, err := s.checkSectors(ctx, unrecovered)
+	recovered, err := s.checkSectors(ctx, unrecovered, tsk)
 	if err != nil {
 		return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err)
 	}
@@ -320,7 +327,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 	return recoveries, sm, nil
 }

-func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
+func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
 	defer span.End()
@@ -335,7 +342,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
 		return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
 	}

-	good, err := s.checkSectors(ctx, nonFaulty)
+	good, err := s.checkSectors(ctx, nonFaulty, tsk)
 	if err != nil {
 		return nil, nil, xerrors.Errorf("checking sectors: %w", err)
 	}
@@ -438,7 +445,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		}
 	)

-	if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
+	if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
 		// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
 		log.Errorf("checking sector recoveries: %v", err)
 	}
@@ -457,7 +464,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		return // FORK: declaring faults after ignition upgrade makes no sense
 	}

-	if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
+	if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
 		// TODO: This is also potentially really bad, but we try to post anyways
 		log.Errorf("checking sector faults: %v", err)
 	}
@@ -527,7 +534,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
 	}

-	good, err := s.checkSectors(ctx, toProve)
+	good, err := s.checkSectors(ctx, toProve, ts.Key())
 	if err != nil {
 		return nil, xerrors.Errorf("checking sectors to skip: %w", err)
 	}
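All three call sites above now pin their state queries to the tipset the PoSt run started from. A hypothetical illustration (not Lotus code) of why that matters: keying reads by an explicit tipset makes repeated queries within one run mutually consistent even if the chain head advances mid-run.

package main

import "fmt"

type TipSetKey string

// byTipset maps a tipset key to per-miner sector counts; a real node would
// resolve state from its chain store instead of a literal map.
type stateStore struct {
	byTipset map[TipSetKey]map[string]int
}

func (s stateStore) sectorCount(miner string, tsk TipSetKey) int {
	return s.byTipset[tsk][miner]
}

func main() {
	s := stateStore{byTipset: map[TipSetKey]map[string]int{
		"head-100": {"t01000": 8},
		"head-101": {"t01000": 9}, // the head advanced; a new sector landed
	}}
	tsk := TipSetKey("head-100") // pinned once, at the start of the PoSt run
	fmt.Println(s.sectorCount("t01000", tsk), s.sectorCount("t01000", tsk)) // 8 8
}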
@@ -11,6 +11,7 @@ import (

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
@@ -116,7 +117,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si
 type mockFaultTracker struct {
 }

-func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
+func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) {
 	// Returns "bad" sectors so just return nil meaning all sectors are good
 	return nil, nil
 }