Merge pull request #1191 from filecoin-project/feat/new-sb-fs
Sectorbuilder FS refactor
commit 9935e3acfc
@@ -109,6 +109,7 @@ type FullNode interface {
 	StateMinerPeerID(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error)
 	StateMinerElectionPeriodStart(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error)
 	StateMinerSectorSize(context.Context, address.Address, *types.TipSet) (uint64, error)
+	StateMinerFaults(context.Context, address.Address, *types.TipSet) ([]uint64, error)
 	StatePledgeCollateral(context.Context, *types.TipSet) (types.BigInt, error)
 	StateWaitMsg(context.Context, cid.Cid) (*MsgWait, error)
 	StateListMiners(context.Context, *types.TipSet) ([]address.Address, error)
@@ -22,11 +22,11 @@ const (
 	WaitSeed   // waiting for seed
 	Committing
 	CommitWait // waiting for message to land on chain
+	FinalizeSector
 	Proving
 	_ // reserved
 	_
 	_
-	_

 	// recovery handling
 	// Reseal
@@ -64,6 +64,7 @@ var SectorStates = []string{
 	WaitSeed:   "WaitSeed",
 	Committing: "Committing",
 	CommitWait: "CommitWait",
+	FinalizeSector: "FinalizeSector",
 	Proving:    "Proving",

 	SealFailed: "SealFailed",
@@ -100,6 +100,7 @@ type FullNodeStruct struct {
 	StateMinerPeerID              func(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error)                `perm:"read"`
 	StateMinerElectionPeriodStart func(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error)             `perm:"read"`
 	StateMinerSectorSize          func(context.Context, address.Address, *types.TipSet) (uint64, error)                          `perm:"read"`
+	StateMinerFaults              func(context.Context, address.Address, *types.TipSet) ([]uint64, error)                        `perm:"read"`
 	StateCall                     func(context.Context, *types.Message, *types.TipSet) (*api.MethodCall, error)                  `perm:"read"`
 	StateReplay                   func(context.Context, *types.TipSet, cid.Cid) (*api.ReplayResults, error)                      `perm:"read"`
 	StateGetActor                 func(context.Context, address.Address, *types.TipSet) (*types.Actor, error)                    `perm:"read"`
@@ -417,6 +418,10 @@ func (c *FullNodeStruct) StateMinerSectorSize(ctx context.Context, actor address
 	return c.Internal.StateMinerSectorSize(ctx, actor, ts)
 }

+func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, ts *types.TipSet) ([]uint64, error) {
+	return c.Internal.StateMinerFaults(ctx, actor, ts)
+}
+
 func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.MethodCall, error) {
 	return c.Internal.StateCall(ctx, msg, ts)
 }
@@ -2,6 +2,8 @@ package stmgr

 import (
 	"context"
+	amt2 "github.com/filecoin-project/go-amt-ipld/v2"
+	"github.com/filecoin-project/lotus/chain/actors/aerrors"

 	ffi "github.com/filecoin-project/filecoin-ffi"
 	sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
@@ -253,6 +255,21 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
 	return mas.SlashedAt, nil
 }

+func GetMinerFaults(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) ([]uint64, error) {
+	var mas actors.StorageMinerActorState
+	_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
+	if err != nil {
+		return nil, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err)
+	}
+
+	ss, lerr := amt2.LoadAMT(amt.WrapBlockstore(sm.cs.Blockstore()), mas.Sectors)
+	if lerr != nil {
+		return nil, aerrors.HandleExternalError(lerr, "could not load proving set node")
+	}
+
+	return mas.FaultSet.All(2 * ss.Count)
+}
+
 func GetStorageDeal(ctx context.Context, sm *StateManager, dealId uint64, ts *types.TipSet) (*actors.OnChainDeal, error) {
 	var state actors.StorageMarketState
 	if _, err := sm.LoadActorState(ctx, actors.StorageMarketAddress, &state, ts); err != nil {
@@ -146,7 +146,7 @@ func main() {
 		Miner:         maddr,
 		SectorSize:    sectorSize,
 		WorkerThreads: 2,
-		Dir:           sbdir,
+		Paths:         sectorbuilder.SimplePath(sbdir),
 	}

 	if robench == "" {
@@ -174,7 +174,7 @@ func main() {

 		r := rand.New(rand.NewSource(100 + int64(i)))

-		pi, err := sb.AddPiece(dataSize, i, r, nil)
+		pi, err := sb.AddPiece(context.TODO(), dataSize, i, r, nil)
 		if err != nil {
 			return err
 		}
@@ -225,7 +225,7 @@ func main() {

 	if !c.Bool("skip-unseal") {
 		log.Info("Unsealing sector")
-		rc, err := sb.ReadPieceFromSealedSector(1, 0, dataSize, ticket.TicketBytes[:], commD[:])
+		rc, err := sb.ReadPieceFromSealedSector(context.TODO(), 1, 0, dataSize, ticket.TicketBytes[:], commD[:])
 		if err != nil {
 			return err
 		}
@@ -41,7 +41,6 @@ func runSyncer(ctx context.Context, api api.FullNode, st *storage) {
 					go subMpool(ctx, api, st)
 					go subBlocks(ctx, api, st)
 				}
-
 			}
 		}
 	}()
@@ -35,7 +35,7 @@ func acceptJobs(ctx context.Context, api lapi.StorageMiner, endpoint string, aut
 		SectorSize:    ssize,
 		Miner:         act,
 		WorkerThreads: 1,
-		Dir:           repo,
+		Paths:         sectorbuilder.SimplePath(repo),
 	})
 	if err != nil {
 		return err
@@ -1,12 +1,14 @@
 package main

 import (
+	"fmt"
 	"io"
 	"mime"
 	"net/http"
 	"os"

 	sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
+	"github.com/filecoin-project/go-sectorbuilder/fs"
 	files "github.com/ipfs/go-ipfs-files"
 	"golang.org/x/xerrors"
 	"gopkg.in/cheggaaa/pb.v1"
@@ -26,7 +28,7 @@ func (w *worker) sizeForType(typ string) int64 {
 func (w *worker) fetch(typ string, sectorID uint64) error {
 	outname := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))

-	url := w.minerEndpoint + "/remote/" + typ + "/" + w.sb.SectorName(sectorID)
+	url := w.minerEndpoint + "/remote/" + typ + "/" + fmt.Sprint(sectorID)
 	log.Infof("Fetch %s %s", typ, url)

 	req, err := http.NewRequest("GET", url, nil)
@@ -76,21 +78,24 @@ func (w *worker) fetch(typ string, sectorID uint64) error {
 }

 func (w *worker) push(typ string, sectorID uint64) error {
-	filename := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))
+	filename, err := w.sb.SectorPath(fs.DataType(typ), sectorID)
+	if err != nil {
+		return err
+	}

-	url := w.minerEndpoint + "/remote/" + typ + "/" + w.sb.SectorName(sectorID)
+	url := w.minerEndpoint + "/remote/" + typ + "/" + fmt.Sprint(sectorID)
 	log.Infof("Push %s %s", typ, url)

-	stat, err := os.Stat(filename)
+	stat, err := os.Stat(string(filename))
 	if err != nil {
 		return err
 	}

 	var r io.Reader
 	if stat.IsDir() {
-		r, err = tarutil.TarDirectory(filename)
+		r, err = tarutil.TarDirectory(string(filename))
 	} else {
-		r, err = os.OpenFile(filename, os.O_RDONLY, 0644)
+		r, err = os.OpenFile(string(filename), os.O_RDONLY, 0644)
 	}
 	if err != nil {
 		return xerrors.Errorf("opening push reader: %w", err)
@@ -196,7 +196,7 @@ var aggregateSectorDirsCmd = &cli.Command{
 		agsb, err := sectorbuilder.New(&sectorbuilder.Config{
 			Miner:         maddr,
 			SectorSize:    ssize,
-			Dir:           destdir,
+			Paths:         sectorbuilder.SimplePath(destdir),
 			WorkerThreads: 2,
 		}, namespace.Wrap(agmds, datastore.NewKey("/sectorbuilder")))
 		if err != nil {
@@ -257,7 +257,7 @@ var aggregateSectorDirsCmd = &cli.Command{
 		sb, err := sectorbuilder.New(&sectorbuilder.Config{
 			Miner:         maddr,
 			SectorSize:    genm.SectorSize,
-			Dir:           dir,
+			Paths:         sectorbuilder.SimplePath(dir),
 			WorkerThreads: 2,
 		}, namespace.Wrap(mds, datastore.NewKey("/sectorbuilder")))
 		if err != nil {
@@ -32,7 +32,7 @@ func PreSeal(maddr address.Address, ssize uint64, offset uint64, sectors int, sb
 		Miner:          maddr,
 		SectorSize:     ssize,
 		FallbackLastID: offset,
-		Dir:            sbroot,
+		Paths:          sectorbuilder.SimplePath(sbroot),
 		WorkerThreads:  2,
 	}

@@ -59,7 +59,7 @@ func PreSeal(maddr address.Address, ssize uint64, offset uint64, sectors int, sb
 			return nil, err
 		}

-		pi, err := sb.AddPiece(size, sid, rand.Reader, nil)
+		pi, err := sb.AddPiece(context.TODO(), size, sid, rand.Reader, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -76,7 +76,7 @@ func PreSeal(maddr address.Address, ssize uint64, offset uint64, sectors int, sb
 			return nil, xerrors.Errorf("commit: %w", err)
 		}

-		if err := sb.TrimCache(sid); err != nil {
+		if err := sb.TrimCache(context.TODO(), sid); err != nil {
 			return nil, xerrors.Errorf("trim cache: %w", err)
 		}

@@ -57,8 +57,20 @@ var infoCmd = &cli.Command{
 		if err != nil {
 			return err
 		}
+		faults, err := api.StateMinerFaults(ctx, maddr, nil)
+		if err != nil {
+			return err
+		}
+
 		fmt.Printf("\tCommitted: %s\n", types.BigMul(types.NewInt(secCounts.Sset), types.NewInt(sizeByte)).SizeStr())
-		fmt.Printf("\tProving: %s\n", types.BigMul(types.NewInt(secCounts.Pset), types.NewInt(sizeByte)).SizeStr())
+		if len(faults) == 0 {
+			fmt.Printf("\tProving: %s\n", types.BigMul(types.NewInt(secCounts.Pset), types.NewInt(sizeByte)).SizeStr())
+		} else {
+			fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n",
+				types.BigMul(types.NewInt(secCounts.Pset-uint64(len(faults))), types.NewInt(sizeByte)).SizeStr(),
+				types.BigMul(types.NewInt(uint64(len(faults))), types.NewInt(sizeByte)).SizeStr(),
+				float64(10000*uint64(len(faults))/secCounts.Pset)/100.)
+		}

 		// TODO: indicate whether the post worker is in use
 		wstat, err := nodeApi.WorkerStats(ctx)
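The new faulty-sector line in infoCmd keeps the percentage in integer math and only does one final division by 100 to recover two decimal places. A minimal standalone sketch of that arithmetic, with example values assumed rather than taken from this PR:

package main

import "fmt"

func main() {
	// Assumed example values: secCounts.Pset (sectors in the proving set)
	// and len(faults) as returned by StateMinerFaults.
	var provingSectors uint64 = 200
	var faultCount uint64 = 3

	// Same expression shape as the new infoCmd output: integer math first,
	// then a single division by 100.
	pct := float64(10000*faultCount/provingSectors) / 100.
	fmt.Printf("Proving: %d sectors (%d Faulty, %.2f%%)\n",
		provingSectors-faultCount, faultCount, pct) // Proving: 197 sectors (3 Faulty, 1.50%)
}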
@@ -177,7 +177,7 @@ var initCmd = &cli.Command{
 		oldsb, err := sectorbuilder.New(&sectorbuilder.Config{
 			SectorSize:    ssize,
 			WorkerThreads: 2,
-			Dir:           pssb,
+			Paths:         sectorbuilder.SimplePath(pssb),
 		}, namespace.Wrap(oldmds, datastore.NewKey("/sectorbuilder")))
 		if err != nil {
 			return xerrors.Errorf("failed to open up preseal sectorbuilder: %w", err)
@@ -186,7 +186,7 @@ var initCmd = &cli.Command{
 		nsb, err := sectorbuilder.New(&sectorbuilder.Config{
 			SectorSize:    ssize,
 			WorkerThreads: 2,
-			Dir:           lr.Path(),
+			Paths:         sectorbuilder.SimplePath(lr.Path()),
 		}, namespace.Wrap(mds, datastore.NewKey("/sectorbuilder")))
 		if err != nil {
 			return xerrors.Errorf("failed to open up sectorbuilder: %w", err)
@@ -369,7 +369,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 		return err
 	}

-	sbcfg, err := modules.SectorBuilderConfig(lr.Path(), 2, false, false)(mds, api)
+	sbcfg, err := modules.SectorBuilderConfig(sectorbuilder.SimplePath(lr.Path()), 2, false, false)(mds, api)
 	if err != nil {
 		return xerrors.Errorf("getting genesis miner sector builder config: %w", err)
 	}
go.mod
@@ -20,7 +20,7 @@ require (
 	github.com/filecoin-project/go-data-transfer v0.0.0-20191219005021-4accf56bd2ce
 	github.com/filecoin-project/go-fil-markets v0.0.0-20200114015428-74d100f305f8
 	github.com/filecoin-project/go-paramfetch v0.0.1
-	github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200123143044-d9cc96c53c55
+	github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62
 	github.com/filecoin-project/go-statestore v0.1.0
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
 	github.com/go-ole/go-ole v1.2.4 // indirect
go.sum
@@ -117,8 +117,8 @@ github.com/filecoin-project/go-paramfetch v0.0.0-20200102181131-b20d579f2878/go.
 github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE=
 github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
 github.com/filecoin-project/go-sectorbuilder v0.0.1/go.mod h1:3OZ4E3B2OuwhJjtxR4r7hPU9bCfB+A+hm4alLEsaeDc=
-github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200123143044-d9cc96c53c55 h1:XChPRKPZL+/N6a3ccLmjCJ7JrR+SFLFJDllv0BkxW4I=
-github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200123143044-d9cc96c53c55/go.mod h1:ahsryULdwYoZ94K09HcfqX3QBwevWVldENSV/EdCbNg=
+github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62 h1:/+xdjMkIdiRs6vA2lJU56iqtEcl9BQgYXi8b2KuuYCg=
+github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62/go.mod h1:jNGVCDihkMFnraYVLH1xl4ceZQVxx/u4dOORrTKeRi0=
 github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
 github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@@ -7,6 +7,7 @@ import (

 	sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
 	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	logging "github.com/ipfs/go-log"
 	ci "github.com/libp2p/go-libp2p-core/crypto"
 	"github.com/libp2p/go-libp2p-core/host"
 	"github.com/libp2p/go-libp2p-core/peer"
@@ -54,6 +55,8 @@ import (
 	"github.com/filecoin-project/lotus/storage/sectorblocks"
 )

+var log = logging.Logger("builder")
+
 // special is a type used to give keys to modules which
 // can't really be identified by the returned type
 type special struct{ id int }
@@ -342,15 +345,20 @@ func ConfigStorageMiner(c interface{}, lr repo.LockedRepo) Option {
 		return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
 	}

-	path := cfg.SectorBuilder.Path
-	if path == "" {
-		path = lr.Path()
+	scfg := sectorbuilder.SimplePath(lr.Path())
+	if cfg.SectorBuilder.Path == "" {
+		if len(cfg.SectorBuilder.Storage) > 0 {
+			scfg = cfg.SectorBuilder.Storage
+		}
+	} else {
+		scfg = sectorbuilder.SimplePath(cfg.SectorBuilder.Path)
+		log.Warn("LEGACY SectorBuilder.Path FOUND IN CONFIG. Please use the new storage config")
 	}

 	return Options(
 		ConfigCommon(&cfg.Common),

-		Override(new(*sectorbuilder.Config), modules.SectorBuilderConfig(path,
+		Override(new(*sectorbuilder.Config), modules.SectorBuilderConfig(scfg,
 			cfg.SectorBuilder.WorkerCount,
 			cfg.SectorBuilder.DisableLocalPreCommit,
 			cfg.SectorBuilder.DisableLocalCommit)),
@@ -3,6 +3,8 @@ package config
 import (
 	"encoding"
 	"time"
+
+	"github.com/filecoin-project/go-sectorbuilder/fs"
 )

 // Common is common config between full node and miner
@@ -54,7 +56,8 @@ type Metrics struct {
 // // Storage Miner

 type SectorBuilder struct {
-	Path        string
+	Path    string // TODO: remove // FORK (-ish)
+	Storage []fs.PathConfig
 	WorkerCount uint

 	DisableLocalPreCommit bool
@@ -85,6 +85,10 @@ func (a *StateAPI) StateMinerSectorSize(ctx context.Context, actor address.Addre
 	return stmgr.GetMinerSectorSize(ctx, a.StateManager, ts, actor)
 }

+func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, ts *types.TipSet) ([]uint64, error) {
+	return stmgr.GetMinerFaults(ctx, a.StateManager, ts, addr)
+}
+
 func (a *StateAPI) StatePledgeCollateral(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
 	param, err := actors.SerializeParams(&actors.PledgeCollateralParams{Size: types.NewInt(0)})
 	if err != nil {
@@ -7,15 +7,16 @@ import (
 	"mime"
 	"net/http"
 	"os"
-
-	"github.com/filecoin-project/lotus/api/apistruct"
+	"strconv"

 	"github.com/gorilla/mux"
 	files "github.com/ipfs/go-ipfs-files"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-sectorbuilder"
+	"github.com/filecoin-project/go-sectorbuilder/fs"
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/api/apistruct"
 	"github.com/filecoin-project/lotus/lib/tarutil"
 	"github.com/filecoin-project/lotus/miner"
 	"github.com/filecoin-project/lotus/storage"
@@ -43,8 +44,8 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {

 	mux := mux.NewRouter()

-	mux.HandleFunc("/remote/{type}/{sname}", sm.remoteGetSector).Methods("GET")
-	mux.HandleFunc("/remote/{type}/{sname}", sm.remotePutSector).Methods("PUT")
+	mux.HandleFunc("/remote/{type}/{id}", sm.remoteGetSector).Methods("GET")
+	mux.HandleFunc("/remote/{type}/{id}", sm.remotePutSector).Methods("PUT")

 	log.Infof("SERVEGETREMOTE %s", r.URL)

@@ -54,14 +55,21 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
 func (sm *StorageMinerAPI) remoteGetSector(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)

-	path, err := sm.SectorBuilder.GetPath(vars["type"], vars["sname"])
+	id, err := strconv.ParseUint(vars["id"], 10, 64)
+	if err != nil {
+		log.Error("parsing sector id: ", err)
+		w.WriteHeader(500)
+		return
+	}
+
+	path, err := sm.SectorBuilder.SectorPath(fs.DataType(vars["type"]), id)
 	if err != nil {
 		log.Error(err)
 		w.WriteHeader(500)
 		return
 	}

-	stat, err := os.Stat(path)
+	stat, err := os.Stat(string(path))
 	if err != nil {
 		log.Error(err)
 		w.WriteHeader(500)
|
|||||||
|
|
||||||
var rd io.Reader
|
var rd io.Reader
|
||||||
if stat.IsDir() {
|
if stat.IsDir() {
|
||||||
rd, err = tarutil.TarDirectory(path)
|
rd, err = tarutil.TarDirectory(string(path))
|
||||||
w.Header().Set("Content-Type", "application/x-tar")
|
w.Header().Set("Content-Type", "application/x-tar")
|
||||||
} else {
|
} else {
|
||||||
rd, err = os.OpenFile(path, os.O_RDONLY, 0644)
|
rd, err = os.OpenFile(string(path), os.O_RDONLY, 0644)
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
w.Header().Set("Content-Type", "application/octet-stream")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -92,7 +100,15 @@ func (sm *StorageMinerAPI) remoteGetSector(w http.ResponseWriter, r *http.Reques
|
|||||||
func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Request) {
|
func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Request) {
|
||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
|
|
||||||
path, err := sm.SectorBuilder.GetPath(vars["type"], vars["sname"])
|
id, err := strconv.ParseUint(vars["id"], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("parsing sector id: ", err)
|
||||||
|
w.WriteHeader(500)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is going to get better with worker-to-worker transfers
|
||||||
|
path, err := sm.SectorBuilder.AllocSectorPath(fs.DataType(vars["type"]), id, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Error(err)
|
||||||
w.WriteHeader(500)
|
w.WriteHeader(500)
|
||||||
@@ -106,7 +122,7 @@ func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	if err := os.RemoveAll(path); err != nil {
+	if err := os.RemoveAll(string(path)); err != nil {
 		log.Error(err)
 		w.WriteHeader(500)
 		return
@@ -114,13 +130,13 @@ func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Reques

 	switch mediatype {
 	case "application/x-tar":
-		if err := tarutil.ExtractTar(r.Body, path); err != nil {
+		if err := tarutil.ExtractTar(r.Body, string(path)); err != nil {
 			log.Error(err)
 			w.WriteHeader(500)
 			return
 		}
 	default:
-		if err := files.WriteTo(files.NewReaderFile(r.Body), path); err != nil {
+		if err := files.WriteTo(files.NewReaderFile(r.Body), string(path)); err != nil {
 			log.Error(err)
 			w.WriteHeader(500)
 			return
@@ -14,6 +14,7 @@ import (
 	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
 	paramfetch "github.com/filecoin-project/go-paramfetch"
 	"github.com/filecoin-project/go-sectorbuilder"
+	"github.com/filecoin-project/go-sectorbuilder/fs"
 	"github.com/filecoin-project/go-statestore"
 	"github.com/ipfs/go-bitswap"
 	"github.com/ipfs/go-bitswap/network"
@@ -62,7 +63,7 @@ func GetParams(sbc *sectorbuilder.Config) error {
 	return nil
 }

-func SectorBuilderConfig(storagePath string, threads uint, noprecommit, nocommit bool) func(dtypes.MetadataDS, api.FullNode) (*sectorbuilder.Config, error) {
+func SectorBuilderConfig(storage []fs.PathConfig, threads uint, noprecommit, nocommit bool) func(dtypes.MetadataDS, api.FullNode) (*sectorbuilder.Config, error) {
 	return func(ds dtypes.MetadataDS, api api.FullNode) (*sectorbuilder.Config, error) {
 		minerAddr, err := minerAddrFromDS(ds)
 		if err != nil {
@@ -74,9 +75,11 @@ func SectorBuilderConfig(storagePath string, threads uint, noprecommit, nocommit
 			return nil, err
 		}

-		sp, err := homedir.Expand(storagePath)
-		if err != nil {
-			return nil, err
+		for i := range storage {
+			storage[i].Path, err = homedir.Expand(storage[i].Path)
+			if err != nil {
+				return nil, err
+			}
 		}

 		if threads > math.MaxUint8 {
@@ -91,7 +94,7 @@ func SectorBuilderConfig(storagePath string, threads uint, noprecommit, nocommit
 			NoPreCommit: noprecommit,
 			NoCommit:    nocommit,

-			Dir: sp,
+			Paths: storage,
 		}

 		return sb, nil
@@ -232,7 +232,7 @@ func builder(t *testing.T, nFull int, storage []int) ([]test.TestNode, []test.Te
 		SectorSize:    1024,
 		WorkerThreads: 2,
 		Miner:         genMiner,
-		Dir:           psd,
+		Paths:         sectorbuilder.SimplePath(psd),
 	}, namespace.Wrap(mds, datastore.NewKey("/sectorbuilder")))
 	if err != nil {
 		t.Fatal(err)
@@ -50,52 +50,85 @@ func (s *FPoStScheduler) doPost(ctx context.Context, eps uint64, ts *types.TipSe
 	}()
 }

+func (s *FPoStScheduler) declareFaults(ctx context.Context, fc uint64, params *actors.DeclareFaultsParams) error {
+	log.Warnf("DECLARING %d FAULTS", fc)
+
+	enc, aerr := actors.SerializeParams(params)
+	if aerr != nil {
+		return xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
+	}
+
+	msg := &types.Message{
+		To:       s.actor,
+		From:     s.worker,
+		Method:   actors.MAMethods.DeclareFaults,
+		Params:   enc,
+		Value:    types.NewInt(0),
+		GasLimit: types.NewInt(10000000), // i dont know help
+		GasPrice: types.NewInt(1),
+	}
+
+	sm, err := s.api.MpoolPushMessage(ctx, msg)
+	if err != nil {
+		return xerrors.Errorf("pushing faults message to mpool: %w", err)
+	}
+
+	rec, err := s.api.StateWaitMsg(ctx, sm.Cid())
+	if err != nil {
+		return xerrors.Errorf("waiting for declare faults: %w", err)
+	}
+
+	if rec.Receipt.ExitCode != 0 {
+		return xerrors.Errorf("declare faults exit %d", rec.Receipt.ExitCode)
+	}
+
+	log.Infof("Faults declared successfully")
+	return nil
+}
+
 func (s *FPoStScheduler) checkFaults(ctx context.Context, ssi sectorbuilder.SortedPublicSectorInfo) ([]uint64, error) {
 	faults := s.sb.Scrub(ssi)
-	var faultIDs []uint64
+
+	declaredFaults := map[uint64]struct{}{}
+
+	{
+		chainFaults, err := s.api.StateMinerFaults(ctx, s.actor, nil)
+		if err != nil {
+			return nil, xerrors.Errorf("checking on-chain faults: %w", err)
+		}
+
+		for _, fault := range chainFaults {
+			declaredFaults[fault] = struct{}{}
+		}
+	}
+
 	if len(faults) > 0 {
 		params := &actors.DeclareFaultsParams{Faults: types.NewBitField()}

 		for _, fault := range faults {
-			log.Warnf("fault detected: sector %d: %s", fault.SectorID, fault.Err)
-			faultIDs = append(faultIDs, fault.SectorID)
+			if _, ok := declaredFaults[fault.SectorID]; ok {
+				continue
+			}

-			// TODO: omit already declared (with finality in mind though..)
+			log.Warnf("new fault detected: sector %d: %s", fault.SectorID, fault.Err)
+			declaredFaults[fault.SectorID] = struct{}{}
 			params.Faults.Set(fault.SectorID)
 		}

-		log.Warnf("DECLARING %d FAULTS", len(faults))
-
-		enc, aerr := actors.SerializeParams(params)
-		if aerr != nil {
-			return nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
-		}
-
-		msg := &types.Message{
-			To:       s.actor,
-			From:     s.worker,
-			Method:   actors.MAMethods.DeclareFaults,
-			Params:   enc,
-			Value:    types.NewInt(0),
-			GasLimit: types.NewInt(10000000), // i dont know help
-			GasPrice: types.NewInt(1),
-		}
-
-		sm, err := s.api.MpoolPushMessage(ctx, msg)
+		pc, err := params.Faults.Count()
 		if err != nil {
-			return nil, xerrors.Errorf("pushing faults message to mpool: %w", err)
+			return nil, xerrors.Errorf("counting faults: %w", err)
 		}
+		if pc > 0 {
+			if err := s.declareFaults(ctx, pc, params); err != nil {
+				return nil, err
+			}
+		}
+	}

-		rec, err := s.api.StateWaitMsg(ctx, sm.Cid())
-		if err != nil {
-			return nil, xerrors.Errorf("waiting for declare faults: %w", err)
-		}
-
-		if rec.Receipt.ExitCode != 0 {
-			return nil, xerrors.Errorf("declare faults exit %d", rec.Receipt.ExitCode)
-		}
-		log.Infof("Faults declared successfully")
+	faultIDs := make([]uint64, 0, len(declaredFaults))
+	for fault := range declaredFaults {
+		faultIDs = append(faultIDs, fault)
 	}

 	return faultIDs, nil
@@ -211,5 +244,19 @@ func (s *FPoStScheduler) submitPost(ctx context.Context, proof *actors.SubmitFal

 	log.Infof("Submitted fallback post: %s", sm.Cid())

+	go func() {
+		rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid())
+		if err != nil {
+			log.Error(err)
+			return
+		}
+
+		if rec.Receipt.ExitCode == 0 {
+			return
+		}
+
+		log.Errorf("Submitting fallback post %s failed: exit %d", sm.Cid(), rec.Receipt.ExitCode)
+	}()
+
 	return nil
 }
@@ -50,6 +50,7 @@ type storageMinerApi interface {
 	StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error)
 	StateGetReceipt(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error)
 	StateMarketStorageDeal(context.Context, uint64, *types.TipSet) (*actors.OnChainDeal, error)
+	StateMinerFaults(context.Context, address.Address, *types.TipSet) ([]uint64, error)

 	MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error)

@@ -13,6 +13,7 @@ import (
 	ffi "github.com/filecoin-project/filecoin-ffi"
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-sectorbuilder"
+	"github.com/filecoin-project/go-sectorbuilder/fs"
 	"golang.org/x/xerrors"
 )

@@ -60,7 +61,7 @@ func (sb *SBMock) RateLimit() func() {
 	}
 }

-func (sb *SBMock) AddPiece(size uint64, sectorId uint64, r io.Reader, existingPieces []uint64) (sectorbuilder.PublicPieceInfo, error) {
+func (sb *SBMock) AddPiece(ctx context.Context, size uint64, sectorId uint64, r io.Reader, existingPieces []uint64) (sectorbuilder.PublicPieceInfo, error) {
 	sb.lk.Lock()
 	ss, ok := sb.sectors[sectorId]
 	if !ok {
@@ -284,7 +285,7 @@ func (sb *SBMock) GenerateEPostCandidates(sectorInfo sectorbuilder.SortedPublicS
 	return out, nil
 }

-func (sb *SBMock) ReadPieceFromSealedSector(sectorID uint64, offset uint64, size uint64, ticket []byte, commD []byte) (io.ReadCloser, error) {
+func (sb *SBMock) ReadPieceFromSealedSector(ctx context.Context, sectorID uint64, offset uint64, size uint64, ticket []byte, commD []byte) (io.ReadCloser, error) {
 	if len(sb.sectors[sectorID].pieces) > 1 {
 		panic("implme")
 	}
@@ -301,7 +302,7 @@ func (sb *SBMock) StageFakeData() (uint64, []sectorbuilder.PublicPieceInfo, erro
 	buf := make([]byte, usize)
 	rand.Read(buf)

-	pi, err := sb.AddPiece(usize, sid, bytes.NewReader(buf), nil)
+	pi, err := sb.AddPiece(context.TODO(), usize, sid, bytes.NewReader(buf), nil)
 	if err != nil {
 		return 0, nil, err
 	}
@@ -309,6 +310,26 @@ func (sb *SBMock) StageFakeData() (uint64, []sectorbuilder.PublicPieceInfo, erro
 	return sid, []sectorbuilder.PublicPieceInfo{pi}, nil
 }

+func (sb *SBMock) FinalizeSector(context.Context, uint64) error {
+	return nil
+}
+
+func (sb *SBMock) DropStaged(context.Context, uint64) error {
+	return nil
+}
+
+func (sb *SBMock) SectorPath(typ fs.DataType, sectorID uint64) (fs.SectorPath, error) {
+	panic("implement me")
+}
+
+func (sb *SBMock) AllocSectorPath(typ fs.DataType, sectorID uint64, cache bool) (fs.SectorPath, error) {
+	panic("implement me")
+}
+
+func (sb *SBMock) ReleaseSector(fs.DataType, fs.SectorPath) {
+	panic("implement me")
+}
+
 func (m mockVerif) VerifyElectionPost(ctx context.Context, sectorSize uint64, sectorInfo sectorbuilder.SortedPublicSectorInfo, challengeSeed []byte, proof []byte, candidates []sectorbuilder.EPostCandidate, proverID address.Address) (bool, error) {
 	panic("implement me")
 }
@@ -48,10 +48,14 @@ var fsmPlanners = []func(events []statemachine.Event, state *SectorInfo) error{
 	),
 	api.Committing: planCommitting,
 	api.CommitWait: planOne(
-		on(SectorProving{}, api.Proving),
+		on(SectorProving{}, api.FinalizeSector),
 		on(SectorCommitFailed{}, api.CommitFailed),
 	),

+	api.FinalizeSector: planOne(
+		on(SectorFinalized{}, api.Proving),
+	),
+
 	api.Proving: planOne(
 		on(SectorFaultReported{}, api.FaultReported),
 		on(SectorFaulty{}, api.Faulty),
@@ -150,6 +154,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
 		return m.handleCommitting, nil
 	case api.CommitWait:
 		return m.handleCommitWait, nil
+	case api.FinalizeSector:
+		return m.handleFinalizeSector, nil
 	case api.Proving:
 		// TODO: track sector health / expiration
 		log.Infof("Proving sector %d", state.SectorID)
@@ -120,6 +120,14 @@ type SectorProving struct{}

 func (evt SectorProving) apply(*SectorInfo) {}

+type SectorFinalized struct{}
+
+func (evt SectorFinalized) apply(*SectorInfo) {}
+
+type SectorFinalizeFailed struct{ error }
+
+func (evt SectorFinalizeFailed) apply(*SectorInfo) {}
+
 // Failed state recovery

 type SectorRetrySeal struct{}
@@ -50,6 +50,9 @@ func TestHappyPath(t *testing.T) {
 	require.Equal(m.t, m.state.State, api.CommitWait)

 	m.planSingle(SectorProving{})
+	require.Equal(m.t, m.state.State, api.FinalizeSector)
+
+	m.planSingle(SectorFinalized{})
 	require.Equal(m.t, m.state.State, api.Proving)
 }

@@ -81,6 +84,9 @@ func TestSeedRevert(t *testing.T) {
 	require.Equal(m.t, m.state.State, api.CommitWait)

 	m.planSingle(SectorProving{})
+	require.Equal(m.t, m.state.State, api.FinalizeSector)
+
+	m.planSingle(SectorFinalized{})
 	require.Equal(m.t, m.state.State, api.Proving)
 }

@@ -5,6 +5,7 @@ import (
 	"context"
 	"io"
 	"math"
+	"math/bits"
 	"math/rand"
 	"runtime"

@@ -16,6 +17,11 @@ import (
 )

 func (m *Sealing) pledgeReader(size uint64, parts uint64) io.Reader {
+	parts = 1 << bits.Len64(parts) // round down to nearest power of 2
+	if size/parts < 127 {
+		parts = size / 127
+	}
+
 	piece := sectorbuilder.UserBytesForSectorSize((size/127 + size) / parts)

 	readers := make([]io.Reader, parts)
@@ -31,6 +37,8 @@ func (m *Sealing) pledgeSector(ctx context.Context, sectorID uint64, existingPie
 		return nil, nil
 	}

+	log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes)
+
 	deals := make([]actors.StorageDealProposal, len(sizes))
 	for i, size := range sizes {
 		commP, err := m.fastPledgeCommitment(size, uint64(runtime.NumCPU()))
@@ -53,6 +61,8 @@ func (m *Sealing) pledgeSector(ctx context.Context, sectorID uint64, existingPie
 		deals[i] = sdp
 	}

+	log.Infof("Publishing deals for %d", sectorID)
+
 	params, aerr := actors.SerializeParams(&actors.PublishStorageDealsParams{
 		Deals: deals,
 	})
@@ -72,7 +82,7 @@ func (m *Sealing) pledgeSector(ctx context.Context, sectorID uint64, existingPie
 	if err != nil {
 		return nil, err
 	}
-	r, err := m.api.StateWaitMsg(ctx, smsg.Cid())
+	r, err := m.api.StateWaitMsg(ctx, smsg.Cid()) // TODO: more finality
 	if err != nil {
 		return nil, err
 	}
@@ -87,12 +97,13 @@ func (m *Sealing) pledgeSector(ctx context.Context, sectorID uint64, existingPie
 		return nil, xerrors.New("got unexpected number of DealIDs from PublishStorageDeals")
 	}

-	out := make([]Piece, len(sizes))
+	log.Infof("Deals for sector %d: %+v", sectorID, resp.DealIDs)
+
+	out := make([]Piece, len(sizes))
 	for i, size := range sizes {
-		ppi, err := m.sb.AddPiece(size, sectorID, m.pledgeReader(size, uint64(runtime.NumCPU())), existingPieceSizes)
+		ppi, err := m.sb.AddPiece(ctx, size, sectorID, m.pledgeReader(size, uint64(runtime.NumCPU())), existingPieceSizes)
 		if err != nil {
-			return nil, err
+			return nil, xerrors.Errorf("add piece: %w", err)
 		}

 		existingPieceSizes = append(existingPieceSizes, size)
@@ -112,7 +112,7 @@ func (m *Sealing) AllocatePiece(size uint64) (sectorID uint64, offset uint64, er
 func (m *Sealing) SealPiece(ctx context.Context, size uint64, r io.Reader, sectorID uint64, dealID uint64) error {
 	log.Infof("Seal piece for deal %d", dealID)

-	ppi, err := m.sb.AddPiece(size, sectorID, r, []uint64{})
+	ppi, err := m.sb.AddPiece(ctx, size, sectorID, r, []uint64{})
 	if err != nil {
 		return xerrors.Errorf("adding piece to sector: %w", err)
 	}
@@ -121,6 +121,7 @@ func (m *Sealing) SealPiece(ctx context.Context, size uint64, r io.Reader, secto
 }

 func (m *Sealing) newSector(ctx context.Context, sid uint64, dealID uint64, ppi sectorbuilder.PublicPieceInfo) error {
+	log.Infof("Start sealing %d", sid)
 	return m.sectors.Send(sid, SectorStart{
 		id: sid,
 		pieces: []Piece{
|
@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
|
sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
|
||||||
|
"github.com/filecoin-project/go-sectorbuilder/fs"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
@@ -232,6 +233,23 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
 	return ctx.Send(SectorProving{})
 }

+func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
+	// TODO: Maybe wait for some finality
+
+	if err := m.sb.FinalizeSector(ctx.Context(), sector.SectorID); err != nil {
+		if !xerrors.Is(err, fs.ErrNoSuitablePath) {
+			return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
+		}
+		log.Warnf("finalize sector: %v", err)
+	}
+
+	if err := m.sb.DropStaged(ctx.Context(), sector.SectorID); err != nil {
+		return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("drop staged: %w", err)})
+	}
+
+	return ctx.Send(SectorFinalized{})
+}
+
 func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error {
 	// TODO: check if the fault has already been reported, and that this sector is even valid

@@ -49,8 +49,11 @@ func fillersFromRem(toFill uint64) ([]uint64, error) {

 func (m *Sealing) fastPledgeCommitment(size uint64, parts uint64) (commP [sectorbuilder.CommLen]byte, err error) {
 	parts = 1 << bits.Len64(parts) // round down to nearest power of 2
+	if size/parts < 127 {
+		parts = size / 127
+	}

-	piece := sectorbuilder.UserBytesForSectorSize((size + size / 127) / parts)
+	piece := sectorbuilder.UserBytesForSectorSize((size + size/127) / parts)
 	out := make([]sectorbuilder.PublicPieceInfo, parts)
 	var lk sync.Mutex

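The guard added to fastPledgeCommitment (and mirrored in pledgeReader above) shrinks the number of parallel pledge parts when each part would cover less than one 127-byte unpadded unit. A minimal sketch of the sizing math with assumed example values, using a local stand-in for sectorbuilder.UserBytesForSectorSize (assumed here to be the padded size minus 1/128 for Fr32 padding):

package main

import (
	"fmt"
	"math/bits"
)

// userBytes is a stand-in for sectorbuilder.UserBytesForSectorSize,
// assumed to return size - size/128.
func userBytes(ssize uint64) uint64 {
	return ssize - ssize/128
}

func main() {
	size := userBytes(1024) // 1016 unpadded bytes, as in the new TestFastPledge case
	parts := uint64(64)     // e.g. runtime.NumCPU()

	parts = 1 << bits.Len64(parts) // 128
	if size/parts < 127 {          // 1016/128 = 7: too small, so shrink parts
		parts = size / 127 // 8
	}
	piece := userBytes((size + size/127) / parts) // (1016+8)/8 = 128 -> 127

	fmt.Println(parts, piece) // 8 127
}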
@@ -52,4 +52,11 @@ func TestFastPledge(t *testing.T) {
 	if _, err := s.fastPledgeCommitment(sectorbuilder.UserBytesForSectorSize(sz), 5); err != nil {
 		t.Fatalf("%+v", err)
 	}
+
+	sz = uint64(1024)
+
+	s = Sealing{sb: sbmock.NewMockSectorBuilder(0, sz)}
+	if _, err := s.fastPledgeCommitment(sectorbuilder.UserBytesForSectorSize(sz), 64); err != nil {
+		t.Fatalf("%+v", err)
+	}
 }
@@ -96,6 +96,7 @@ func (s *SectorBlockStore) Get(c cid.Cid) (blocks.Block, error) {
 	log.Infof("reading block %s from sector %d(+%d;%d)", c, best.SectorID, best.Offset, best.Size)

 	r, err := s.sectorBlocks.sb.ReadPieceFromSealedSector(
+		context.TODO(),
 		best.SectorID,
 		best.Offset,
 		best.Size,