lpseal: MoveStorage

Łukasz Magiera 2024-01-30 20:05:47 +01:00
parent 4649d3b8d0
commit d820e76beb
7 changed files with 194 additions and 2 deletions

View File

@ -88,6 +88,10 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
        porepTask := lpseal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks)
        activeTasks = append(activeTasks, porepTask)
    }

    if cfg.Subsystems.EnableMoveStorage {
        moveStorageTask := lpseal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks)
        activeTasks = append(activeTasks, moveStorageTask)
    }

    if cfg.Subsystems.EnableSendCommitMsg {
        commitTask := lpseal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee)
        activeTasks = append(activeTasks, commitTask)

View File

@ -55,6 +55,10 @@ create table sectors_sdr_pipeline (
    task_id_finalize bigint,
    after_finalize bool not null default false,

    -- MoveStorage (move data to storage)
    task_id_move_storage bigint,
    after_move_storage bool not null default false,

    -- Commit message sending
    commit_msg_cid text,
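
Note: every stage in this pipeline table follows the same two-column convention: a nullable task_id_* that is set when the stage gets scheduled, and an after_* flag that flips once it completes. A minimal sketch of the states that pair encodes (the helper is hypothetical, not part of this commit):

package main

import "fmt"

// stageState is a hypothetical helper showing how the task_id/after column
// pair of sectors_sdr_pipeline encodes a stage's progress.
func stageState(taskID *int64, after bool) string {
    switch {
    case after:
        return "done" // e.g. after_move_storage = true
    case taskID != nil:
        return "scheduled" // e.g. task_id_move_storage is set
    default:
        return "pending" // neither set: the poller may still claim it
    }
}

func main() {
    id := int64(7)
    fmt.Println(stageState(nil, false)) // pending
    fmt.Println(stageState(&id, false)) // scheduled
    fmt.Println(stageState(&id, true))  // done
}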

View File

@ -108,6 +108,8 @@ type ProviderSubsystemsConfig struct {
    // This task handles encoding of unsealed data into the last SDR layer and building
    // of TreeR, TreeC and TreeD.
    // In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
    // Note that nodes with SDRTrees enabled will also pick up Finalize tasks,
    // which just remove unneeded tree data after PoRep is computed.
    EnableSealSDRTrees bool
    SealSDRTreesMaxTasks int
    FinalizeMaxTasks int
@ -125,6 +127,12 @@ type ProviderSubsystemsConfig struct {
    // from this lotus-provider instance.
    EnableSendCommitMsg bool

    // EnableMoveStorage enables the move-into-long-term-storage task to run
    // on this lotus-provider instance. This task should only be enabled on
    // nodes with long-term storage.
    EnableMoveStorage bool
    MoveStorageMaxTasks int

    EnableWebGui bool
    // The address that should listen for Web GUI requests.
    GuiAddress string
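
Note: a dedicated long-term-storage node would typically enable just this subsystem. A sketch of the relevant knobs, using an illustrative mirror of the fields above rather than the real config package:

package main

import "fmt"

// subsystems mirrors, for illustration only, the two ProviderSubsystemsConfig
// fields added in this commit.
type subsystems struct {
    EnableMoveStorage   bool
    MoveStorageMaxTasks int
}

func main() {
    cfg := subsystems{
        EnableMoveStorage:   true, // this node has long-term storage attached
        MoveStorageMaxTasks: 2,    // passed through as the task's Max to bound concurrent moves
    }
    fmt.Printf("%+v\n", cfg)
}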

View File

@ -288,9 +288,15 @@ func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.Sector
    ssize, err := sector.ProofType.SectorSize()
    if err != nil {
        return xerrors.Errorf("getting sector size: %w", err)
    }

    // TODO: treed into unsealed
    if err := ffi.ClearCache(uint64(ssize), paths.Cache); err != nil {
        return xerrors.Errorf("clearing cache: %w", err)
    }

    return nil
}

func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef) error {
    return sb.sectors.storage.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed)
}
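
Note: MoveStorage moves only the sealed replica and its cache, selected through the storiface file-type bitmask. A sketch with the flag values assumed from the storiface package (FTUnsealed=1, FTSealed=2, FTCache=4, which is also why the draft query further down filters on l.sector_filetype=4):

package main

import "fmt"

// Assumed values of the storiface.SectorFileType bit flags.
const (
    FTUnsealed = 1 << iota // 1
    FTSealed               // 2
    FTCache                // 4
)

func main() {
    toMove := FTCache | FTSealed // what MoveStorage passes above
    for _, ft := range []int{FTUnsealed, FTSealed, FTCache} {
        fmt.Printf("filetype %d moved: %v\n", ft, toMove&ft != 0)
    }
    // Unsealed copies stay behind; sealed data and cache move.
}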

View File

@ -26,6 +26,7 @@ const (
    pollerPoRep
    pollerCommitMsg
    pollerFinalize
    pollerMoveStorage

    numPollers
)
@ -98,6 +99,9 @@ type pollTask struct {
    TaskFinalize  *int64 `db:"task_id_finalize"`
    AfterFinalize bool   `db:"after_finalize"`

    TaskMoveStorage  *int64 `db:"task_id_move_storage"`
    AfterMoveStorage bool   `db:"after_move_storage"`

    TaskCommitMsg  *int64 `db:"task_id_commit_msg"`
    AfterCommitMsg bool   `db:"after_commit_msg"`
@ -120,6 +124,7 @@ func (s *SealPoller) poll(ctx context.Context) error {
            after_precommit_msg_success, seed_epoch,
            task_id_porep, porep_proof, after_porep,
            task_id_finalize, after_finalize,
            task_id_move_storage, after_move_storage,
            task_id_commit_msg, after_commit_msg,
            after_commit_msg_success,
            failed, failed_reason
@ -217,6 +222,22 @@ func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *t
    }
}
func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) {
    if s.pollers[pollerMoveStorage].IsSet() && task.AfterFinalize && task.TaskMoveStorage == nil {
        s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
            // Claim the sector for the new task; the "is null" guard keeps
            // concurrent pollers from scheduling it twice.
            n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 and task_id_move_storage is null`, id, task.SpID, task.SectorNumber)
            if err != nil {
                return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
            }
            if n != 1 {
                return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
            }

            return true, nil
        })
    }
}
func (s *SealPoller) mustPoll(err error) {
    if err != nil {
        log.Errorw("poller operation failed", "error", err)
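
Note: the conditional UPDATE in pollStartMoveStorage doubles as a lock: it only matches rows where task_id_move_storage is still null, so two pollers racing to schedule the same sector cannot both succeed, and the n != 1 check makes the loser roll back. The same claim pattern against an in-memory row (names hypothetical, no harmonydb involved):

package main

import (
    "fmt"
    "sync"
)

// row models the task_id_move_storage column; claim mimics
// "SET task_id_move_storage=$1 WHERE ... task_id_move_storage is null".
type row struct {
    mu     sync.Mutex
    taskID *int64
}

func (r *row) claim(id int64) bool {
    r.mu.Lock()
    defer r.mu.Unlock()
    if r.taskID != nil {
        return false // already claimed: the UPDATE would touch 0 rows
    }
    r.taskID = &id
    return true
}

func main() {
    var r row
    fmt.Println(r.claim(1)) // true: first poller wins
    fmt.Println(r.claim(2)) // false: second poller backs off
}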

View File

@ -65,8 +65,6 @@ func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do
}

func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
    // query:
    var tasks []struct {
        TaskID harmonytask.TaskID `db:"task_id_finalize"`
        SpID   int64              `db:"sp_id"`
@ -93,7 +91,17 @@ func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.T
return nil, xerrors.Errorf("getting local storage: %w", err)
}
acceptables := map[harmonytask.TaskID]bool{}
for _, t := range ids {
acceptables[t] = true
}
for _, t := range tasks {
if _, ok := acceptables[t.TaskID]; !ok {
continue
}
for _, l := range ls {
if string(l.ID) == t.StorageID {
return &t.TaskID, nil

View File

@ -0,0 +1,141 @@
package lpseal

import (
    "context"

    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/lib/harmony/harmonydb"
    "github.com/filecoin-project/lotus/lib/harmony/harmonytask"
    "github.com/filecoin-project/lotus/lib/harmony/resources"
    "github.com/filecoin-project/lotus/provider/lpffi"
    "github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type MoveStorageTask struct {
    sp *SealPoller
    sc *lpffi.SealCalls
    db *harmonydb.DB

    max int
}

func NewMoveStorageTask(sp *SealPoller, sc *lpffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask {
    return &MoveStorageTask{
        max: max,
        sp:  sp,
        sc:  sc,
        db:  db,
    }
}
func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
    var task struct {
        SpID         int64 `db:"sp_id"`
        SectorNumber int64 `db:"sector_number"`
        RegSealProof int64 `db:"reg_seal_proof"`
    }

    ctx := context.Background()

    // Look up the sector this task was assigned to.
    err = m.db.Select(ctx, &task, `
        select sp_id, sector_number, reg_seal_proof from sectors_sdr_pipeline where task_id_move_storage=$1`, taskID)
    if err != nil {
        return false, xerrors.Errorf("getting task: %w", err)
    }

    sector := storiface.SectorRef{
        ID: abi.SectorID{
            Miner:  abi.ActorID(task.SpID),
            Number: abi.SectorNumber(task.SectorNumber),
        },
        ProofType: abi.RegisteredSealProof(task.RegSealProof),
    }

    // Move the sealed replica and cache into long-term storage.
    err = m.sc.MoveStorage(ctx, sector)
    if err != nil {
        return false, xerrors.Errorf("moving storage: %w", err)
    }

    _, err = m.db.Exec(ctx, `update sectors_sdr_pipeline set after_move_storage=true where task_id_move_storage=$1`, taskID)
    if err != nil {
        return false, xerrors.Errorf("marking move storage as done: %w", err)
    }

    return true, nil
}
func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
    ctx := context.Background()

    /*
        var tasks []struct {
            TaskID       harmonytask.TaskID `db:"task_id_finalize"`
            SpID         int64              `db:"sp_id"`
            SectorNumber int64              `db:"sector_number"`
            StorageID    string             `db:"storage_id"`
        }

        err := m.db.Select(ctx, &tasks, `
            select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p
                inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num
                where task_id_move_storage in ($1) and l.sector_filetype=4`, ids)
        if err != nil {
            return nil, xerrors.Errorf("getting tasks: %w", err)
        }

        ls, err := m.sc.LocalStorage(ctx)
        if err != nil {
            return nil, xerrors.Errorf("getting local storage: %w", err)
        }

        acceptables := map[harmonytask.TaskID]bool{}
        for _, t := range ids {
            acceptables[t] = true
        }

        for _, t := range tasks {
        }

        TODO some smarts:
        * yield a schedule cycle/s if we have moves already in progress
    */

    // Placeholder until the smarter, locality-aware scheduling sketched
    // above is implemented: accept the first offered task as long as this
    // node has any seal-capable local storage.
    ls, err := m.sc.LocalStorage(ctx)
    if err != nil {
        return nil, xerrors.Errorf("getting local storage: %w", err)
    }

    var haveStorage bool
    for _, l := range ls {
        if l.CanSeal {
            haveStorage = true
            break
        }
    }

    if !haveStorage {
        return nil, nil
    }

    id := ids[0]
    return &id, nil
}
func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails {
    return harmonytask.TaskTypeDetails{
        Max:  m.max,
        Name: "MoveStorage",
        Cost: resources.Resources{
            Cpu: 1,
            Gpu: 0,
            Ram: 128 << 20, // 128 MiB
        },
        MaxFailures: 10,
    }
}

func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) {
    m.sp.pollers[pollerMoveStorage].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &MoveStorageTask{}
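
Note: Adder is the other half of pollStartMoveStorage: at startup the task deposits its AddTaskFunc into the poller's pollerMoveStorage slot, and the poller invokes it whenever a finalized sector still lacks a move-storage task. A loose, self-contained sketch of that handoff (all names hypothetical; the real promise type lives in SealPoller):

package main

import "fmt"

type taskID int

// addTaskFunc stands in for harmonytask.AddTaskFunc: it receives a claim
// callback that tries to record the new task id in the DB.
type addTaskFunc func(claim func(id taskID) bool)

// slot loosely models one of the poller's promise slots.
type slot struct{ fn addTaskFunc }

func (s *slot) Set(fn addTaskFunc) { s.fn = fn }
func (s *slot) IsSet() bool        { return s.fn != nil }

func main() {
    var moveStorage slot

    // What MoveStorageTask.Adder does at startup.
    moveStorage.Set(func(claim func(taskID) bool) {
        if claim(1) {
            fmt.Println("move-storage task 1 created")
        }
    })

    // What pollStartMoveStorage does once a sector becomes eligible.
    if moveStorage.IsSet() {
        moveStorage.fn(func(id taskID) bool {
            return true // the real code claims the row with a conditional UPDATE
        })
    }
}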