lpseal: MoveStorage
parent 4649d3b8d0
commit d820e76beb
@@ -88,6 +88,10 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
 		porepTask := lpseal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks)
 		activeTasks = append(activeTasks, porepTask)
 	}
+	if cfg.Subsystems.EnableMoveStorage {
+		moveStorageTask := lpseal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks)
+		activeTasks = append(activeTasks, moveStorageTask)
+	}
 	if cfg.Subsystems.EnableSendCommitMsg {
 		commitTask := lpseal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee)
 		activeTasks = append(activeTasks, commitTask)
@@ -55,6 +55,10 @@ create table sectors_sdr_pipeline (
     task_id_finalize bigint,
     after_finalize bool not null default false,
 
+    -- MoveStorage (move data to storage)
+    task_id_move_storage bigint,
+    after_move_storage bool not null default false,
+
     -- Commit message sending
     commit_msg_cid text,
 
@@ -108,6 +108,8 @@ type ProviderSubsystemsConfig struct {
 	// This task handles encoding of unsealed data into last sdr layer and building
 	// of TreeR, TreeC and TreeD.
 	// In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
+	// Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
+	// which just remove unneeded tree data after PoRep is computed.
 	EnableSealSDRTrees bool
 	SealSDRTreesMaxTasks int
 	FinalizeMaxTasks int
@@ -125,6 +127,12 @@ type ProviderSubsystemsConfig struct {
 	// from this lotus-provider instance.
 	EnableSendCommitMsg bool
 
+	// EnableMoveStorage enables the move-into-long-term-storage task to run
+	// on this lotus-provider instance. This tasks should only be enabled on
+	// nodes with long-term storage.
+	EnableMoveStorage bool
+	MoveStorageMaxTasks int
+
 	EnableWebGui bool
 	// The address that should listen for Web GUI requests.
 	GuiAddress string
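Taken together with the comments above, the intended deployment split is: sealing nodes run the SDR/Trees/Finalize tasks, while a node attached to long-term storage runs only MoveStorage. Below is a minimal sketch of such a storage-node configuration; the field names come from this diff, but the package path and the interpretation of MoveStorageMaxTasks are assumptions, not something shown in this commit.

// Hypothetical sketch, not part of this commit: a lotus-provider instance
// dedicated to long-term storage enables only the MoveStorage task.
// The import path for ProviderSubsystemsConfig is an assumption.
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	storageNode := config.ProviderSubsystemsConfig{
		EnableMoveStorage:   true,
		MoveStorageMaxTasks: 8, // assumed to cap concurrent moves on this node via TypeDetails.Max
	}
	fmt.Printf("storage-node subsystems: %+v\n", storageNode)
}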
@@ -288,9 +288,15 @@ func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.Sector
 
 	ssize, err := sector.ProofType.SectorSize()
 
+	// todo treed into unsealed
+
 	if err := ffi.ClearCache(uint64(ssize), paths.Cache); err != nil {
 		return xerrors.Errorf("clearing cache: %w", err)
 	}
 
 	return nil
 }
+
+func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef) error {
+	return sb.sectors.storage.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed)
+}
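Note that the new MoveStorage helper moves only the cache and the sealed replica; the unsealed copy is not in the flag set, which is consistent with the "todo treed into unsealed" note above. A standalone illustration of how those storiface flags combine (only the imported constants are lotus code):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func main() {
	// The same bitmask passed to MoveStorage in the hunk above.
	toMove := storiface.FTCache | storiface.FTSealed

	// The unsealed copy is not included, so it stays where it is for now.
	fmt.Println(toMove&storiface.FTUnsealed == 0) // true
}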
@@ -26,6 +26,7 @@ const (
 	pollerPoRep
 	pollerCommitMsg
 	pollerFinalize
+	pollerMoveStorage
 
 	numPollers
 )
@@ -98,6 +99,9 @@ type pollTask struct {
 	TaskFinalize *int64 `db:"task_id_finalize"`
 	AfterFinalize bool `db:"after_finalize"`
 
+	TaskMoveStorage *int64 `db:"task_id_move_storage"`
+	AfterMoveStorage bool `db:"after_move_storage"`
+
 	TaskCommitMsg *int64 `db:"task_id_commit_msg"`
 	AfterCommitMsg bool `db:"after_commit_msg"`
 
@@ -120,6 +124,7 @@ func (s *SealPoller) poll(ctx context.Context) error {
 		after_precommit_msg_success, seed_epoch,
 		task_id_porep, porep_proof, after_porep,
 		task_id_finalize, after_finalize,
+		task_id_move_storage, after_move_storage,
 		task_id_commit_msg, after_commit_msg,
 		after_commit_msg_success,
 		failed, failed_reason
@@ -217,6 +222,22 @@ func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *t
 	}
 }
 
+func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) {
+	if s.pollers[pollerMoveStorage].IsSet() && task.AfterFinalize && task.TaskMoveStorage == nil {
+		s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
+			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 and task_id_move_storage is null`, id, task.SpID, task.SectorNumber)
+			if err != nil {
+				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
+			}
+			if n != 1 {
+				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
+			}
+
+			return true, nil
+		})
+	}
+}
+
 func (s *SealPoller) mustPoll(err error) {
 	if err != nil {
 		log.Errorw("poller operation failed", "error", err)
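For readability, the scheduling gate added here can be restated on its own: a sector becomes eligible for MoveStorage once Finalize has completed and no move task has been claimed for it yet, and the UPDATE only claims the row while task_id_move_storage is still null, so exactly one poller wins. The following is a standalone restatement of that gate in plain Go, not lotus code.

package main

import "fmt"

// pollTaskView mirrors only the pollTask fields that matter for the MoveStorage
// gate added in this commit; it is an illustration, not the lotus struct.
type pollTaskView struct {
	AfterFinalize   bool
	TaskMoveStorage *int64
}

// wantsMoveStorage restates the condition checked in pollStartMoveStorage above.
func wantsMoveStorage(t pollTaskView) bool {
	return t.AfterFinalize && t.TaskMoveStorage == nil
}

func main() {
	claimed := int64(7)
	fmt.Println(wantsMoveStorage(pollTaskView{AfterFinalize: true}))                            // true: ready to schedule
	fmt.Println(wantsMoveStorage(pollTaskView{AfterFinalize: true, TaskMoveStorage: &claimed})) // false: already claimed
	fmt.Println(wantsMoveStorage(pollTaskView{}))                                               // false: not finalized yet
}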
@@ -65,8 +65,6 @@ func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (do
 }
 
 func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
-	// query:
-
 	var tasks []struct {
 		TaskID harmonytask.TaskID `db:"task_id_finalize"`
 		SpID int64 `db:"sp_id"`
@@ -93,7 +91,17 @@ func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.T
 		return nil, xerrors.Errorf("getting local storage: %w", err)
 	}
 
+	acceptables := map[harmonytask.TaskID]bool{}
+
+	for _, t := range ids {
+		acceptables[t] = true
+	}
+
 	for _, t := range tasks {
+		if _, ok := acceptables[t.TaskID]; !ok {
+			continue
+		}
+
 		for _, l := range ls {
 			if string(l.ID) == t.StorageID {
 				return &t.TaskID, nil
provider/lpseal/task_movestorage.go (new file, 141 lines)
@@ -0,0 +1,141 @@
+package lpseal
+
+import (
+	"context"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
+	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
+	"github.com/filecoin-project/lotus/lib/harmony/resources"
+	"github.com/filecoin-project/lotus/provider/lpffi"
+	"github.com/filecoin-project/lotus/storage/sealer/storiface"
+	"golang.org/x/xerrors"
+)
+
+type MoveStorageTask struct {
+	sp *SealPoller
+	sc *lpffi.SealCalls
+	db *harmonydb.DB
+
+	max int
+}
+
+func NewMoveStorageTask(sp *SealPoller, sc *lpffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask {
+	return &MoveStorageTask{
+		max: max,
+		sp: sp,
+		sc: sc,
+		db: db,
+	}
+}
+
+func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
+	var task struct {
+		SpID int64 `db:"sp_id"`
+		SectorNumber int64 `db:"sector_number"`
+		RegSealProof int64 `db:"reg_seal_proof"`
+	}
+
+	ctx := context.Background()
+
+	err = m.db.Select(ctx, &task, `
+		select sp_id, sector_number, reg_seal_proof from sectors_sdr_pipeline where task_id_move_storage=$1`, taskID)
+	if err != nil {
+		return false, xerrors.Errorf("getting task: %w", err)
+	}
+
+	sector := storiface.SectorRef{
+		ID: abi.SectorID{
+			Miner: abi.ActorID(task.SpID),
+			Number: abi.SectorNumber(task.SectorNumber),
+		},
+		ProofType: abi.RegisteredSealProof(task.RegSealProof),
+	}
+
+	err = m.sc.MoveStorage(ctx, sector)
+	if err != nil {
+		return false, xerrors.Errorf("moving storage: %w", err)
+	}
+
+	_, err = m.db.Exec(ctx, `update sectors_sdr_pipeline set after_move_storage=true where task_id_move_storage=$1`, taskID)
+
+	return true, nil
+}
+
+func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
+
+	ctx := context.Background()
+	/*
+
+		var tasks []struct {
+			TaskID harmonytask.TaskID `db:"task_id_finalize"`
+			SpID int64 `db:"sp_id"`
+			SectorNumber int64 `db:"sector_number"`
+			StorageID string `db:"storage_id"`
+		}
+
+		err := m.db.Select(ctx, &tasks, `
+			select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p
+				inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num
+				where task_id_move_storage in ($1) and l.sector_filetype=4`, ids)
+		if err != nil {
+			return nil, xerrors.Errorf("getting tasks: %w", err)
+		}
+
+		ls, err := m.sc.LocalStorage(ctx)
+		if err != nil {
+			return nil, xerrors.Errorf("getting local storage: %w", err)
+		}
+
+		acceptables := map[harmonytask.TaskID]bool{}
+
+		for _, t := range ids {
+			acceptables[t] = true
+		}
+
+		for _, t := range tasks {
+
+		}
+
+		todo some smarts
+		* yield a schedule cycle/s if we have moves already in progress
+	*/
+
+	////
+	ls, err := m.sc.LocalStorage(ctx)
+	if err != nil {
+		return nil, xerrors.Errorf("getting local storage: %w", err)
+	}
+	var haveStorage bool
+	for _, l := range ls {
+		if l.CanSeal {
+			haveStorage = true
+			break
+		}
+	}
+
+	if !haveStorage {
+		return nil, nil
+	}
+
+	id := ids[0]
+	return &id, nil
+}
+
+func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails {
+	return harmonytask.TaskTypeDetails{
+		Max: m.max,
+		Name: "MoveStorage",
+		Cost: resources.Resources{
+			Cpu: 1,
+			Gpu: 0,
+			Ram: 128 << 20,
+		},
+		MaxFailures: 10,
+	}
+}
+
+func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) {
+	m.sp.pollers[pollerMoveStorage].Set(taskFunc)
+}
+
+var _ harmonytask.TaskInterface = &MoveStorageTask{}
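The commented-out block in CanAccept above hints at the intended "smarter" scheduling: like FinalizeTask.CanAccept in the earlier hunk, it would prefer MoveStorage tasks whose cache/sealed files already sit on one of this node's storage paths, and yield a scheduling cycle when nothing is local. Below is a hedged sketch of how that TODO could be completed, reusing only names that appear in this commit; whether harmonydb expands the in ($1) list for a TaskID slice is an assumption carried over from the commented code.

// Hypothetical completion of the commented-out TODO in CanAccept above; this is
// not part of the commit, it only mirrors FinalizeTask.CanAccept shown earlier.
func (m *MoveStorageTask) canAcceptPreferLocal(ids []harmonytask.TaskID) (*harmonytask.TaskID, error) {
	ctx := context.Background()

	var tasks []struct {
		TaskID    harmonytask.TaskID `db:"task_id_move_storage"`
		StorageID string             `db:"storage_id"`
	}
	err := m.db.Select(ctx, &tasks, `
		select p.task_id_move_storage, l.storage_id from sectors_sdr_pipeline p
			inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num
			where p.task_id_move_storage in ($1) and l.sector_filetype=4`, ids)
	if err != nil {
		return nil, xerrors.Errorf("getting tasks: %w", err)
	}

	ls, err := m.sc.LocalStorage(ctx)
	if err != nil {
		return nil, xerrors.Errorf("getting local storage: %w", err)
	}

	acceptables := map[harmonytask.TaskID]bool{}
	for _, t := range ids {
		acceptables[t] = true
	}

	// Prefer a task whose files are already on one of this node's storage paths.
	for _, t := range tasks {
		if !acceptables[t.TaskID] {
			continue
		}
		for _, l := range ls {
			if string(l.ID) == t.StorageID {
				return &t.TaskID, nil
			}
		}
	}

	// Nothing local: yield this scheduling cycle rather than pulling remote data eagerly.
	return nil, nil
}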