// Package tasks contains tasks that can be run by the curio command.
package tasks

import (
	"context"
	"sort"
	"strings"
	"time"

	logging "github.com/ipfs/go-log/v2"
	"github.com/samber/lo"
	"golang.org/x/exp/maps"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/cmd/curio/deps"
	curio "github.com/filecoin-project/lotus/curiosrc"
	"github.com/filecoin-project/lotus/curiosrc/chainsched"
	"github.com/filecoin-project/lotus/curiosrc/ffi"
	"github.com/filecoin-project/lotus/curiosrc/gc"
	"github.com/filecoin-project/lotus/curiosrc/message"
	"github.com/filecoin-project/lotus/curiosrc/piece"
	"github.com/filecoin-project/lotus/curiosrc/seal"
	"github.com/filecoin-project/lotus/curiosrc/winning"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/lazy"
	"github.com/filecoin-project/lotus/lib/must"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

var log = logging.Logger("curio/deps")
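
// StartTasks wires up every task enabled in the configuration (window/winning
// PoSt, piece handling, the sealing pipeline and related GC), registers them
// with a new harmonytask engine, and returns that engine.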
func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.TaskEngine, error) {
	cfg := dependencies.Cfg
	db := dependencies.DB
	full := dependencies.Full
	verif := dependencies.Verif
	lw := dependencies.LW
	as := dependencies.As
	maddrs := dependencies.Maddrs
	stor := dependencies.Stor
	lstor := dependencies.LocalStore
	si := dependencies.Si
	var activeTasks []harmonytask.TaskInterface
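
	// sendTask pushes queued messages to the chain and is registered like any
	// other task; sender is handed to the tasks below that need to submit
	// messages (PoSt submission, precommit and commit).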
	sender, sendTask := message.NewSender(full, full, db)
	activeTasks = append(activeTasks, sendTask)
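
	// chainSched fans out chain-head updates to its subscribers. It is only
	// started near the end of this function, and only if something here
	// actually subscribes to it.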
	chainSched := chainsched.New(full)

	var needProofParams bool

	///////////////////////////////////////////////////////////////////////
	///// Task Selection
	///////////////////////////////////////////////////////////////////////
	{
		// PoSt

		if cfg.Subsystems.EnableWindowPost {
			wdPostTask, wdPoStSubmitTask, declareRecoverTask, err := curio.WindowPostScheduler(
				ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender, chainSched,
				as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks)
			if err != nil {
				return nil, err
			}
			activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, declareRecoverTask)
			needProofParams = true
		}

		if cfg.Subsystems.EnableWinningPost {
			winPoStTask := winning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs)
			activeTasks = append(activeTasks, winPoStTask)
			needProofParams = true
		}
	}
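
	// All piece and sealing tasks share one SealCalls wrapper around the
	// storage FFI. It is constructed lazily so nodes running neither piece
	// handling nor sealing never initialize it.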
	slrLazy := lazy.MakeLazy(func() (*ffi.SealCalls, error) {
		return ffi.NewSealCalls(stor, lstor, si), nil
	})

	{
		// Piece handling
		if cfg.Subsystems.EnableParkPiece {
			parkPieceTask := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks)
			cleanupPieceTask := piece.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0)
			activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask)
		}
	}

	hasAnySealingTask := cfg.Subsystems.EnableSealSDR ||
		cfg.Subsystems.EnableSealSDRTrees ||
		cfg.Subsystems.EnableSendPrecommitMsg ||
		cfg.Subsystems.EnablePoRepProof ||
		cfg.Subsystems.EnableMoveStorage ||
		cfg.Subsystems.EnableSendCommitMsg
	{
		// Sealing

		var sp *seal.SealPoller
		var slr *ffi.SealCalls
		if hasAnySealingTask {
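			// The poller tracks per-sector sealing pipeline state in the DB so
			// the sealing tasks below can pick up whichever step is due next.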
			sp = seal.NewPoller(db, full)
			go sp.RunPoller(ctx)

			slr = must.One(slrLazy.Val())
		}

		// NOTE: Tasks with the LEAST priority are at the top
		if cfg.Subsystems.EnableSealSDR {
			sdrTask := seal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks)
			activeTasks = append(activeTasks, sdrTask)
		}
		if cfg.Subsystems.EnableSealSDRTrees {
			treeDTask := seal.NewTreeDTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks)
			treeRCTask := seal.NewTreeRCTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks)
			finalizeTask := seal.NewFinalizeTask(cfg.Subsystems.FinalizeMaxTasks, sp, slr, db)
			activeTasks = append(activeTasks, treeDTask, treeRCTask, finalizeTask)
		}
		if cfg.Subsystems.EnableSendPrecommitMsg {
			precommitTask := seal.NewSubmitPrecommitTask(sp, db, full, sender, as, cfg.Fees.MaxPreCommitGasFee)
			activeTasks = append(activeTasks, precommitTask)
		}
		if cfg.Subsystems.EnablePoRepProof {
			porepTask := seal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks)
			activeTasks = append(activeTasks, porepTask)
			needProofParams = true
		}
		if cfg.Subsystems.EnableMoveStorage {
			moveStorageTask := seal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks)
			activeTasks = append(activeTasks, moveStorageTask)
		}
		if cfg.Subsystems.EnableSendCommitMsg {
			commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee)
			activeTasks = append(activeTasks, commitTask)
		}
	}

	if hasAnySealingTask {
		// Sealing nodes maintain storage index when bored
		storageEndpointGcTask := gc.NewStorageEndpointGC(si, stor, db)
		activeTasks = append(activeTasks, storageEndpointGcTask)
	}
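
	// Fetch proof parameters only if a proving task (window PoSt, winning PoSt
	// or PoRep) was enabled above.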
	if needProofParams {
		for spt := range dependencies.ProofTypes {
			if err := modules.GetParams(true)(spt); err != nil {
				return nil, xerrors.Errorf("getting params: %w", err)
			}
		}
	}

	minerAddresses := make([]string, 0, len(maddrs))
	for k := range maddrs {
		minerAddresses = append(minerAddresses, address.Address(k).String())
	}

	log.Infow("This Curio instance handles",
		"miner_addresses", minerAddresses,
		"tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name }))

	// harmony treats the first task as highest priority, so reverse the order
	// (we could have just appended to this list in the reverse order, but defining
	// tasks in pipeline order is more intuitive)
	activeTasks = lo.Reverse(activeTasks)
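
	// Hand the selected tasks to the harmonytask engine, which owns scheduling
	// and resource accounting for this machine from here on.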
	ht, err := harmonytask.New(db, activeTasks, dependencies.ListenAddr)
	if err != nil {
		return nil, err
	}
	go machineDetails(dependencies, activeTasks, ht.ResourcesAvailable().MachineID)

	if hasAnySealingTask {
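		// The watcher follows chain-head updates via chainSched and records when
		// messages sent through the sender land on chain.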
		watcher, err := message.NewMessageWatcher(db, ht, chainSched, full)
		if err != nil {
			return nil, err
		}
		_ = watcher
	}
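
	// Start the chain scheduler only when something subscribed to it above
	// (the window PoSt scheduler or the message watcher).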
	if cfg.Subsystems.EnableWindowPost || hasAnySealingTask {
		go chainSched.Run(ctx)
	}

	return ht, nil
}
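
// machineDetails records this machine's task names, config layers, startup time
// and miner addresses in harmony_machine_details, then warns if no known machine
// is running PoSt tasks for one of our miners.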
func machineDetails(deps *deps.Deps, activeTasks []harmonytask.TaskInterface, machineID int) {
	taskNames := lo.Map(activeTasks, func(item harmonytask.TaskInterface, _ int) string {
		return item.TypeDetails().Name
	})

	miners := lo.Map(maps.Keys(deps.Maddrs), func(item dtypes.MinerAddress, _ int) string {
		return address.Address(item).String()
	})
	sort.Strings(miners)

	_, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details
		(tasks, layers, startup_time, miners, machine_id) VALUES ($1, $2, $3, $4, $5)
		ON CONFLICT (machine_id) DO UPDATE SET tasks=$1, layers=$2, startup_time=$3, miners=$4`,
		strings.Join(taskNames, ","), strings.Join(deps.Layers, ","),
		time.Now(), strings.Join(miners, ","), machineID)

	if err != nil {
		log.Errorf("failed to update machine details: %s", err)
		return
	}

	// If this machine isn't running PoSt tasks itself, check whether some other
	// known machine handles PoSt for each of our miners and warn if none does.
	if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") {
		// Maybe we aren't running a PoSt for these miners?
		var allMachines []struct {
			MachineID int    `db:"machine_id"`
			Miners    string `db:"miners"`
			Tasks     string `db:"tasks"`
		}
		err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`)
		if err != nil {
			log.Errorf("failed to get machine details: %s", err)
			return
		}

		for _, miner := range miners {
			var myPostIsHandled bool
			for _, m := range allMachines {
				if !lo.Contains(strings.Split(m.Miners, ","), miner) {
					continue
				}
				if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") {
					myPostIsHandled = true
					break
				}
			}
			if !myPostIsHandled {
				log.Errorf("No PoSt tasks are running for miner %s. Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner)
			}
		}
	}
}