lp runs. TODO: ds-sectorInfo move & cleanups

Andrew Jackson (Ajax) 2023-10-24 19:26:13 -05:00
parent e481e1196f
commit 43680400c3
6 changed files with 159 additions and 48 deletions

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"fmt"
"net"
"net/http"
@@ -18,6 +17,7 @@ import (
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-statestore"
@@ -34,6 +34,7 @@ import (
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer"
@@ -74,6 +75,11 @@ var runCmd = &cli.Command{
Usage: "list of layers to be interpreted (atop defaults). Default: base",
Value: cli.NewStringSlice("base"),
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.lotus/storage.json",
},
},
Action: func(cctx *cli.Context) (err error) {
defer func() {
@@ -131,7 +137,7 @@ var runCmd = &cli.Command{
if err := r.Init(repo.Provider); err != nil {
return err
}
/*
lr, err := r.Lock(repo.Provider)
if err != nil {
return err
@@ -155,6 +161,7 @@ var runCmd = &cli.Command{
if err := lr.Close(); err != nil {
return fmt.Errorf("close repo: %w", err)
}
*/
}
db, err := makeDB(cctx)
@@ -174,15 +181,15 @@ var runCmd = &cli.Command{
*/
const unspecifiedAddress = "0.0.0.0"
address := cctx.String("listen")
addressSlice := strings.Split(address, ":")
listenAddr := cctx.String("listen")
addressSlice := strings.Split(listenAddr, ":")
if ip := net.ParseIP(addressSlice[0]); ip != nil {
if ip.String() == unspecifiedAddress {
rip, err := db.GetRoutableIP()
if err != nil {
return err
}
address = rip + ":" + addressSlice[1]
listenAddr = rip + ":" + addressSlice[1]
}
}
@@ -195,10 +202,12 @@ var runCmd = &cli.Command{
log.Error("closing repo", err)
}
}()
if err := lr.SetAPIToken([]byte(address)); err != nil { // our assigned listen address is our unique token
if err := lr.SetAPIToken([]byte(listenAddr)); err != nil { // our assigned listen address is our unique token
return xerrors.Errorf("setting api token: %w", err)
}
localStore, err := paths.NewLocal(ctx, lr, nil, []string{"http://" + address + "/remote"})
localStore, err := paths.NewLocal(ctx, &paths.BasicLocalStorage{
PathToJSON: cctx.String("storage-json"),
}, nil, []string{"http://" + listenAddr + "/remote"})
if err != nil {
return err
}
@@ -212,7 +221,7 @@ var runCmd = &cli.Command{
var verif storiface.Verifier = ffiwrapper.ProofVerifier
as, err := modules.AddressSelector(&cfg.Addresses)()
as, err := modules.LotusProviderAddressSelector(&cfg.Addresses)()
if err != nil {
return err
}
@@ -257,25 +266,30 @@ var runCmd = &cli.Command{
return err
}
ds, dsCloser, err := modules.DatastoreV2(ctx, false, lr)
//ds, dsCloser, err := modules.DatastoreV2(ctx, false, lr)
if err != nil {
return err
}
defer dsCloser()
maddr, err := modules.MinerAddress(ds)
//defer dsCloser()
var maddrs []dtypes.MinerAddress
for _, s := range cfg.Addresses.MinerAddresses {
addr, err := address.NewFromString(s)
if err != nil {
return err
}
maddrs = append(maddrs, dtypes.MinerAddress(addr))
}
if cfg.Subsystems.EnableWindowPost {
wdPostTask, err := modules.WindowPostSchedulerV2(ctx, cfg.Fees, cfg.Proving, full, sealer, verif, j,
as, maddr, db, cfg.Subsystems.WindowPostMaxTasks)
as, maddrs, db, cfg.Subsystems.WindowPostMaxTasks)
if err != nil {
return err
}
activeTasks = append(activeTasks, wdPostTask)
}
taskEngine, err := harmonytask.New(db, activeTasks, address)
taskEngine, err := harmonytask.New(db, activeTasks, listenAddr)
if err != nil {
return err
}
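For context (not part of this commit): the file behind the new --storage-json flag is the same storiface.StorageConfig JSON that lotus-miner keeps at ~/.lotus/storage.json, read and written through config.StorageFromFile / config.WriteStorageFile, the helpers the new paths.BasicLocalStorage uses below. A minimal sketch of producing such a file; the LocalPath field layout and the storage path are assumptions:

package main

import (
	"log"

	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func main() {
	// One local storage path; the path itself is a placeholder.
	cfg := storiface.StorageConfig{
		StoragePaths: []storiface.LocalPath{
			{Path: "/var/lib/lotus-provider/storage"},
		},
	}
	if err := config.WriteStorageFile("storage.json", cfg); err != nil {
		log.Fatal(err)
	}
}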

View File

@@ -369,7 +369,7 @@ func DefaultLotusProvider() *LotusProviderConfig {
MaxWindowPoStGasFee: types.MustParseFIL("5"),
MaxPublishDealsFee: types.MustParseFIL("0.05"),
},
Addresses: MinerAddressConfig{
Addresses: LotusProviderAddresses{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{},

View File

@@ -70,7 +70,7 @@ type LotusProviderConfig struct {
Subsystems ProviderSubsystemsConfig
Fees LotusProviderFees
Addresses MinerAddressConfig
Addresses LotusProviderAddresses
Proving ProvingConfig
SealingParams SealingConfig // TODO defaults
SealerConfig // TODO defaults
@@ -574,6 +574,9 @@ type LotusProviderAddresses struct {
// A control address that doesn't have enough funds will still be chosen
// over the worker address if this flag is set.
DisableWorkerFallback bool
// MinerAddresses are the addresses of the miner actors to use for sending messages
MinerAddresses []string
}
// API contains configs for API endpoint

View File

@@ -211,6 +211,46 @@ func AddressSelector(addrConf *config.MinerAddressConfig) func() (*ctladdr.Addre
return as, nil
}
}
func LotusProviderAddressSelector(addrConf *config.LotusProviderAddresses) func() (*ctladdr.AddressSelector, error) {
return func() (*ctladdr.AddressSelector, error) {
as := &ctladdr.AddressSelector{}
if addrConf == nil {
return as, nil
}
as.DisableOwnerFallback = addrConf.DisableOwnerFallback
as.DisableWorkerFallback = addrConf.DisableWorkerFallback
for _, s := range addrConf.PreCommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing precommit control address: %w", err)
}
as.PreCommitControl = append(as.PreCommitControl, addr)
}
for _, s := range addrConf.CommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing commit control address: %w", err)
}
as.CommitControl = append(as.CommitControl, addr)
}
for _, s := range addrConf.TerminateControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing terminate control address: %w", err)
}
as.TerminateControl = append(as.TerminateControl, addr)
}
return as, nil
}
}
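For illustration (not part of the diff): constructing an AddressSelector through the renamed helper; the address string is a placeholder, and note that the new MinerAddresses field is parsed by the run command, not by the selector itself.

// Sketch: building an AddressSelector from the new LotusProviderAddresses type.
func exampleAddressSelector() (*ctladdr.AddressSelector, error) {
	addrCfg := &config.LotusProviderAddresses{
		DisableOwnerFallback: false,
		MinerAddresses:       []string{"f01000"}, // placeholder actor ID
	}
	return LotusProviderAddressSelector(addrCfg)()
}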
func PreflightChecks(mctx helpers.MetricsCtx, lc fx.Lifecycle, api v1api.FullNode, maddr dtypes.MinerAddress) error {
ctx := helpers.LifecycleCtx(mctx, lc)
@@ -312,12 +352,11 @@ func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func
verif = params.Verifier
j = params.Journal
as = params.AddrSel
maddr = address.Address(params.Maddr)
)
ctx := helpers.LifecycleCtx(mctx, lc)
fps, err := wdpost.NewWindowedPoStScheduler(api, fc, pc, as, sealer, verif, sealer, j, maddr, db, nil)
fps, err := wdpost.NewWindowedPoStScheduler(api, fc, pc, as, sealer, verif, sealer, j, []dtypes.MinerAddress{params.Maddr}, db, nil)
if err != nil {
return nil, err
@@ -336,7 +375,7 @@ func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func
func WindowPostSchedulerV2(ctx context.Context, fc config.LotusProviderFees, pc config.ProvingConfig,
api api.FullNode, sealer sealer.SectorManager, verif storiface.Verifier, j journal.Journal,
as *ctladdr.AddressSelector, maddr dtypes.MinerAddress, db *harmonydb.DB, max int) (*wdpost.WdPostTask, error) {
as *ctladdr.AddressSelector, maddr []dtypes.MinerAddress, db *harmonydb.DB, max int) (*wdpost.WdPostTask, error) {
fc2 := config.MinerFeeConfig{
MaxPreCommitGasFee: fc.MaxPreCommitGasFee,
@@ -345,7 +384,7 @@ func WindowPostSchedulerV2(ctx context.Context, fc config.LotusProviderFees, pc
MaxPublishDealsFee: fc.MaxPublishDealsFee,
}
ts := wdpost.NewWdPostTask(db, nil, max)
fps, err := wdpost.NewWindowedPoStScheduler(api, fc2, pc, as, sealer, verif, sealer, j, address.Address(maddr), db, ts)
fps, err := wdpost.NewWindowedPoStScheduler(api, fc2, pc, as, sealer, verif, sealer, j, maddr, db, ts)
ts.Scheduler = fps
if err != nil {

View File

@@ -0,0 +1,44 @@
package paths
import (
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type BasicLocalStorage struct {
PathToJSON string
}
var _ LocalStorage = &BasicLocalStorage{}
func (ls *BasicLocalStorage) GetStorage() (storiface.StorageConfig, error) {
var def storiface.StorageConfig
c, err := config.StorageFromFile(ls.PathToJSON, &def)
if err != nil {
return storiface.StorageConfig{}, err
}
return *c, nil
}
func (ls *BasicLocalStorage) SetStorage(f func(*storiface.StorageConfig)) error {
var def storiface.StorageConfig
c, err := config.StorageFromFile(ls.PathToJSON, &def)
if err != nil {
return err
}
f(c)
return config.WriteStorageFile(ls.PathToJSON, *c)
}
func (ls *BasicLocalStorage) Stat(path string) (fsutil.FsStat, error) {
return fsutil.Statfs(path)
}
func (ls *BasicLocalStorage) DiskUsage(path string) (int64, error) {
si, err := fsutil.FileSize(path)
if err != nil {
return 0, err
}
return si.OnDisk, nil
}
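A usage sketch (not part of this commit) exercising BasicLocalStorage end to end; the paths are placeholders, and it assumes config.StorageFromFile falls back to the supplied default when the file does not exist yet.

// Sketch: append a storage path via SetStorage, then read the config back.
func exampleBasicLocalStorage() (storiface.StorageConfig, error) {
	ls := &BasicLocalStorage{PathToJSON: "/tmp/storage.json"}
	err := ls.SetStorage(func(sc *storiface.StorageConfig) {
		sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: "/srv/sectors"})
	})
	if err != nil {
		return storiface.StorageConfig{}, err
	}
	return ls.GetStorage()
}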

View File

@@ -26,6 +26,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage/ctladdr"
"github.com/filecoin-project/lotus/storage/sealer"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
@@ -92,6 +93,11 @@ type WindowPoStScheduler struct {
wdPostTask *WdPostTask
}
type ActorInfo struct {
address.Address
api.MinerInfo
}
// NewWindowedPoStScheduler creates a new WindowPoStScheduler scheduler.
func NewWindowedPoStScheduler(api NodeAPI,
cfg config.MinerFeeConfig,
@@ -101,14 +107,20 @@ func NewWindowedPoStScheduler(api NodeAPI,
verif storiface.Verifier,
ft sealer.FaultTracker,
j journal.Journal,
actor address.Address,
actors []dtypes.MinerAddress,
db *harmonydb.DB,
task *WdPostTask) (*WindowPoStScheduler, error) {
mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK)
var actorInfos []ActorInfo
for _, actor := range actors {
mi, err := api.StateMinerInfo(context.TODO(), address.Address(actor), types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("getting miner info: %w", err)
}
actorInfos = append(actorInfos, ActorInfo{address.Address(actor), mi})
}
// TODO I punted here knowing that actorInfos will be consumed differently later.
return &WindowPoStScheduler{
api: api,
feeCfg: cfg,
@@ -116,13 +128,12 @@ func NewWindowedPoStScheduler(api NodeAPI,
prover: sp,
verifier: verif,
faultTracker: ft,
proofType: mi.WindowPoStProofType,
partitionSectors: mi.WindowPoStPartitionSectors,
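// Assumes at least one configured miner address: proof parameters are taken
// from the first actor only (see TODO above).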
proofType: actorInfos[0].WindowPoStProofType,
partitionSectors: actorInfos[0].WindowPoStPartitionSectors,
disablePreChecks: pcfg.DisableWDPoStPreChecks,
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
singleRecoveringPartitionPerPostMessage: pcfg.SingleRecoveringPartitionPerPostMessage,
actor: actor,
evtTypes: [...]journal.EventType{
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
evtTypeWdPoStProofs: j.RegisterEventType("wdpost", "proofs_processed"),