Merge pull request #6356 from filecoin-project/nonsense/split-market-miner-processes

Support standalone miner-market process

Commit: 837322ea59
@@ -55,6 +55,13 @@ type StorageMiner interface {
// Get the status of a given sector by ID
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read

// Add piece to an open sector. If no sectors with enough space are open,
// either a new sector will be created, or this call will block until more
// sectors can be created.
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin

SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin

// List all staged sectors
SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read

@@ -135,8 +142,8 @@ type StorageMiner interface {
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin

StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin

@@ -302,3 +309,25 @@ type PendingDealInfo struct {
PublishPeriodStart time.Time
PublishPeriod time.Duration
}

type SectorOffset struct {
Sector abi.SectorNumber
Offset abi.PaddedPieceSize
}

// DealInfo is a tuple of deal identity and its schedule
type PieceDealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}

// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}
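These two structs replace the storage-sealing package's `DealInfo`/`DealSchedule` (see the `cmd/lotus-storage-miner` and `extern/storage-sealing` changes further down). A minimal sketch of how a caller might fill them in; the `buildDealInfo` helper and its inputs are illustrative, not part of this PR:

```go
package example

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin/market"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// buildDealInfo is a sketch: the publish CID and deal proposal normally come
// from the markets subsystem; here they are simply passed in by the caller.
func buildDealInfo(publishCid cid.Cid, proposal market.DealProposal, dealID abi.DealID) api.PieceDealInfo {
	return api.PieceDealInfo{
		PublishCid:   &publishCid, // CID of the PublishStorageDeals message, if already published
		DealID:       dealID,
		DealProposal: &proposal,
		DealSchedule: api.DealSchedule{
			StartEpoch: proposal.StartEpoch, // piece must appear in a proven sector by this epoch
			EndEpoch:   proposal.EndEpoch,
		},
		KeepUnsealed: true, // keep an unsealed copy so the piece stays retrievable
	}
}
```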
api/cbor_gen.go (379 changed lines)
@@ -8,6 +8,7 @@ import (
"sort"

abi "github.com/filecoin-project/go-state-types/abi"
market "github.com/filecoin-project/specs-actors/actors/builtin/market"
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -738,3 +739,381 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {

return nil
}
func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{165}); err != nil {
return err
}

scratch := make([]byte, 9)

// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("PublishCid")); err != nil {
return err
}

if t.PublishCid == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
}
}

// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}

// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
return err
}

if err := t.DealProposal.MarshalCBOR(w); err != nil {
return err
}

// t.DealSchedule (api.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
return err
}

if err := t.DealSchedule.MarshalCBOR(w); err != nil {
return err
}

// t.KeepUnsealed (bool) (bool)
if len("KeepUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
return err
}

if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
return err
}
return nil
}

func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
*t = PieceDealInfo{}

br := cbg.GetPeeker(r)
scratch := make([]byte, 8)

maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}

if extra > cbg.MaxLength {
return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra)
}

var name string
n := extra

for i := uint64(0); i < n; i++ {

{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}

name = string(sval)
}

switch name {
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":

{

b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}

c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
}

t.PublishCid = &c
}

}
// t.DealID (abi.DealID) (uint64)
case "DealID":

{

maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)

}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":

{

b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.DealProposal = new(market.DealProposal)
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
}
}

}
// t.DealSchedule (api.DealSchedule) (struct)
case "DealSchedule":

{

if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
}

}
// t.KeepUnsealed (bool) (bool)
case "KeepUnsealed":

maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.KeepUnsealed = false
case 21:
t.KeepUnsealed = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}

default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}

return nil
}
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{162}); err != nil {
return err
}

scratch := make([]byte, 9)

// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}

if t.StartEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}

// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
return err
}

if t.EndEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
return err
}
}
return nil
}

func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
*t = DealSchedule{}

br := cbg.GetPeeker(r)
scratch := make([]byte, 8)

maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}

if extra > cbg.MaxLength {
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
}

var name string
n := extra

for i := uint64(0); i < n; i++ {

{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}

name = string(sval)
}

switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}

t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}

t.EndEpoch = abi.ChainEpoch(extraI)
}

default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}

return nil
}
@@ -52,8 +52,30 @@ func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Heade
return &res, closer, err
}

func getPushUrl(addr string) (string, error) {
pushUrl, err := url.Parse(addr)
if err != nil {
return "", err
}
switch pushUrl.Scheme {
case "ws":
pushUrl.Scheme = "http"
case "wss":
pushUrl.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push

pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push")
return pushUrl.String(), nil
}

// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
pushUrl, err := getPushUrl(addr)
if err != nil {
return nil, nil, err
}

var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
@@ -61,26 +83,19 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H
&res.Internal,
},
requestHeader,
opts...,
append([]jsonrpc.Option{
rpcenc.ReaderParamEncoder(pushUrl),
}, opts...)...,
)

return &res, closer, err
}
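The split markets process reaches the sealer over this RPC client. A minimal sketch of a caller dialing the miner API the same way the `checkApiInfo` helper added in `init_service.go` below does; the `dialMiner` helper and its `apiInfo` argument are illustrative, the string being whatever `lotus-miner auth api-info --perm=admin` prints:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api/client"
	cliutil "github.com/filecoin-project/lotus/cli/util"
)

// dialMiner is a sketch: apiInfo is a "TOKEN:/ip4/.../http"-style string
// (placeholder), as produced by `lotus-miner auth api-info --perm=admin`.
func dialMiner(ctx context.Context, apiInfo string) error {
	info := cliutil.ParseApiInfo(apiInfo)

	addr, err := info.DialArgs("v0") // the v0 endpoint, as used by checkApiInfo in this PR
	if err != nil {
		return fmt.Errorf("could not get DialArgs: %w", err)
	}

	miner, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
	if err != nil {
		return err
	}
	defer closer()

	v, err := miner.Version(ctx)
	if err != nil {
		return err
	}
	fmt.Println("remote miner API version:", v.APIVersion)
	return nil
}
```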

func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Worker, jsonrpc.ClientCloser, error) {
u, err := url.Parse(addr)
func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) {
pushUrl, err := getPushUrl(addr)
if err != nil {
return nil, nil, err
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push

u.Path = path.Join(u.Path, "../streams/v0/push")

var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
@@ -88,7 +103,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
&res.Internal,
},
requestHeader,
rpcenc.ReaderParamEncoder(u.String()),
rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(),
jsonrpc.WithTimeout(30*time.Second),
)
@@ -665,6 +665,8 @@ type StorageMinerStruct struct {

SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`

SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`

SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`

SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
@@ -703,6 +705,8 @@ type StorageMinerStruct struct {

SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"`

SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"`

SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"`

StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
@@ -3153,6 +3157,14 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
return nil, xerrors.New("method not supported")
}

func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)
}

func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
return *new(SectorOffset), xerrors.New("method not supported")
}

func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
return s.Internal.SectorCommitFlush(p0)
}
@@ -3305,6 +3317,14 @@ func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]i
return *new(map[SectorState]int), xerrors.New("method not supported")
}

func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5)
}

func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
return xerrors.New("method not supported")
}

func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error {
return s.Internal.SectorsUpdate(p0, p1, p2)
}

@@ -57,7 +57,7 @@ var (
FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0)

MinerAPIVersion0 = newVer(1, 1, 0)
MinerAPIVersion0 = newVer(1, 2, 0)
WorkerAPIVersion0 = newVer(1, 1, 0)
)
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -279,7 +279,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {

stop, err := node.New(tu.ctx,
node.FullAPI(&out),
node.Online(),
node.Base(),
node.Repo(sourceRepo),
node.MockHost(tu.mn),
node.Test(),
@@ -310,10 +310,11 @@ func (tu *syncTestUtil) addClientNode() int {

var out api.FullNode

r := repo.NewMemory(nil)
stop, err := node.New(tu.ctx,
node.FullAPI(&out),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.Base(),
node.Repo(r),
node.MockHost(tu.mn),
node.Test(),

@@ -49,7 +49,7 @@ func TestMinerAllInfo(t *testing.T) {

t.Run("pre-info-all", run)

dh := kit.NewDealHarness(t, client, miner)
dh := kit.NewDealHarness(t, client, miner, miner)
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath)
@@ -121,7 +121,8 @@ var initCmd = &cli.Command{
},
},
Subcommands: []*cli.Command{
initRestoreCmd,
restoreCmd,
serviceCmd,
},
Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner")
@@ -317,10 +318,10 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string
Size: abi.PaddedPieceSize(meta.SectorSize),
PieceCID: commD,
},
DealInfo: &sealing.DealInfo{
DealInfo: &lapi.PieceDealInfo{
DealID: dealID,
DealProposal: &sector.Deal,
DealSchedule: sealing.DealSchedule{
DealSchedule: lapi.DealSchedule{
StartEpoch: sector.Deal.StartEpoch,
EndEpoch: sector.Deal.EndEpoch,
},
@@ -470,7 +471,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
AllowCommit: true,
AllowUnseal: true,
}, wsts, smsts)

if err != nil {
return err
}
@@ -734,6 +734,8 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID,
return retval.IDAddress, nil
}

// checkV1ApiSupport uses v0 api version to signal support for v1 API
// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons
func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error {
// check v0 api version to make sure it supports v1 api
api0, closer, err := lcli.GetFullNodeAPI(cctx)
@@ -22,6 +22,7 @@ import (

lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
@@ -30,7 +31,7 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)

var initRestoreCmd = &cli.Command{
var restoreCmd = &cli.Command{
Name: "restore",
Usage: "Initialize a lotus miner repo from a backup",
Flags: []cli.Flag{
@@ -49,129 +50,11 @@ var initRestoreCmd = &cli.Command{
},
ArgsUsage: "[backupFile]",
Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner using a backup")
if cctx.Args().Len() != 1 {
return xerrors.Errorf("expected 1 argument")
}

ctx := lcli.ReqContext(cctx)
log.Info("Initializing lotus miner using a backup")

log.Info("Trying to connect to full node RPC")

if err := checkV1ApiSupport(ctx, cctx); err != nil {
return err
}

api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
defer closer()

log.Info("Checking full node version")

v, err := api.Version(ctx)
if err != nil {
return err
}

if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
}

if !cctx.Bool("nosync") {
if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}

bf, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expand backup file path: %w", err)
}

st, err := os.Stat(bf)
if err != nil {
return xerrors.Errorf("stat backup file (%s): %w", bf, err)
}

f, err := os.Open(bf)
if err != nil {
return xerrors.Errorf("opening backup file: %w", err)
}
defer f.Close() // nolint:errcheck

log.Info("Checking if repo exists")

repoPath := cctx.String(FlagMinerRepo)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}

ok, err := r.Exists()
if err != nil {
return err
}
if ok {
return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
}

log.Info("Initializing repo")

if err := r.Init(repo.StorageMiner); err != nil {
return err
}

lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck

if cctx.IsSet("config") {
log.Info("Restoring config")

cf, err := homedir.Expand(cctx.String("config"))
if err != nil {
return xerrors.Errorf("expanding config path: %w", err)
}

_, err = os.Stat(cf)
if err != nil {
return xerrors.Errorf("stat config file (%s): %w", cf, err)
}

var cerr error
err = lr.SetConfig(func(raw interface{}) {
rcfg, ok := raw.(*config.StorageMiner)
if !ok {
cerr = xerrors.New("expected miner config")
return
}

ff, err := config.FromFile(cf, rcfg)
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return
}

*rcfg = *ff.(*config.StorageMiner)
})
if cerr != nil {
return cerr
}
if err != nil {
return xerrors.Errorf("setting config: %w", err)
}

} else {
log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
}

var storageCfg *stores.StorageConfig
if cctx.IsSet("storage-config") {
log.Info("Restoring storage path config")

cf, err := homedir.Expand(cctx.String("storage-config"))
if err != nil {
return xerrors.Errorf("expanding storage config path: %w", err)
@@ -182,101 +65,233 @@ var initRestoreCmd = &cli.Command{
return xerrors.Errorf("reading storage config: %w", err)
}

var cerr error
err = lr.SetStorage(func(scfg *stores.StorageConfig) {
cerr = json.Unmarshal(cfb, scfg)
})
if cerr != nil {
return xerrors.Errorf("unmarshalling storage config: %w", cerr)
}
storageCfg = &stores.StorageConfig{}
err = json.Unmarshal(cfb, storageCfg)
if err != nil {
return xerrors.Errorf("setting storage config: %w", err)
return xerrors.Errorf("cannot unmarshal json for storage config: %w", err)
}
} else {
log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
}

log.Info("Restoring metadata backup")
if err := restore(ctx, cctx, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
log.Info("Checking proof parameters")

mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}

bar := pb.New64(st.Size())
br := bar.NewProxyReader(f)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
log.Info("Configuring miner actor")

bar.Start()
err = backupds.RestoreInto(br, mds)
bar.Finish()
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
return err
}

if err != nil {
return xerrors.Errorf("restoring metadata: %w", err)
}

log.Info("Checking actor metadata")

abytes, err := mds.Get(datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
}

maddr, err := address.NewFromBytes(abytes)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}

log.Info("ACTOR ADDRESS: ", maddr.String())

mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}

log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))

wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("resolving worker key: %w", err)
}

has, err := api.WalletHas(ctx, wk)
if err != nil {
return xerrors.Errorf("checking worker address: %w", err)
}

if !has {
return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
}

log.Info("Checking proof parameters")

if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}

log.Info("Initializing libp2p identity")

p2pSk, err := makeHostKey(lr)
if err != nil {
return xerrors.Errorf("make host key: %w", err)
}

peerid, err := peer.IDFromPrivateKey(p2pSk)
if err != nil {
return xerrors.Errorf("peer ID from private key: %w", err)
}

log.Info("Configuring miner actor")

if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
return nil
}); err != nil {
return err
}

return nil
},
}

func restore(ctx context.Context, cctx *cli.Context, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error {
if cctx.Args().Len() != 1 {
return xerrors.Errorf("expected 1 argument")
}

log.Info("Trying to connect to full node RPC")

api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
defer closer()

log.Info("Checking full node version")

v, err := api.Version(ctx)
if err != nil {
return err
}

if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
}

if !cctx.Bool("nosync") {
if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}

bf, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expand backup file path: %w", err)
}

st, err := os.Stat(bf)
if err != nil {
return xerrors.Errorf("stat backup file (%s): %w", bf, err)
}

f, err := os.Open(bf)
if err != nil {
return xerrors.Errorf("opening backup file: %w", err)
}
defer f.Close() // nolint:errcheck

log.Info("Checking if repo exists")

repoPath := cctx.String(FlagMinerRepo)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}

ok, err := r.Exists()
if err != nil {
return err
}
if ok {
return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
}

log.Info("Initializing repo")

if err := r.Init(repo.StorageMiner); err != nil {
return err
}

lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck

if cctx.IsSet("config") {
log.Info("Restoring config")

cf, err := homedir.Expand(cctx.String("config"))
if err != nil {
return xerrors.Errorf("expanding config path: %w", err)
}

_, err = os.Stat(cf)
if err != nil {
return xerrors.Errorf("stat config file (%s): %w", cf, err)
}

var cerr error
err = lr.SetConfig(func(raw interface{}) {
rcfg, ok := raw.(*config.StorageMiner)
if !ok {
cerr = xerrors.New("expected miner config")
return
}

ff, err := config.FromFile(cf, rcfg)
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return
}

*rcfg = *ff.(*config.StorageMiner)
if manageConfig != nil {
cerr = manageConfig(rcfg)
}
})
if cerr != nil {
return cerr
}
if err != nil {
return xerrors.Errorf("setting config: %w", err)
}

} else {
log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
}

if strConfig != nil {
log.Info("Restoring storage path config")

err = lr.SetStorage(func(scfg *stores.StorageConfig) {
*scfg = *strConfig
})
if err != nil {
return xerrors.Errorf("setting storage config: %w", err)
}
} else {
log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
}

log.Info("Restoring metadata backup")

mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}

bar := pb.New64(st.Size())
br := bar.NewProxyReader(f)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES

bar.Start()
err = backupds.RestoreInto(br, mds)
bar.Finish()

if err != nil {
return xerrors.Errorf("restoring metadata: %w", err)
}

log.Info("Checking actor metadata")

abytes, err := mds.Get(datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
}

maddr, err := address.NewFromBytes(abytes)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}

log.Info("ACTOR ADDRESS: ", maddr.String())

mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}

log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))

wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("resolving worker key: %w", err)
}

has, err := api.WalletHas(ctx, wk)
if err != nil {
return xerrors.Errorf("checking worker address: %w", err)
}

if !has {
return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
}

log.Info("Initializing libp2p identity")

p2pSk, err := makeHostKey(lr)
if err != nil {
return xerrors.Errorf("make host key: %w", err)
}

peerid, err := peer.IDFromPrivateKey(p2pSk)
if err != nil {
return xerrors.Errorf("peer ID from private key: %w", err)
}

return after(api, maddr, peerid, mi)
}
cmd/lotus-storage-miner/init_service.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package main

import (
"context"
"strings"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/node/config"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)

const (
MarketsService = "markets"
)

var serviceCmd = &cli.Command{
Name: "service",
Usage: "Initialize a lotus miner sub-service",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "config",
Usage: "config file (config.toml)",
Required: true,
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
&cli.StringSliceFlag{
Name: "type",
Usage: "type of service to be enabled",
},
&cli.StringFlag{
Name: "api-sealer",
Usage: "sealer API info (lotus-miner auth api-info --perm=admin)",
},
&cli.StringFlag{
Name: "api-sector-index",
Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)",
},
},
ArgsUsage: "[backupFile]",
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
log.Info("Initializing lotus miner service")

es := EnabledServices(cctx.StringSlice("type"))

if len(es) == 0 {
return xerrors.Errorf("at least one module must be enabled")
}

// we should remove this as soon as we have more service types and not just `markets`
if !es.Contains(MarketsService) {
return xerrors.Errorf("markets module must be enabled")
}

if !cctx.IsSet("api-sealer") {
return xerrors.Errorf("--api-sealer is required without the sealer module enabled")
}
if !cctx.IsSet("api-sector-index") {
return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled")
}

if err := restore(ctx, cctx, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error {
cfg.Subsystems.EnableMarkets = es.Contains(MarketsService)
cfg.Subsystems.EnableMining = false
cfg.Subsystems.EnableSealing = false
cfg.Subsystems.EnableSectorStorage = false

if !cfg.Subsystems.EnableSealing {
ai, err := checkApiInfo(ctx, cctx.String("api-sealer"))
if err != nil {
return xerrors.Errorf("checking sealer API: %w", err)
}
cfg.Subsystems.SealerApiInfo = ai
}

if !cfg.Subsystems.EnableSectorStorage {
ai, err := checkApiInfo(ctx, cctx.String("api-sector-index"))
if err != nil {
return xerrors.Errorf("checking sector index API: %w", err)
}
cfg.Subsystems.SectorIndexApiInfo = ai
}

return nil
}, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
if es.Contains(MarketsService) {
log.Info("Configuring miner actor")

if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
return err
}
}

return nil
}); err != nil {
return err
}

return nil
},
}

type EnabledServices []string

func (es EnabledServices) Contains(name string) bool {
for _, s := range es {
if s == name {
return true
}
}
return false
}

func checkApiInfo(ctx context.Context, ai string) (string, error) {
ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=")
info := cliutil.ParseApiInfo(ai)
addr, err := info.DialArgs("v0")
if err != nil {
return "", xerrors.Errorf("could not get DialArgs: %w", err)
}

log.Infof("Checking api version of %s", addr)

api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
if err != nil {
return "", err
}
defer closer()

v, err := api.Version(ctx)
if err != nil {
return "", xerrors.Errorf("checking version: %w", err)
}

if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) {
return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion)
}

return ai, nil
}
@@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -118,13 +119,33 @@ var runCmd = &cli.Command{
return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath)
}

lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.StorageMiner)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}

bootstrapLibP2P := cfg.Subsystems.EnableMarkets

err = lr.Close()
if err != nil {
return err
}

shutdownChan := make(chan struct{})

var minerapi api.StorageMiner
stop, err := node.New(ctx,
node.StorageMiner(&minerapi),
node.StorageMiner(&minerapi, cfg.Subsystems),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
node.Online(),
node.Base(),
node.Repo(r),

node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") },
@@ -142,14 +163,18 @@ var runCmd = &cli.Command{
return xerrors.Errorf("getting API endpoint: %w", err)
}

// Bootstrap with full node
remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
if err != nil {
return xerrors.Errorf("getting full node libp2p address: %w", err)
}
if bootstrapLibP2P {
log.Infof("Bootstrapping libp2p network with full node")

if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
return xerrors.Errorf("connecting to full node (libp2p): %w", err)
// Bootstrap with full node
remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
if err != nil {
return xerrors.Errorf("getting full node libp2p address: %w", err)
}

if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
return xerrors.Errorf("connecting to full node (libp2p): %w", err)
}
}

log.Infof("Remote version %s", v)
@@ -314,7 +314,7 @@ var DaemonCmd = &cli.Command{
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),

node.Online(),
node.Base(),
node.Repo(r),

node.Override(new(dtypes.Bootstrapper), isBootstrapper),
@@ -98,6 +98,7 @@
* [SealingAbort](#SealingAbort)
* [SealingSchedDiag](#SealingSchedDiag)
* [Sector](#Sector)
* [SectorAddPieceToAny](#SectorAddPieceToAny)
* [SectorCommitFlush](#SectorCommitFlush)
* [SectorCommitPending](#SectorCommitPending)
* [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
@@ -118,6 +119,7 @@
* [SectorsRefs](#SectorsRefs)
* [SectorsStatus](#SectorsStatus)
* [SectorsSummary](#SectorsSummary)
* [SectorsUnsealPiece](#SectorsUnsealPiece)
* [SectorsUpdate](#SectorsUpdate)
* [Storage](#Storage)
* [StorageAddLocal](#StorageAddLocal)
@@ -1561,6 +1563,54 @@ Response: `{}`
## Sector

### SectorAddPieceToAny
Add piece to an open sector. If no sectors with enough space are open,
either a new sector will be created, or this call will block until more
sectors can be created.

Perms: admin

Inputs:
```json
[
1024,
{},
{
"PublishCid": null,
"DealID": 5432,
"DealProposal": {
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceSize": 1032,
"VerifiedDeal": true,
"Client": "f01234",
"Provider": "f01234",
"Label": "string value",
"StartEpoch": 10101,
"EndEpoch": 10101,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealSchedule": {
"StartEpoch": 10101,
"EndEpoch": 10101
},
"KeepUnsealed": true
}
]
```

Response:
```json
{
"Sector": 9,
"Offset": 1032
}
```
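A sketch of how a caller holding an `api.StorageMiner` handle (for example one obtained via the RPC client shown earlier) might feed a deal's piece through this method; the `addPiece` helper and its inputs are illustrative and not part of this PR:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
)

// addPiece is a sketch: minerAPI, pieceData, size and deal are assumed to be
// supplied by the caller; only the SectorAddPieceToAny call shape comes from this PR.
func addPiece(ctx context.Context, minerAPI api.StorageMiner, pieceData storage.Data, size abi.UnpaddedPieceSize, deal api.PieceDealInfo) error {
	so, err := minerAPI.SectorAddPieceToAny(ctx, size, pieceData, deal)
	if err != nil {
		return err // the call may block until a sector with enough free space exists
	}
	fmt.Printf("piece added to sector %d at offset %d\n", so.Sector, so.Offset)
	return nil
}
```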

### SectorCommitFlush
SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
Returns null if message wasn't sent
@@ -1861,6 +1911,30 @@ Response:
}
```

### SectorsUnsealPiece

Perms: admin

Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
1040384,
1024,
null,
null
]
```

Response: `{}`

### SectorsUpdate

@@ -57,6 +57,7 @@ USAGE:

COMMANDS:
restore Initialize a lotus miner repo from a backup
service Initialize a lotus miner sub-service
help, h Shows a list of commands or help for one command

OPTIONS:
@@ -93,6 +94,24 @@ OPTIONS:

```

### lotus-miner init service
```
NAME:
lotus-miner init service - Initialize a lotus miner sub-service

USAGE:
lotus-miner init service [command options] [backupFile]

OPTIONS:
--config value config file (config.toml)
--nosync don't check full-node sync status (default: false)
--type value type of service to be enabled
--api-sealer value sealer API info (lotus-miner auth api-info --perm=admin)
--api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin)
--help, -h show help (default: false)

```

## lotus-miner run
```
NAME:
@@ -11,7 +11,6 @@ import (
"os"
"runtime"

"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

@@ -24,6 +23,7 @@ import (
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

@@ -1,13 +1,13 @@
package ffiwrapper

import (
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"golang.org/x/xerrors"

rlepluslazy "github.com/filecoin-project/go-bitfield/rle"

"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

extern/sector-storage/manager.go (2 changed lines, vendored)
@ -29,8 +29,6 @@ var log = logging.Logger("advmgr")
|
||||
|
||||
var ErrNoWorkers = errors.New("no suitable workers found")
|
||||
|
||||
type URLs []string
|
||||
|
||||
type Worker interface {
|
||||
storiface.WorkerCalls
|
||||
|
||||
|
8
extern/sector-storage/mock/mock.go
vendored
8
extern/sector-storage/mock/mock.go
vendored
@ -75,6 +75,10 @@ func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error {
|
||||
panic("SectorMgr: unsealing piece: implement me")
|
||||
}
|
||||
|
||||
func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
|
||||
log.Warn("Add piece: ", sectorID, size, sectorID.ProofType)
|
||||
|
||||
@ -496,10 +500,6 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
|
||||
panic("not supported")
|
||||
}
|
||||
|
||||
func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
|
||||
plen, err := svi.SealProof.ProofSize()
|
||||
if err != nil {
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"syscall"
|
||||
|
||||
"github.com/detailyang/go-fallocate"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
|
||||
@ -15,6 +14,8 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("partialfile")
|
||||
|
7
extern/sector-storage/stores/http_handler.go
vendored
7
extern/sector-storage/stores/http_handler.go
vendored
@ -7,12 +7,12 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
|
||||
"github.com/gorilla/mux"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
|
||||
|
||||
@ -53,11 +53,10 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
mux := mux.NewRouter()
|
||||
|
||||
mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
|
||||
mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
|
||||
mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
|
||||
mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
|
||||
|
||||
mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
|
||||
|
||||
mux.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
|
2
extern/sector-storage/stores/index.go
vendored
2
extern/sector-storage/stores/index.go
vendored
@ -66,6 +66,8 @@ type SectorIndex interface { // part of storage-miner api
|
||||
// atomically acquire locks on all sector file types. close ctx to unlock
|
||||
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error
|
||||
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error)
|
||||
|
||||
StorageList(ctx context.Context) (map[ID][]Decl, error)
|
||||
}
|
||||
|
||||
type Decl struct {
|
||||
|
2
extern/sector-storage/stores/local.go
vendored
2
extern/sector-storage/stores/local.go
vendored
@ -158,6 +158,8 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s
|
||||
return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid))
|
||||
}
|
||||
|
||||
type URLs []string
|
||||
|
||||
func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
|
||||
l := &Local{
|
||||
localStorage: ls,
|
||||
|
17
extern/sector-storage/stores/mocks/index.go
vendored
17
extern/sector-storage/stores/mocks/index.go
vendored
@ -1,7 +1,7 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: index.go
|
||||
|
||||
// Package mock_stores is a generated GoMock package.
|
||||
// Package mocks is a generated GoMock package.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
@ -125,6 +125,21 @@ func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomo
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1)
|
||||
}
|
||||
|
||||
// StorageList mocks base method.
|
||||
func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StorageList", ctx)
|
||||
ret0, _ := ret[0].(map[stores.ID][]stores.Decl)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StorageList indicates an expected call of StorageList.
|
||||
func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx)
|
||||
}
|
||||
|
||||
// StorageLock mocks base method.
|
||||
func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error {
|
||||
m.ctrl.T.Helper()
|
||||
|
2
extern/sector-storage/stores/mocks/stores.go
vendored
2
extern/sector-storage/stores/mocks/stores.go
vendored
@ -1,7 +1,7 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: interface.go
|
||||
|
||||
// Package mock_stores is a generated GoMock package.
|
||||
// Package mocks is a generated GoMock package.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
|
51
extern/sector-storage/stores/remote.go
vendored
51
extern/sector-storage/stores/remote.go
vendored
@ -297,6 +297,32 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) {
|
||||
url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded())
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("request: %w", err)
|
||||
}
|
||||
req.Header = r.auth.Clone()
|
||||
fmt.Printf("req using header: %#v \n", r.auth)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("do request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close() // nolint
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
return true, nil
|
||||
case http.StatusRequestedRangeNotSatisfiable:
|
||||
return false, nil
|
||||
default:
|
||||
return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
|
||||
// Make sure we have the data local
|
||||
_, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
|
||||
@ -419,31 +445,6 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) {
|
||||
url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded())
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("request: %w", err)
|
||||
}
|
||||
req.Header = r.auth.Clone()
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("do request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close() // nolint
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
return true, nil
|
||||
case http.StatusRequestedRangeNotSatisfiable:
|
||||
return false, nil
|
||||
default:
|
||||
return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) {
|
||||
if len(r.limit) >= cap(r.limit) {
|
||||
log.Infof("Throttling remote read, %d already running", len(r.limit))
|
||||
|
extern/storage-sealing/cbor_gen.go (386 changes, vendored)
@ -8,7 +8,7 @@ import (
|
||||
"sort"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
market "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
api "github.com/filecoin-project/lotus/api"
|
||||
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
@ -46,7 +46,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealInfo (sealing.DealInfo) (struct)
|
||||
// t.DealInfo (api.PieceDealInfo) (struct)
|
||||
if len("DealInfo") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealInfo\" was too long")
|
||||
}
|
||||
@ -107,7 +107,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
|
||||
}
|
||||
|
||||
}
|
||||
// t.DealInfo (sealing.DealInfo) (struct)
|
||||
// t.DealInfo (api.PieceDealInfo) (struct)
|
||||
case "DealInfo":
|
||||
|
||||
{
|
||||
@ -120,7 +120,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
|
||||
if err := br.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
t.DealInfo = new(DealInfo)
|
||||
t.DealInfo = new(api.PieceDealInfo)
|
||||
if err := t.DealInfo.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err)
|
||||
}
|
||||
@ -136,384 +136,6 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *DealInfo) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte{165}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.PublishCid (cid.Cid) (struct)
|
||||
if len("PublishCid") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("PublishCid")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.PublishCid == nil {
|
||||
if _, err := w.Write(cbg.CborNull); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
if len("DealID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealID\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealID")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealProposal (market.DealProposal) (struct)
|
||||
if len("DealProposal") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.DealProposal.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealSchedule (sealing.DealSchedule) (struct)
|
||||
if len("DealSchedule") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.DealSchedule.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.KeepUnsealed (bool) (bool)
|
||||
if len("KeepUnsealed") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
|
||||
*t = DealInfo{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajMap {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("DealInfo: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.PublishCid (cid.Cid) (struct)
|
||||
case "PublishCid":
|
||||
|
||||
{
|
||||
|
||||
b, err := br.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := br.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := cbg.ReadCid(br)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
|
||||
}
|
||||
|
||||
t.PublishCid = &c
|
||||
}
|
||||
|
||||
}
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
case "DealID":
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.DealID = abi.DealID(extra)
|
||||
|
||||
}
|
||||
// t.DealProposal (market.DealProposal) (struct)
|
||||
case "DealProposal":
|
||||
|
||||
{
|
||||
|
||||
b, err := br.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := br.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
t.DealProposal = new(market.DealProposal)
|
||||
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// t.DealSchedule (sealing.DealSchedule) (struct)
|
||||
case "DealSchedule":
|
||||
|
||||
{
|
||||
|
||||
if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.KeepUnsealed (bool) (bool)
|
||||
case "KeepUnsealed":
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajOther {
|
||||
return fmt.Errorf("booleans must be major type 7")
|
||||
}
|
||||
switch extra {
|
||||
case 20:
|
||||
t.KeepUnsealed = false
|
||||
case 21:
|
||||
t.KeepUnsealed = true
|
||||
default:
|
||||
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte{162}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
if len("StartEpoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.StartEpoch >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.EndEpoch (abi.ChainEpoch) (int64)
|
||||
if len("EndEpoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.EndEpoch >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
|
||||
*t = DealSchedule{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajMap {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
case "StartEpoch":
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.StartEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.EndEpoch (abi.ChainEpoch) (int64)
|
||||
case "EndEpoch":
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.EndEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
|
extern/storage-sealing/gen/main.go (2 changes, vendored)
@ -12,8 +12,6 @@ import (
|
||||
func main() {
|
||||
err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing",
|
||||
sealing.Piece{},
|
||||
sealing.DealInfo{},
|
||||
sealing.DealSchedule{},
|
||||
sealing.SectorInfo{},
|
||||
sealing.Log{},
|
||||
)
|
||||
|
extern/storage-sealing/input.go (19 changes, vendored)
@ -14,6 +14,7 @@ import (
|
||||
"github.com/filecoin-project/go-statemachine"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
@ -236,34 +237,34 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
|
||||
func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
|
||||
log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid)
|
||||
if (padreader.PaddedSize(uint64(size))) != size {
|
||||
return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
|
||||
return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece")
|
||||
}
|
||||
|
||||
sp, err := m.currentSealProof(ctx)
|
||||
if err != nil {
|
||||
return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err)
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err)
|
||||
}
|
||||
|
||||
ssize, err := sp.SectorSize()
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
|
||||
if size > abi.PaddedPieceSize(ssize).Unpadded() {
|
||||
return 0, 0, xerrors.Errorf("piece cannot fit into a sector")
|
||||
return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector")
|
||||
}
|
||||
|
||||
if _, err := deal.DealProposal.Cid(); err != nil {
|
||||
return 0, 0, xerrors.Errorf("getting proposal CID: %w", err)
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err)
|
||||
}
|
||||
|
||||
m.inputLk.Lock()
|
||||
if _, exist := m.pendingPieces[proposalCID(deal)]; exist {
|
||||
m.inputLk.Unlock()
|
||||
return 0, 0, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal))
|
||||
return api.SectorOffset{}, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal))
|
||||
}
|
||||
|
||||
resCh := make(chan struct {
|
||||
@ -295,7 +296,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
|
||||
|
||||
res := <-resCh
|
||||
|
||||
return res.sn, res.offset.Padded(), res.err
|
||||
return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
|
||||
}
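A hedged caller-side sketch, not part of this diff: with the method renamed to SectorAddPieceToAny and the new api.SectorOffset return, a markets process holding a StorageMiner RPC client to the sealing node could hand off a deal piece roughly like this. The helper name and error wrapping are illustrative.

package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
)

// addDealPiece hands a deal's piece to the sealing/mining node over the
// StorageMiner API and returns where it landed (sector number + padded offset).
func addDealPiece(ctx context.Context, sealer api.StorageMiner, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
	so, err := sealer.SectorAddPieceToAny(ctx, size, data, deal)
	if err != nil {
		return api.SectorOffset{}, xerrors.Errorf("adding piece to a sector: %w", err)
	}
	return so, nil
}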
|
||||
|
||||
// called with m.inputLk
|
||||
@ -454,7 +455,7 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
|
||||
return m.sectors.Send(uint64(sid), SectorStartPacking{})
|
||||
}
|
||||
|
||||
func proposalCID(deal DealInfo) cid.Cid {
|
||||
func proposalCID(deal api.PieceDealInfo) cid.Cid {
|
||||
pc, err := deal.DealProposal.Cid()
|
||||
if err != nil {
|
||||
log.Errorf("DealProposal.Cid error: %+v", err)
|
||||
|
extern/storage-sealing/precommit_policy_test.go (17 changes, vendored)
@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
api "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -58,9 +59,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) {
|
||||
Size: abi.PaddedPieceSize(1024),
|
||||
PieceCID: fakePieceCid(t),
|
||||
},
|
||||
DealInfo: &sealing.DealInfo{
|
||||
DealInfo: &api.PieceDealInfo{
|
||||
DealID: abi.DealID(42),
|
||||
DealSchedule: sealing.DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: abi.ChainEpoch(70),
|
||||
EndEpoch: abi.ChainEpoch(75),
|
||||
},
|
||||
@ -71,9 +72,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) {
|
||||
Size: abi.PaddedPieceSize(1024),
|
||||
PieceCID: fakePieceCid(t),
|
||||
},
|
||||
DealInfo: &sealing.DealInfo{
|
||||
DealInfo: &api.PieceDealInfo{
|
||||
DealID: abi.DealID(43),
|
||||
DealSchedule: sealing.DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: abi.ChainEpoch(80),
|
||||
EndEpoch: abi.ChainEpoch(100),
|
||||
},
|
||||
@ -98,9 +99,9 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) {
|
||||
Size: abi.PaddedPieceSize(1024),
|
||||
PieceCID: fakePieceCid(t),
|
||||
},
|
||||
DealInfo: &sealing.DealInfo{
|
||||
DealInfo: &api.PieceDealInfo{
|
||||
DealID: abi.DealID(44),
|
||||
DealSchedule: sealing.DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: abi.ChainEpoch(1),
|
||||
EndEpoch: abi.ChainEpoch(10),
|
||||
},
|
||||
@ -125,9 +126,9 @@ func TestMissingDealIsIgnored(t *testing.T) {
|
||||
Size: abi.PaddedPieceSize(1024),
|
||||
PieceCID: fakePieceCid(t),
|
||||
},
|
||||
DealInfo: &sealing.DealInfo{
|
||||
DealInfo: &api.PieceDealInfo{
|
||||
DealID: abi.DealID(44),
|
||||
DealSchedule: sealing.DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: abi.ChainEpoch(1),
|
||||
EndEpoch: abi.ChainEpoch(10),
|
||||
},
|
||||
|
extern/storage-sealing/sealing.go (2 changes, vendored)
@ -124,7 +124,7 @@ type openSector struct {
|
||||
|
||||
type pendingPiece struct {
|
||||
size abi.UnpaddedPieceSize
|
||||
deal DealInfo
|
||||
deal api.PieceDealInfo
|
||||
|
||||
data storage.Data
|
||||
|
||||
|
extern/storage-sealing/states_proving.go (19 changes, vendored)
@ -126,3 +126,22 @@ func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) er
|
||||
|
||||
return ctx.Send(SectorRemoved{})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error {
|
||||
// TODO: track sector health / expiration
|
||||
log.Infof("Proving sector %d", sector.SectorNumber)
|
||||
|
||||
cfg, err := m.getConfig()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting sealing config: %w", err)
|
||||
}
|
||||
|
||||
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// TODO: Watch termination
|
||||
// TODO: Auto-extend if set
|
||||
|
||||
return nil
|
||||
}
|
||||
|
extern/storage-sealing/states_sealing.go (19 changes, vendored)
@ -739,22 +739,3 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn
|
||||
|
||||
return ctx.Send(SectorFinalized{})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error {
|
||||
// TODO: track sector health / expiration
|
||||
log.Infof("Proving sector %d", sector.SectorNumber)
|
||||
|
||||
cfg, err := m.getConfig()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting sealing config: %w", err)
|
||||
}
|
||||
|
||||
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// TODO: Watch termination
|
||||
// TODO: Auto-extend if set
|
||||
|
||||
return nil
|
||||
}
|
||||
|
extern/storage-sealing/types.go (23 changes, vendored)
@ -11,39 +11,22 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
)
|
||||
|
||||
// Piece is a tuple of piece and deal info
|
||||
type PieceWithDealInfo struct {
|
||||
Piece abi.PieceInfo
|
||||
DealInfo DealInfo
|
||||
DealInfo api.PieceDealInfo
|
||||
}
|
||||
|
||||
// Piece is a tuple of piece info and optional deal
|
||||
type Piece struct {
|
||||
Piece abi.PieceInfo
|
||||
DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
|
||||
}
|
||||
|
||||
// DealInfo is a tuple of deal identity and its schedule
|
||||
type DealInfo struct {
|
||||
PublishCid *cid.Cid
|
||||
DealID abi.DealID
|
||||
DealProposal *market.DealProposal
|
||||
DealSchedule DealSchedule
|
||||
KeepUnsealed bool
|
||||
}
|
||||
|
||||
// DealSchedule communicates the time interval of a storage deal. The deal must
|
||||
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
|
||||
// is invalid.
|
||||
type DealSchedule struct {
|
||||
StartEpoch abi.ChainEpoch
|
||||
EndEpoch abi.ChainEpoch
|
||||
DealInfo *api.PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
|
||||
}
|
||||
|
||||
type Log struct {
|
||||
|
extern/storage-sealing/types_test.go (5 changes, vendored)
@ -10,6 +10,7 @@ import (
|
||||
|
||||
cborutil "github.com/filecoin-project/go-cbor-util"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
api "github.com/filecoin-project/lotus/api"
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
|
||||
)
|
||||
@ -22,9 +23,9 @@ func TestSectorInfoSerialization(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dealInfo := DealInfo{
|
||||
dealInfo := api.PieceDealInfo{
|
||||
DealID: d,
|
||||
DealSchedule: DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: 0,
|
||||
EndEpoch: 100,
|
||||
},
|
||||
|
@ -53,6 +53,8 @@ func main() {
|
||||
api.SealedRefs{},
|
||||
api.SealTicket{},
|
||||
api.SealSeed{},
|
||||
api.PieceDealInfo{},
|
||||
api.DealSchedule{},
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
|
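Since PieceDealInfo and DealSchedule now get their CBOR encoders generated in the api package, a quick round-trip looks like the standalone sketch below; it is an assumed snippet, not part of the diff, and only exercises the generated MarshalCBOR/UnmarshalCBOR methods.

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	in := api.DealSchedule{StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(75)}

	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil { // generated by cbor-gen
		panic(err)
	}

	var out api.DealSchedule
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.StartEpoch, out.EndEpoch) // 70 75
}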
@ -186,6 +186,7 @@ func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
|
||||
ens.Miner(&newMiner, full,
|
||||
kit.OwnerAddr(full.DefaultKey),
|
||||
kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs.
|
||||
kit.WithAllSubsystems(),
|
||||
).Start().InterconnectAll()
|
||||
|
||||
ta, err := newMiner.ActorAddress(ctx)
|
||||
|
@ -58,7 +58,7 @@ func TestBatchDealInput(t *testing.T) {
|
||||
))
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
|
||||
require.NoError(t, err)
|
||||
|
@ -61,7 +61,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
|
||||
err = miner.SectorMarkForUpgrade(ctx, sl[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
|
||||
Rseed: 6,
|
||||
SuspendUntilCryptoeconStable: true,
|
||||
|
@ -76,6 +76,7 @@ func TestDeadlineToggling(t *testing.T) {
|
||||
minerE kit.TestMiner
|
||||
)
|
||||
opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))}
|
||||
opts = append(opts, kit.WithAllSubsystems())
|
||||
ens := kit.NewEnsemble(t, kit.MockProofs()).
|
||||
FullNode(&client, opts...).
|
||||
Miner(&minerA, &client, opts...).
|
||||
|
@ -12,6 +12,8 @@ import (
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
@ -19,6 +21,53 @@ import (
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
// TestDealWithMarketAndMinerNode concurrently runs a number of storage and retrieval deals against a miner
// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node
|
||||
func TestDealWithMarketAndMinerNode(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
oldDelay := policy.GetPreCommitChallengeDelay()
|
||||
policy.SetPreCommitChallengeDelay(5)
|
||||
t.Cleanup(func() {
|
||||
policy.SetPreCommitChallengeDelay(oldDelay)
|
||||
})
|
||||
|
||||
// For these tests where the block time is artificially short, just use
|
||||
// a deal start epoch that is guaranteed to be far enough in the future
|
||||
// so that the deal starts sealing in time
|
||||
startEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
|
||||
api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me
|
||||
|
||||
client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC())
|
||||
|
||||
dh := kit.NewDealHarness(t, client, main, market)
|
||||
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
|
||||
N: n,
|
||||
FastRetrieval: fastRetrieval,
|
||||
CarExport: carExport,
|
||||
StartEpoch: startEpoch,
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175#
|
||||
cycles := []int{1}
|
||||
for _, n := range cycles {
|
||||
n := n
|
||||
ns := fmt.Sprintf("%d", n)
|
||||
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
|
||||
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
|
||||
}
|
||||
}
|
||||
|
||||
func TestDealCyclesConcurrent(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
@ -36,7 +85,7 @@ func TestDealCyclesConcurrent(t *testing.T) {
|
||||
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
|
||||
N: n,
|
||||
@ -77,7 +126,7 @@ func TestSimultenousTransferLimit(t *testing.T) {
|
||||
node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(2))),
|
||||
))
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
|
@ -28,7 +28,7 @@ func TestOfflineDealFlow(t *testing.T) {
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
|
||||
ens.InterconnectAll().BeginMining(blocktime)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
// Create a random file and import on the client.
|
||||
res, inFile := client.CreateImportFile(ctx, 1, 0)
|
||||
|
@ -24,13 +24,13 @@ func TestFirstDealEnablesMining(t *testing.T) {
|
||||
|
||||
ens := kit.NewEnsemble(t, kit.MockProofs())
|
||||
ens.FullNode(&client)
|
||||
ens.Miner(&genMiner, &client)
|
||||
ens.Miner(&provider, &client, kit.PresealSectors(0))
|
||||
ens.Miner(&genMiner, &client, kit.WithAllSubsystems())
|
||||
ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0))
|
||||
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
dh := kit.NewDealHarness(t, &client, &provider)
|
||||
dh := kit.NewDealHarness(t, &client, &provider, &provider)
|
||||
|
||||
ref, _ := client.CreateImportFile(ctx, 5, 0)
|
||||
|
||||
|
@ -35,7 +35,7 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
|
||||
err = miner.MarketSetRetrievalAsk(ctx, ask)
|
||||
require.NoError(t, err)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
|
||||
|
||||
@ -123,7 +123,7 @@ func TestZeroPricePerByteRetrieval(t *testing.T) {
|
||||
err = miner.MarketSetRetrievalAsk(ctx, ask)
|
||||
require.NoError(t, err)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
|
||||
N: 1,
|
||||
StartEpoch: startEpoch,
|
||||
|
@ -60,7 +60,7 @@ func TestPublishDealsBatching(t *testing.T) {
|
||||
|
||||
miner.SetControlAddresses(publisherKey.Address)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
// Starts a deal and waits until it's published
|
||||
runDealTillPublish := func(rseed int) {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
)
|
||||
|
||||
@ -14,11 +15,17 @@ func TestDealsWithSealingAndRPC(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
oldDelay := policy.GetPreCommitChallengeDelay()
|
||||
policy.SetPreCommitChallengeDelay(5)
|
||||
t.Cleanup(func() {
|
||||
policy.SetPreCommitChallengeDelay(oldDelay)
|
||||
})
|
||||
|
||||
var blockTime = 50 * time.Millisecond
|
||||
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) // no mock proofs.
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
t.Run("stdretrieval", func(t *testing.T) {
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -192,7 +193,7 @@ func TestGatewayDealFlow(t *testing.T) {
|
||||
// so that the deal starts sealing in time
|
||||
dealStartEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
dh := kit.NewDealHarness(t, nodes.lite, nodes.miner)
|
||||
dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner)
|
||||
dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
|
||||
Rseed: 6,
|
||||
StartEpoch: dealStartEpoch,
|
||||
@ -270,7 +271,10 @@ func startNodes(
|
||||
handler, err := gateway.Handler(gwapi)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, _ := kit.CreateRPCServer(t, handler)
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, _ := kit.CreateRPCServer(t, handler, l)
|
||||
|
||||
// Create a gateway client API that connects to the gateway server
|
||||
var gapi api.Gateway
|
||||
|
@ -28,7 +28,8 @@ import (
|
||||
type DealHarness struct {
|
||||
t *testing.T
|
||||
client *TestFullNode
|
||||
miner *TestMiner
|
||||
main *TestMiner
|
||||
market *TestMiner
|
||||
}
|
||||
|
||||
type MakeFullDealParams struct {
|
||||
@ -62,11 +63,12 @@ type MakeFullDealParams struct {
|
||||
}
|
||||
|
||||
// NewDealHarness creates a test harness that contains testing utilities for deals.
|
||||
func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
|
||||
func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness {
|
||||
return &DealHarness{
|
||||
t: t,
|
||||
client: client,
|
||||
miner: miner,
|
||||
main: main,
|
||||
market: market,
|
||||
}
|
||||
}
|
||||
|
||||
@ -97,7 +99,7 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
|
||||
|
||||
// StartDeal starts a storage deal between the client and the miner.
|
||||
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
|
||||
maddr, err := dh.miner.ActorAddress(ctx)
|
||||
maddr, err := dh.main.ActorAddress(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
addr, err := dh.client.WalletDefaultAddress(ctx)
|
||||
@ -146,7 +148,7 @@ loop:
|
||||
break loop
|
||||
}
|
||||
|
||||
mds, err := dh.miner.MarketListIncompleteDeals(ctx)
|
||||
mds, err := dh.market.MarketListIncompleteDeals(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
var minerState storagemarket.StorageDealStatus
|
||||
@ -170,7 +172,7 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
updates, err := dh.miner.MarketGetDealUpdates(subCtx)
|
||||
updates, err := dh.market.MarketGetDealUpdates(subCtx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
for {
|
||||
@ -197,19 +199,19 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
|
||||
}
|
||||
|
||||
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
|
||||
snums, err := dh.miner.SectorsList(ctx)
|
||||
snums, err := dh.main.SectorsList(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
for _, snum := range snums {
|
||||
si, err := dh.miner.SectorsStatus(ctx, snum, false)
|
||||
si, err := dh.main.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
dh.t.Logf("Sector state: %s", si.State)
|
||||
if si.State == api.SectorState(sealing.WaitDeals) {
|
||||
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
|
||||
require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum))
|
||||
}
|
||||
|
||||
dh.miner.FlushSealingBatches(ctx)
|
||||
dh.main.FlushSealingBatches(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4,7 +4,9 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@ -194,6 +196,10 @@ func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt)
|
||||
actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
|
||||
require.NoError(n.t, err)
|
||||
|
||||
if options.mainMiner != nil {
|
||||
actorAddr = options.mainMiner.ActorAddr
|
||||
}
|
||||
|
||||
ownerKey := options.ownerKey
|
||||
if !n.bootstrapped {
|
||||
var (
|
||||
@ -228,13 +234,17 @@ func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt)
|
||||
require.NotNil(n.t, ownerKey, "worker key can't be null if initializing a miner after genesis")
|
||||
}
|
||||
|
||||
rl, err := net.Listen("tcp", "127.0.0.1:")
|
||||
require.NoError(n.t, err)
|
||||
|
||||
*miner = TestMiner{
|
||||
t: n.t,
|
||||
ActorAddr: actorAddr,
|
||||
OwnerKey: ownerKey,
|
||||
FullNode: full,
|
||||
PresealDir: tdir,
|
||||
options: options,
|
||||
t: n.t,
|
||||
ActorAddr: actorAddr,
|
||||
OwnerKey: ownerKey,
|
||||
FullNode: full,
|
||||
PresealDir: tdir,
|
||||
options: options,
|
||||
RemoteListener: rl,
|
||||
}
|
||||
|
||||
miner.Libp2p.PeerID = peerId
|
||||
@ -263,10 +273,11 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
|
||||
// Create all inactive full nodes.
|
||||
for i, full := range n.inactive.fullnodes {
|
||||
r := repo.NewMemory(nil)
|
||||
opts := []node.Option{
|
||||
node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
|
||||
node.Online(),
|
||||
node.Repo(repo.NewMemory(nil)),
|
||||
node.Base(),
|
||||
node.Repo(r),
|
||||
node.MockHost(n.mn),
|
||||
node.Test(),
|
||||
|
||||
@ -334,39 +345,56 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
// Create all inactive miners.
|
||||
for i, m := range n.inactive.miners {
|
||||
if n.bootstrapped {
|
||||
// this is a miner created after genesis, so it won't have a preseal.
|
||||
// we need to create it on chain.
|
||||
params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
|
||||
Owner: m.OwnerKey.Address,
|
||||
Worker: m.OwnerKey.Address,
|
||||
SealProofType: m.options.proofType,
|
||||
Peer: abi.PeerID(m.Libp2p.PeerID),
|
||||
})
|
||||
require.NoError(n.t, aerr)
|
||||
if m.options.mainMiner == nil {
|
||||
// this is a miner created after genesis, so it won't have a preseal.
|
||||
// we need to create it on chain.
|
||||
params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
|
||||
Owner: m.OwnerKey.Address,
|
||||
Worker: m.OwnerKey.Address,
|
||||
SealProofType: m.options.proofType,
|
||||
Peer: abi.PeerID(m.Libp2p.PeerID),
|
||||
})
|
||||
require.NoError(n.t, aerr)
|
||||
|
||||
createStorageMinerMsg := &types.Message{
|
||||
From: m.OwnerKey.Address,
|
||||
To: power.Address,
|
||||
Value: big.Zero(),
|
||||
createStorageMinerMsg := &types.Message{
|
||||
From: m.OwnerKey.Address,
|
||||
To: power.Address,
|
||||
Value: big.Zero(),
|
||||
|
||||
Method: power.Methods.CreateMiner,
|
||||
Params: params,
|
||||
Method: power.Methods.CreateMiner,
|
||||
Params: params,
|
||||
}
|
||||
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
GasLimit: 0,
|
||||
GasPremium: big.NewInt(5252),
|
||||
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||
require.NoError(n.t, err)
|
||||
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
|
||||
|
||||
var retval power2.CreateMinerReturn
|
||||
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
|
||||
require.NoError(n.t, err, "failed to create miner")
|
||||
|
||||
m.ActorAddr = retval.IDAddress
|
||||
} else {
|
||||
params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
|
||||
require.NoError(n.t, err)
|
||||
|
||||
msg := &types.Message{
|
||||
To: m.options.mainMiner.ActorAddr,
|
||||
From: m.options.mainMiner.OwnerKey.Address,
|
||||
Method: miner.Methods.ChangePeerID,
|
||||
Params: params,
|
||||
Value: types.NewInt(0),
|
||||
}
|
||||
|
||||
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(n.t, err2)
|
||||
|
||||
mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||
require.NoError(n.t, err2)
|
||||
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
|
||||
}
|
||||
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||
require.NoError(n.t, err)
|
||||
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
|
||||
|
||||
var retval power2.CreateMinerReturn
|
||||
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
|
||||
require.NoError(n.t, err, "failed to create miner")
|
||||
|
||||
m.ActorAddr = retval.IDAddress
|
||||
}
|
||||
|
||||
has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
|
||||
@ -388,6 +416,36 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
lr, err := r.Lock(repo.StorageMiner)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
c, err := lr.Config()
|
||||
require.NoError(n.t, err)
|
||||
|
||||
cfg, ok := c.(*config.StorageMiner)
|
||||
if !ok {
|
||||
n.t.Fatalf("invalid config from repo, got: %T", c)
|
||||
}
|
||||
cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String()
|
||||
cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets)
|
||||
cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining)
|
||||
cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing)
|
||||
cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage)
|
||||
|
||||
if m.options.mainMiner != nil {
|
||||
token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
|
||||
cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
|
||||
|
||||
fmt.Println("config for market node, setting SectorIndexApiInfo to: ", cfg.Subsystems.SectorIndexApiInfo)
|
||||
fmt.Println("config for market node, setting SealerApiInfo to: ", cfg.Subsystems.SealerApiInfo)
|
||||
}
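Outside the test kit, the same split can be expressed directly in the miner config. The sketch below writes the equivalent values in Go, assuming config.DefaultStorageMiner and the Subsystems fields used above; the token and multiaddress are placeholders.

package example

import "github.com/filecoin-project/lotus/node/config"

// marketsNodeConfig sketches the configuration a markets-only process would
// carry: markets enabled, mining/sealing/sector-storage disabled, and both the
// sealer and sector-index API info pointing at the main miner ("TOKEN:<multiaddress>").
func marketsNodeConfig(apiInfo string) *config.StorageMiner {
	cfg := config.DefaultStorageMiner()
	cfg.Subsystems.EnableMarkets = true
	cfg.Subsystems.EnableMining = false
	cfg.Subsystems.EnableSealing = false
	cfg.Subsystems.EnableSectorStorage = false
	cfg.Subsystems.SealerApiInfo = apiInfo // e.g. "TOKEN:/ip4/127.0.0.1/tcp/2345/http"
	cfg.Subsystems.SectorIndexApiInfo = apiInfo
	return cfg
}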
|
||||
|
||||
err = lr.SetConfig(func(raw interface{}) {
|
||||
rcfg := raw.(*config.StorageMiner)
|
||||
*rcfg = *cfg
|
||||
})
|
||||
require.NoError(n.t, err)
|
||||
|
||||
ks, err := lr.KeyStore()
|
||||
require.NoError(n.t, err)
|
||||
|
||||
@ -417,28 +475,30 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
err = lr.Close()
|
||||
require.NoError(n.t, err)
|
||||
|
||||
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
|
||||
require.NoError(n.t, err)
|
||||
if m.options.mainMiner == nil {
|
||||
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
|
||||
require.NoError(n.t, err)
|
||||
|
||||
msg := &types.Message{
|
||||
From: m.OwnerKey.Address,
|
||||
To: m.ActorAddr,
|
||||
Method: miner.Methods.ChangePeerID,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
msg := &types.Message{
|
||||
From: m.OwnerKey.Address,
|
||||
To: m.ActorAddr,
|
||||
Method: miner.Methods.ChangePeerID,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
}
|
||||
|
||||
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(n.t, err2)
|
||||
}
|
||||
|
||||
_, err = m.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
var mineBlock = make(chan lotusminer.MineReq)
|
||||
opts := []node.Option{
|
||||
node.StorageMiner(&m.StorageMiner),
|
||||
node.Online(),
|
||||
node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
|
||||
node.Base(),
|
||||
node.Repo(r),
|
||||
node.Test(),
|
||||
|
||||
node.MockHost(n.mn),
|
||||
node.If(!m.options.disableLibp2p, node.MockHost(n.mn)),
|
||||
|
||||
node.Override(new(v1api.FullNode), m.FullNode.FullNode),
|
||||
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
|
||||
|
@ -1,6 +1,9 @@
|
||||
package kit
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
|
||||
// It does not interconnect nodes nor does it begin mining.
|
||||
@ -8,6 +11,8 @@ import "testing"
|
||||
// This function supports passing both ensemble and node functional options.
|
||||
// Functional options are applied to all nodes.
|
||||
func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) {
|
||||
opts = append(opts, WithAllSubsystems())
|
||||
|
||||
eopts, nopts := siftOptions(t, opts)
|
||||
|
||||
var (
|
||||
@ -18,12 +23,37 @@ func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMin
|
||||
return &full, &miner, ens
|
||||
}
|
||||
|
||||
func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
|
||||
eopts, nopts := siftOptions(t, opts)
|
||||
|
||||
var (
|
||||
fullnode TestFullNode
|
||||
main, market TestMiner
|
||||
)
|
||||
|
||||
mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()}
|
||||
mainNodeOpts = append(mainNodeOpts, nopts...)
|
||||
|
||||
blockTime := 100 * time.Millisecond
|
||||
ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start()
|
||||
ens.BeginMining(blockTime)
|
||||
|
||||
marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)}
|
||||
marketNodeOpts = append(marketNodeOpts, nopts...)
|
||||
|
||||
ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode)
|
||||
|
||||
return &fullnode, &main, &market, ens
|
||||
}
|
||||
|
||||
// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
|
||||
// It does not interconnect nodes nor does it begin mining.
|
||||
//
|
||||
// This function supports passing both ensemble and node functional options.
|
||||
// Functional options are applied to all nodes.
|
||||
func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
|
||||
opts = append(opts, WithAllSubsystems())
|
||||
|
||||
eopts, nopts := siftOptions(t, opts)
|
||||
|
||||
var (
|
||||
@ -40,6 +70,8 @@ func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFull
|
||||
// This function supports passing both ensemble and node functional options.
|
||||
// Functional options are applied to all nodes.
|
||||
func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
|
||||
opts = append(opts, WithAllSubsystems())
|
||||
|
||||
eopts, nopts := siftOptions(t, opts)
|
||||
|
||||
var (
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -27,6 +28,35 @@ import (
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
type MinerSubsystem int
|
||||
|
||||
const (
|
||||
SMarkets MinerSubsystem = 1 << iota
|
||||
SMining
|
||||
SSealing
|
||||
SSectorStorage
|
||||
|
||||
MinerSubsystems = iota
|
||||
)
|
||||
|
||||
func (ms MinerSubsystem) Add(single MinerSubsystem) MinerSubsystem {
|
||||
return ms | single
|
||||
}
|
||||
|
||||
func (ms MinerSubsystem) Has(single MinerSubsystem) bool {
|
||||
return ms&single == single
|
||||
}
|
||||
|
||||
func (ms MinerSubsystem) All() [MinerSubsystems]bool {
|
||||
var out [MinerSubsystems]bool
|
||||
|
||||
for i := range out {
|
||||
out[i] = ms&(1<<i) > 0
|
||||
}
|
||||
|
||||
return out
|
||||
}
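A brief usage sketch of the bitmask above (assuming the itests/kit package path shown in this diff): each subsystem is a single bit, and All() expands the mask into one bool per subsystem.

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/itests/kit"
)

func main() {
	ms := kit.SMarkets.Add(kit.SMining)

	fmt.Println(ms.Has(kit.SMarkets))       // true
	fmt.Println(ms.Has(kit.SSectorStorage)) // false
	fmt.Println(ms.All())                   // [true true false false]
}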
|
||||
|
||||
// TestMiner represents a miner enrolled in an Ensemble.
|
||||
type TestMiner struct {
|
||||
api.StorageMiner
|
||||
@ -50,6 +80,8 @@ type TestMiner struct {
|
||||
PrivKey libp2pcrypto.PrivKey
|
||||
}
|
||||
|
||||
RemoteListener net.Listener
|
||||
|
||||
options nodeOpts
|
||||
}
|
||||
|
||||
|
@ -25,6 +25,10 @@ type nodeOpts struct {
|
||||
rpc bool
|
||||
ownerKey *wallet.Key
|
||||
extraNodeOpts []node.Option
|
||||
|
||||
subsystems MinerSubsystem
|
||||
mainMiner *TestMiner
|
||||
disableLibp2p bool
|
||||
optBuilders []OptBuilder
|
||||
proofType abi.RegisteredSealProof
|
||||
}
|
||||
@ -43,6 +47,40 @@ type OptBuilder func(activeNodes []*TestFullNode) node.Option
|
||||
// NodeOpt is a functional option for test nodes.
|
||||
type NodeOpt func(opts *nodeOpts) error
|
||||
|
||||
func WithAllSubsystems() NodeOpt {
|
||||
return func(opts *nodeOpts) error {
|
||||
opts.subsystems = opts.subsystems.Add(SMarkets)
|
||||
opts.subsystems = opts.subsystems.Add(SMining)
|
||||
opts.subsystems = opts.subsystems.Add(SSealing)
|
||||
opts.subsystems = opts.subsystems.Add(SSectorStorage)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSubsystems(systems ...MinerSubsystem) NodeOpt {
|
||||
return func(opts *nodeOpts) error {
|
||||
for _, s := range systems {
|
||||
opts.subsystems = opts.subsystems.Add(s)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DisableLibp2p() NodeOpt {
|
||||
return func(opts *nodeOpts) error {
|
||||
opts.disableLibp2p = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func MainMiner(m *TestMiner) NodeOpt {
|
||||
return func(opts *nodeOpts) error {
|
||||
opts.mainMiner = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OwnerBalance specifies the balance to be attributed to a miner's owner
|
||||
// account. Only relevant when creating a miner.
|
||||
func OwnerBalance(balance abi.TokenAmount) NodeOpt {
|
||||
|
@ -2,6 +2,8 @@ package kit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
@ -13,8 +15,13 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
|
||||
testServ := httptest.NewServer(handler)
|
||||
func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) {
|
||||
testServ := &httptest.Server{
|
||||
Listener: listener,
|
||||
Config: &http.Server{Handler: handler},
|
||||
}
|
||||
testServ.Start()
|
||||
|
||||
t.Cleanup(testServ.Close)
|
||||
t.Cleanup(testServ.CloseClientConnections)
|
||||
|
||||
@ -28,7 +35,10 @@ func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
|
||||
handler, err := node.FullNodeHandler(f.FullNode, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler)
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler, l)
|
||||
|
||||
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
|
||||
require.NoError(t, err)
|
||||
@ -42,9 +52,11 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
|
||||
handler, err := node.MinerHandler(m.StorageMiner, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler)
|
||||
srv, maddr := CreateRPCServer(t, handler, m.RemoteListener)
|
||||
|
||||
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
|
||||
fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String())
|
||||
url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0"
|
||||
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(stop)
|
||||
|
||||
|
@ -41,7 +41,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
|
||||
ens := kit.NewEnsemble(t, kit.MockProofs()).
|
||||
FullNode(&paymentCreator).
|
||||
FullNode(&paymentReceiver).
|
||||
Miner(&miner, &paymentCreator).
|
||||
Miner(&miner, &paymentCreator, kit.WithAllSubsystems()).
|
||||
Start().
|
||||
InterconnectAll()
|
||||
bms := ens.BeginMining(blockTime)
|
||||
|
@ -17,7 +17,6 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@ -27,13 +26,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
|
||||
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
|
||||
}
|
||||
|
||||
// TestPaymentChannels does a basic test to exercise the payment channel CLI
|
||||
// TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI
|
||||
// commands
|
||||
func TestPaymentChannelsBasic(t *testing.T) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
@ -420,7 +413,7 @@ func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCr
|
||||
kit.NewEnsemble(t, kit.MockProofs()).
|
||||
FullNode(paymentCreator, opts).
|
||||
FullNode(paymentReceiver, opts).
|
||||
Miner(&miner, paymentCreator).
|
||||
Miner(&miner, paymentCreator, kit.WithAllSubsystems()).
|
||||
Start().
|
||||
InterconnectAll().
|
||||
BeginMining(blocktime)
|
||||
|
@ -35,7 +35,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) {
|
||||
}, nil
|
||||
})))) // no mock proofs.
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -39,11 +39,12 @@ func TestWindowPostDispute(t *testing.T) {
|
||||
// it doesn't submit proofs.
|
||||
//
|
||||
// Then we're going to manually submit bad proofs.
|
||||
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
|
||||
opts := []kit.NodeOpt{kit.ConstructorOpts(kit.LatestActorsAt(-1))}
|
||||
opts = append(opts, kit.WithAllSubsystems())
|
||||
ens := kit.NewEnsemble(t, kit.MockProofs()).
|
||||
FullNode(&client, opts).
|
||||
Miner(&chainMiner, &client, opts).
|
||||
Miner(&evilMiner, &client, opts, kit.PresealSectors(0)).
|
||||
FullNode(&client, opts...).
|
||||
Miner(&chainMiner, &client, opts...).
|
||||
Miner(&evilMiner, &client, append(opts, kit.PresealSectors(0))...).
|
||||
Start()
|
||||
|
||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||
|
@ -4,38 +4,42 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/storage"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/shared"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
specstorage "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("retrievaladapter")
|
||||
|
||||
type retrievalProviderNode struct {
|
||||
miner *storage.Miner
|
||||
maddr address.Address
|
||||
secb sectorblocks.SectorBuilder
|
||||
pp sectorstorage.PieceProvider
|
||||
full v1api.FullNode
|
||||
}
|
||||
|
||||
// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the
|
||||
// Lotus Node
|
||||
func NewRetrievalProviderNode(miner *storage.Miner, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode {
|
||||
return &retrievalProviderNode{miner, pp, full}
|
||||
func NewRetrievalProviderNode(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode {
|
||||
return &retrievalProviderNode{address.Address(maddr), secb, pp, full}
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
|
||||
@ -50,13 +54,12 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min
|
||||
|
||||
func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
|
||||
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
|
||||
|
||||
si, err := rpn.miner.GetSectorInfo(sectorID)
|
||||
si, err := rpn.sectorsStatus(ctx, sectorID, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(rpn.miner.Address())
|
||||
mid, err := address.IDFromAddress(rpn.maddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -66,7 +69,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SectorType,
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
var commD cid.Cid
|
||||
@ -76,7 +79,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
|
||||
|
||||
// Get a reader for the piece, unsealing the piece if necessary
|
||||
log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid)
|
||||
r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD)
|
||||
r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err)
|
||||
}
|
||||
@ -102,12 +105,12 @@ func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipS
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
|
||||
si, err := rpn.miner.GetSectorInfo(sectorID)
|
||||
si, err := rpn.sectorsStatus(ctx, sectorID, true)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to get sectorinfo, err=%s", err)
|
||||
return false, xerrors.Errorf("failed to get sector info: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(rpn.miner.Address())
|
||||
mid, err := address.IDFromAddress(rpn.maddr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -117,7 +120,7 @@ func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.S
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SectorType,
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length)
|
||||
@ -172,3 +175,37 @@ func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context,
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
|
||||
sInfo, err := rpn.secb.SectorsStatus(ctx, sid, false)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
if !showOnChainInfo {
|
||||
return sInfo, nil
|
||||
}
|
||||
|
||||
onChainInfo, err := rpn.full.StateSectorGetInfo(ctx, rpn.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, err
|
||||
}
|
||||
if onChainInfo == nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.SealProof = onChainInfo.SealProof
|
||||
sInfo.Activation = onChainInfo.Activation
|
||||
sInfo.Expiration = onChainInfo.Expiration
|
||||
sInfo.DealWeight = onChainInfo.DealWeight
|
||||
sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight
|
||||
sInfo.InitialPledge = onChainInfo.InitialPledge
|
||||
|
||||
ex, err := rpn.full.StateSectorExpiration(ctx, rpn.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.OnTime = ex.OnTime
|
||||
sInfo.Early = ex.Early
|
||||
|
||||
return sInfo, nil
|
||||
}
|
||||
|
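The retrieval adapter no longer holds a *storage.Miner: it is constructed from the miner address, a sectorblocks.SectorBuilder, a PieceProvider and a full-node client, and looks sector metadata up through the sectorsStatus helper above. A minimal wiring sketch using only names from this file (the helper name checkUnsealed is illustrative):

package retrievalexample

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/v1api"
	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/storage/sectorblocks"
)

// checkUnsealed builds the adapter from its new dependencies and asks whether
// a piece is already unsealed, without ever touching *storage.Miner.
func checkUnsealed(ctx context.Context,
	maddr dtypes.MinerAddress,
	secb sectorblocks.SectorBuilder,
	pp sectorstorage.PieceProvider,
	full v1api.FullNode,
	sector abi.SectorNumber,
	offset, length abi.UnpaddedPieceSize,
) (bool, error) {
	rpn := retrievaladapter.NewRetrievalProviderNode(maddr, secb, pp, full)
	return rpn.IsUnsealed(ctx, sector, offset, length)
}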
@ -95,11 +95,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema
|
||||
return nil, xerrors.Errorf("deal.PublishCid can't be nil")
|
||||
}
|
||||
|
||||
sdInfo := sealing.DealInfo{
|
||||
sdInfo := api.PieceDealInfo{
|
||||
DealID: deal.DealID,
|
||||
DealProposal: &deal.Proposal,
|
||||
PublishCid: deal.PublishCid,
|
||||
DealSchedule: sealing.DealSchedule{
|
||||
DealSchedule: api.DealSchedule{
|
||||
StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch,
|
||||
EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch,
|
||||
},
|
||||
@ -240,19 +240,19 @@ func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context
|
||||
|
||||
// TODO: better strategy (e.g. look for already unsealed)
|
||||
var best api.SealedRef
|
||||
var bestSi sealing.SectorInfo
|
||||
var bestSi api.SectorInfo
|
||||
for _, r := range refs {
|
||||
si, err := n.secb.Miner.GetSectorInfo(r.SectorID)
|
||||
si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false)
|
||||
if err != nil {
|
||||
return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err)
|
||||
}
|
||||
if si.State == sealing.Proving {
|
||||
if si.State == api.SectorState(sealing.Proving) {
|
||||
best = r
|
||||
bestSi = si
|
||||
break
|
||||
}
|
||||
}
|
||||
if bestSi.State == sealing.UndefinedSectorState {
|
||||
if bestSi.State == api.SectorState(sealing.UndefinedSectorState) {
|
||||
return 0, 0, 0, xerrors.New("no sealed sector found")
|
||||
}
|
||||
return best.SectorID, best.Offset, best.Size.Padded(), nil
|
||||
|
419
node/builder.go
@ -8,14 +8,7 @@ import (
|
||||
|
||||
metricsi "github.com/ipfs/go-metrics-interface"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain"
|
||||
"github.com/filecoin-project/lotus/chain/exchange"
|
||||
rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
"github.com/filecoin-project/lotus/node/hello"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/system"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
@ -33,52 +26,23 @@ import (
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/discovery"
|
||||
discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
|
||||
|
||||
storage2 "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/beacon"
|
||||
"github.com/filecoin-project/lotus/chain/gen"
|
||||
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||
"github.com/filecoin-project/lotus/chain/market"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/metrics"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
|
||||
"github.com/filecoin-project/lotus/chain/wallet/remotewallet"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
"github.com/filecoin-project/lotus/lib/peermgr"
|
||||
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
|
||||
"github.com/filecoin-project/lotus/markets/dealfilter"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
"github.com/filecoin-project/lotus/node/impl/common"
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
"github.com/filecoin-project/lotus/node/impl/common/mock"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
"github.com/filecoin-project/lotus/node/modules/lp2p"
|
||||
"github.com/filecoin-project/lotus/node/modules/testing"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/paychmgr"
|
||||
"github.com/filecoin-project/lotus/paychmgr/settler"
|
||||
"github.com/filecoin-project/lotus/storage"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
)
|
||||
|
||||
//nolint:deadcode,varcheck
|
||||
@ -167,9 +131,11 @@ type Settings struct {
|
||||
|
||||
nodeType repo.RepoType
|
||||
|
||||
Online bool // Online option applied
|
||||
Base bool // Base option applied
|
||||
Config bool // Config option applied
|
||||
Lite bool // Start node in "lite" mode
|
||||
|
||||
enableLibp2pNode bool
|
||||
}
|
||||
|
||||
// Basic lotus-app services
|
||||
@ -246,257 +212,22 @@ func isFullOrLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode }
|
||||
func isFullNode(s *Settings) bool { return s.nodeType == repo.FullNode && !s.Lite }
|
||||
func isLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode && s.Lite }
|
||||
|
||||
// Chain node provides access to the Filecoin blockchain, by setting up a full
|
||||
// validator node, or by delegating some actions to other nodes (lite mode)
|
||||
var ChainNode = Options(
|
||||
// Full node or lite node
|
||||
// TODO: Fix offline mode
|
||||
|
||||
// Consensus settings
|
||||
Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig),
|
||||
Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()),
|
||||
Override(new(dtypes.NetworkName), modules.NetworkName),
|
||||
Override(new(modules.Genesis), modules.ErrorGenesis),
|
||||
Override(new(dtypes.AfterGenesisSet), modules.SetGenesis),
|
||||
Override(SetGenesisKey, modules.DoSetGenesis),
|
||||
Override(new(beacon.Schedule), modules.RandomSchedule),
|
||||
|
||||
// Network bootstrap
|
||||
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
|
||||
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
|
||||
|
||||
// Consensus: crypto dependencies
|
||||
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
|
||||
|
||||
// Consensus: VM
|
||||
Override(new(vm.SyscallBuilder), vm.Syscalls),
|
||||
|
||||
// Consensus: Chain storage/access
|
||||
Override(new(*store.ChainStore), modules.ChainStore),
|
||||
Override(new(*stmgr.StateManager), modules.StateManager),
|
||||
Override(new(dtypes.ChainBitswap), modules.ChainBitswap),
|
||||
Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused
|
||||
|
||||
// Consensus: Chain sync
|
||||
|
||||
// We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value.
|
||||
// It will be called implicitly by the Syncer constructor.
|
||||
Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }),
|
||||
Override(new(*chain.Syncer), modules.NewSyncer),
|
||||
Override(new(exchange.Client), exchange.NewClient),
|
||||
|
||||
// Chain networking
|
||||
Override(new(*hello.Service), hello.NewHelloService),
|
||||
Override(new(exchange.Server), exchange.NewServer),
|
||||
Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr),
|
||||
|
||||
// Chain mining API dependencies
|
||||
Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter),
|
||||
|
||||
// Service: Message Pool
|
||||
Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc),
|
||||
Override(new(*messagepool.MessagePool), modules.MessagePool),
|
||||
Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)),
|
||||
|
||||
// Shared graphsync (markets, serving chain)
|
||||
Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultSimultaneousTransfers)),
|
||||
|
||||
// Service: Wallet
|
||||
Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
|
||||
Override(new(*wallet.LocalWallet), wallet.NewWallet),
|
||||
Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
|
||||
Override(new(api.Wallet), From(new(wallet.MultiWallet))),
|
||||
|
||||
// Service: Payment channels
|
||||
Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))),
|
||||
Override(new(*paychmgr.Store), modules.NewPaychStore),
|
||||
Override(new(*paychmgr.Manager), modules.NewManager),
|
||||
Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager),
|
||||
Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels),
|
||||
|
||||
// Markets (common)
|
||||
Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery),
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(discovery.PeerResolver), modules.RetrievalResolver),
|
||||
Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient),
|
||||
Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer),
|
||||
|
||||
// Markets (storage)
|
||||
Override(new(*market.FundManager), market.NewFundManager),
|
||||
Override(new(dtypes.ClientDatastore), modules.NewClientDatastore),
|
||||
Override(new(storagemarket.StorageClient), modules.StorageClient),
|
||||
Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter),
|
||||
Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds),
|
||||
|
||||
Override(new(*full.GasPriceCache), full.NewGasPriceCache),
|
||||
|
||||
// Lite node API
|
||||
ApplyIf(isLiteNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProviderLite),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
|
||||
Override(new(full.ChainModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.GasModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.StateModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager),
|
||||
),
|
||||
|
||||
// Full node API / service startup
|
||||
ApplyIf(isFullNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProvider),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
|
||||
Override(new(full.ChainModuleAPI), From(new(full.ChainModule))),
|
||||
Override(new(full.GasModuleAPI), From(new(full.GasModule))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))),
|
||||
Override(new(full.StateModuleAPI), From(new(full.StateModule))),
|
||||
Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))),
|
||||
|
||||
Override(RunHelloKey, modules.RunHello),
|
||||
Override(RunChainExchangeKey, modules.RunChainExchange),
|
||||
Override(RunPeerMgrKey, modules.RunPeerMgr),
|
||||
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
|
||||
Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks),
|
||||
),
|
||||
)
|
||||
|
||||
var MinerNode = Options(
|
||||
// API dependencies
|
||||
Override(new(api.Common), From(new(common.CommonAPI))),
|
||||
Override(new(sectorstorage.StorageAuth), modules.StorageAuth),
|
||||
|
||||
// Actor config
|
||||
Override(new(dtypes.MinerAddress), modules.MinerAddress),
|
||||
Override(new(dtypes.MinerID), modules.MinerID),
|
||||
Override(new(abi.RegisteredSealProof), modules.SealProofType),
|
||||
Override(new(dtypes.NetworkName), modules.StorageNetworkName),
|
||||
|
||||
// Sector storage
|
||||
Override(new(*stores.Index), stores.NewIndex),
|
||||
Override(new(stores.SectorIndex), From(new(*stores.Index))),
|
||||
Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
|
||||
Override(new(*stores.Local), modules.LocalStorage),
|
||||
Override(new(*stores.Remote), modules.RemoteStorage),
|
||||
Override(new(*sectorstorage.Manager), modules.SectorStorage),
|
||||
Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))),
|
||||
Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))),
|
||||
Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))),
|
||||
|
||||
// Sector storage: Proofs
|
||||
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
|
||||
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver),
|
||||
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),
|
||||
|
||||
// Sealing
|
||||
Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),
|
||||
Override(GetParamsKey, modules.GetParams),
|
||||
|
||||
// Mining / proving
|
||||
Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter),
|
||||
Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)),
|
||||
Override(new(*miner.Miner), modules.SetupBlockProducer),
|
||||
Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver),
|
||||
|
||||
Override(new(*storage.AddressSelector), modules.AddressSelector(nil)),
|
||||
|
||||
// Markets
|
||||
Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore),
|
||||
Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore),
|
||||
Override(new(dtypes.StagingDAG), modules.StagingDAG),
|
||||
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(config.DefaultSimultaneousTransfers)),
|
||||
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore),
|
||||
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider),
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{
|
||||
RetrievalPricing: &config.RetrievalPricing{
|
||||
Strategy: config.RetrievalPricingDefaultMode,
|
||||
Default: &config.RetrievalPricingDefault{},
|
||||
},
|
||||
})),
|
||||
Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider),
|
||||
Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider),
|
||||
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)),
|
||||
|
||||
Override(HandleRetrievalKey, modules.HandleRetrieval),
|
||||
|
||||
// Markets (storage)
|
||||
Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer),
|
||||
Override(new(*storedask.StoredAsk), modules.NewStorageAsk),
|
||||
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)),
|
||||
Override(new(storagemarket.StorageProvider), modules.StorageProvider),
|
||||
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})),
|
||||
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil, nil)),
|
||||
Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds),
|
||||
Override(HandleDealsKey, modules.HandleDeals),
|
||||
|
||||
// Config (todo: get a real property system)
|
||||
Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
|
||||
Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
|
||||
Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc),
|
||||
Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc),
|
||||
Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc),
|
||||
Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc),
|
||||
Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc),
|
||||
Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc),
|
||||
Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc),
|
||||
)
|
||||
|
||||
// Online sets up basic libp2p node
|
||||
func Online() Option {
|
||||
|
||||
func Base() Option {
|
||||
return Options(
|
||||
// make sure that online is applied before Config.
|
||||
// This is important because Config overrides some of Online units
|
||||
func(s *Settings) error { s.Online = true; return nil },
|
||||
func(s *Settings) error { s.Base = true; return nil }, // mark Base as applied
|
||||
ApplyIf(func(s *Settings) bool { return s.Config },
|
||||
Error(errors.New("the Online option must be set before Config option")),
|
||||
Error(errors.New("the Base() option must be set before Config option")),
|
||||
),
|
||||
ApplyIf(func(s *Settings) bool { return s.enableLibp2pNode },
|
||||
LibP2P,
|
||||
),
|
||||
|
||||
LibP2P,
|
||||
|
||||
ApplyIf(isFullOrLiteNode, ChainNode),
|
||||
ApplyIf(IsType(repo.StorageMiner), MinerNode),
|
||||
)
|
||||
}
|
||||
|
||||
func StorageMiner(out *api.StorageMiner) Option {
|
||||
return Options(
|
||||
ApplyIf(func(s *Settings) bool { return s.Config },
|
||||
Error(errors.New("the StorageMiner option must be set before Config option")),
|
||||
),
|
||||
ApplyIf(func(s *Settings) bool { return s.Online },
|
||||
Error(errors.New("the StorageMiner option must be set before Online option")),
|
||||
),
|
||||
|
||||
func(s *Settings) error {
|
||||
s.nodeType = repo.StorageMiner
|
||||
return nil
|
||||
},
|
||||
|
||||
func(s *Settings) error {
|
||||
resAPI := &impl.StorageMinerAPI{}
|
||||
s.invokes[ExtractApiKey] = fx.Populate(resAPI)
|
||||
*out = resAPI
|
||||
return nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Config sets up constructors based on the provided Config
|
||||
func ConfigCommon(cfg *config.Common) Option {
|
||||
func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option {
|
||||
return Options(
|
||||
func(s *Settings) error { s.Config = true; return nil },
|
||||
Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
|
||||
@ -505,14 +236,21 @@ func ConfigCommon(cfg *config.Common) Option {
|
||||
Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error {
|
||||
return lr.SetAPIEndpoint(e)
|
||||
}),
|
||||
Override(new(sectorstorage.URLs), func(e dtypes.APIEndpoint) (sectorstorage.URLs, error) {
|
||||
Override(new(stores.URLs), func(e dtypes.APIEndpoint) (stores.URLs, error) {
|
||||
ip := cfg.API.RemoteListenAddress
|
||||
|
||||
var urls sectorstorage.URLs
|
||||
var urls stores.URLs
|
||||
urls = append(urls, "http://"+ip+"/remote") // TODO: This makes no assumptions, and probably could...
|
||||
return urls, nil
|
||||
}),
|
||||
ApplyIf(func(s *Settings) bool { return s.Online },
|
||||
ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied
|
||||
If(!enableLibp2pNode,
|
||||
Override(new(common.NetAPI), From(new(mock.MockNetAPI))),
|
||||
Override(new(api.Common), From(new(common.CommonAPI))),
|
||||
),
|
||||
If(enableLibp2pNode,
|
||||
Override(new(common.NetAPI), From(new(common.Libp2pNetAPI))),
|
||||
Override(new(api.Common), From(new(common.CommonAPI))),
|
||||
Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)),
|
||||
Override(ConnectionManagerKey, lp2p.ConnectionManager(
|
||||
cfg.Libp2p.ConnMgrLow,
|
||||
@ -525,95 +263,15 @@ func ConfigCommon(cfg *config.Common) Option {
|
||||
ApplyIf(func(s *Settings) bool { return len(cfg.Libp2p.BootstrapPeers) > 0 },
|
||||
Override(new(dtypes.BootstrapPeers), modules.ConfigBootstrap(cfg.Libp2p.BootstrapPeers)),
|
||||
),
|
||||
|
||||
Override(AddrsFactoryKey, lp2p.AddrsFactory(
|
||||
cfg.Libp2p.AnnounceAddresses,
|
||||
cfg.Libp2p.NoAnnounceAddresses)),
|
||||
),
|
||||
Override(AddrsFactoryKey, lp2p.AddrsFactory(
|
||||
cfg.Libp2p.AnnounceAddresses,
|
||||
cfg.Libp2p.NoAnnounceAddresses)),
|
||||
Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)),
|
||||
)
|
||||
}
|
||||
|
||||
func ConfigFullNode(c interface{}) Option {
|
||||
cfg, ok := c.(*config.FullNode)
|
||||
if !ok {
|
||||
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
|
||||
}
|
||||
|
||||
ipfsMaddr := cfg.Client.IpfsMAddr
|
||||
return Options(
|
||||
ConfigCommon(&cfg.Common),
|
||||
|
||||
If(cfg.Client.UseIpfs,
|
||||
Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)),
|
||||
If(cfg.Client.IpfsUseForRetrieval,
|
||||
Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager),
|
||||
),
|
||||
),
|
||||
Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)),
|
||||
|
||||
If(cfg.Metrics.HeadNotifs,
|
||||
Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)),
|
||||
),
|
||||
|
||||
If(cfg.Wallet.RemoteBackend != "",
|
||||
Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)),
|
||||
),
|
||||
If(cfg.Wallet.EnableLedger,
|
||||
Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet),
|
||||
),
|
||||
If(cfg.Wallet.DisableLocal,
|
||||
Unset(new(*wallet.LocalWallet)),
|
||||
Override(new(wallet.Default), wallet.NilDefault),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func ConfigStorageMiner(c interface{}) Option {
|
||||
cfg, ok := c.(*config.StorageMiner)
|
||||
if !ok {
|
||||
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
|
||||
}
|
||||
|
||||
pricingConfig := cfg.Dealmaking.RetrievalPricing
|
||||
if pricingConfig.Strategy == config.RetrievalPricingExternalMode {
|
||||
if pricingConfig.External == nil {
|
||||
return Error(xerrors.New("retrieval pricing policy has been to set to external but external policy config is nil"))
|
||||
}
|
||||
|
||||
if pricingConfig.External.Path == "" {
|
||||
return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty"))
|
||||
}
|
||||
} else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode {
|
||||
return Error(xerrors.New("retrieval pricing policy must be either default or external"))
|
||||
}
|
||||
|
||||
return Options(
|
||||
ConfigCommon(&cfg.Common),
|
||||
|
||||
If(cfg.Dealmaking.Filter != "",
|
||||
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))),
|
||||
),
|
||||
|
||||
If(cfg.Dealmaking.RetrievalFilter != "",
|
||||
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))),
|
||||
),
|
||||
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),
|
||||
|
||||
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{
|
||||
Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod),
|
||||
MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg,
|
||||
})),
|
||||
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)),
|
||||
|
||||
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)),
|
||||
|
||||
Override(new(sectorstorage.SealerConfig), cfg.Storage),
|
||||
Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)),
|
||||
Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)),
|
||||
)
|
||||
}
|
||||
|
||||
func Repo(r repo.Repo) Option {
|
||||
return func(settings *Settings) error {
|
||||
lr, err := r.Lock(settings.nodeType)
|
||||
@ -690,31 +348,6 @@ func Repo(r repo.Repo) Option {
|
||||
}
|
||||
}
|
||||
|
||||
type FullOption = Option
|
||||
|
||||
func Lite(enable bool) FullOption {
|
||||
return func(s *Settings) error {
|
||||
s.Lite = enable
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func FullAPI(out *api.FullNode, fopts ...FullOption) Option {
|
||||
return Options(
|
||||
func(s *Settings) error {
|
||||
s.nodeType = repo.FullNode
|
||||
return nil
|
||||
},
|
||||
Options(fopts...),
|
||||
func(s *Settings) error {
|
||||
resAPI := &impl.FullNodeAPI{}
|
||||
s.invokes[ExtractApiKey] = fx.Populate(resAPI)
|
||||
*out = resAPI
|
||||
return nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
type StopFunc func(context.Context) error
|
||||
|
||||
// New builds and starts new Filecoin node
|
||||
|
218
node/builder_chain.go
Normal file
@ -0,0 +1,218 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/discovery"
|
||||
discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain"
|
||||
"github.com/filecoin-project/lotus/chain/beacon"
|
||||
"github.com/filecoin-project/lotus/chain/exchange"
|
||||
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||
"github.com/filecoin-project/lotus/chain/market"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/metrics"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
|
||||
"github.com/filecoin-project/lotus/chain/wallet/remotewallet"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/lib/peermgr"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/hello"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/paychmgr"
|
||||
"github.com/filecoin-project/lotus/paychmgr/settler"
|
||||
)
|
||||
|
||||
// Chain node provides access to the Filecoin blockchain, by setting up a full
|
||||
// validator node, or by delegating some actions to other nodes (lite mode)
|
||||
var ChainNode = Options(
|
||||
// Full node or lite node
|
||||
// TODO: Fix offline mode
|
||||
|
||||
// Consensus settings
|
||||
Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig),
|
||||
Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()),
|
||||
Override(new(dtypes.NetworkName), modules.NetworkName),
|
||||
Override(new(modules.Genesis), modules.ErrorGenesis),
|
||||
Override(new(dtypes.AfterGenesisSet), modules.SetGenesis),
|
||||
Override(SetGenesisKey, modules.DoSetGenesis),
|
||||
Override(new(beacon.Schedule), modules.RandomSchedule),
|
||||
|
||||
// Network bootstrap
|
||||
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
|
||||
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
|
||||
|
||||
// Consensus: crypto dependencies
|
||||
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
|
||||
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver),
|
||||
|
||||
// Consensus: VM
|
||||
Override(new(vm.SyscallBuilder), vm.Syscalls),
|
||||
|
||||
// Consensus: Chain storage/access
|
||||
Override(new(*store.ChainStore), modules.ChainStore),
|
||||
Override(new(*stmgr.StateManager), modules.StateManager),
|
||||
Override(new(dtypes.ChainBitswap), modules.ChainBitswap),
|
||||
Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused
|
||||
|
||||
// Consensus: Chain sync
|
||||
|
||||
// We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value.
|
||||
// It will be called implicitly by the Syncer constructor.
|
||||
Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }),
|
||||
Override(new(*chain.Syncer), modules.NewSyncer),
|
||||
Override(new(exchange.Client), exchange.NewClient),
|
||||
|
||||
// Chain networking
|
||||
Override(new(*hello.Service), hello.NewHelloService),
|
||||
Override(new(exchange.Server), exchange.NewServer),
|
||||
Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr),
|
||||
|
||||
// Chain mining API dependencies
|
||||
Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter),
|
||||
|
||||
// Service: Message Pool
|
||||
Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc),
|
||||
Override(new(*messagepool.MessagePool), modules.MessagePool),
|
||||
Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)),
|
||||
|
||||
// Shared graphsync (markets, serving chain)
|
||||
Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfers)),
|
||||
|
||||
// Service: Wallet
|
||||
Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
|
||||
Override(new(*wallet.LocalWallet), wallet.NewWallet),
|
||||
Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
|
||||
Override(new(api.Wallet), From(new(wallet.MultiWallet))),
|
||||
|
||||
// Service: Payment channels
|
||||
Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))),
|
||||
Override(new(*paychmgr.Store), modules.NewPaychStore),
|
||||
Override(new(*paychmgr.Manager), modules.NewManager),
|
||||
Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager),
|
||||
Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels),
|
||||
|
||||
// Markets (common)
|
||||
Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery),
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(discovery.PeerResolver), modules.RetrievalResolver),
|
||||
Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient),
|
||||
Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer),
|
||||
|
||||
// Markets (storage)
|
||||
Override(new(*market.FundManager), market.NewFundManager),
|
||||
Override(new(dtypes.ClientDatastore), modules.NewClientDatastore),
|
||||
Override(new(storagemarket.StorageClient), modules.StorageClient),
|
||||
Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter),
|
||||
Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds),
|
||||
|
||||
Override(new(*full.GasPriceCache), full.NewGasPriceCache),
|
||||
|
||||
// Lite node API
|
||||
ApplyIf(isLiteNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProviderLite),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
|
||||
Override(new(full.ChainModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.GasModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.StateModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager),
|
||||
),
|
||||
|
||||
// Full node API / service startup
|
||||
ApplyIf(isFullNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProvider),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
|
||||
Override(new(full.ChainModuleAPI), From(new(full.ChainModule))),
|
||||
Override(new(full.GasModuleAPI), From(new(full.GasModule))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))),
|
||||
Override(new(full.StateModuleAPI), From(new(full.StateModule))),
|
||||
Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))),
|
||||
|
||||
Override(RunHelloKey, modules.RunHello),
|
||||
Override(RunChainExchangeKey, modules.RunChainExchange),
|
||||
Override(RunPeerMgrKey, modules.RunPeerMgr),
|
||||
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
|
||||
Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks),
|
||||
),
|
||||
)
|
||||
|
||||
func ConfigFullNode(c interface{}) Option {
|
||||
cfg, ok := c.(*config.FullNode)
|
||||
if !ok {
|
||||
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
|
||||
}
|
||||
|
||||
enableLibp2pNode := true // always enable libp2p for full nodes
|
||||
|
||||
ipfsMaddr := cfg.Client.IpfsMAddr
|
||||
return Options(
|
||||
ConfigCommon(&cfg.Common, enableLibp2pNode),
|
||||
|
||||
If(cfg.Client.UseIpfs,
|
||||
Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)),
|
||||
If(cfg.Client.IpfsUseForRetrieval,
|
||||
Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager),
|
||||
),
|
||||
),
|
||||
Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)),
|
||||
|
||||
If(cfg.Metrics.HeadNotifs,
|
||||
Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)),
|
||||
),
|
||||
|
||||
If(cfg.Wallet.RemoteBackend != "",
|
||||
Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)),
|
||||
),
|
||||
If(cfg.Wallet.EnableLedger,
|
||||
Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet),
|
||||
),
|
||||
If(cfg.Wallet.DisableLocal,
|
||||
Unset(new(*wallet.LocalWallet)),
|
||||
Override(new(wallet.Default), wallet.NilDefault),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
type FullOption = Option
|
||||
|
||||
func Lite(enable bool) FullOption {
|
||||
return func(s *Settings) error {
|
||||
s.Lite = enable
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func FullAPI(out *api.FullNode, fopts ...FullOption) Option {
|
||||
return Options(
|
||||
func(s *Settings) error {
|
||||
s.nodeType = repo.FullNode
|
||||
s.enableLibp2pNode = true
|
||||
return nil
|
||||
},
|
||||
Options(fopts...),
|
||||
func(s *Settings) error {
|
||||
resAPI := &impl.FullNodeAPI{}
|
||||
s.invokes[ExtractApiKey] = fx.Populate(resAPI)
|
||||
*out = resAPI
|
||||
return nil
|
||||
},
|
||||
)
|
||||
}
|
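For contrast with the miner builder that follows, a full node is still assembled from FullAPI (which always enables the libp2p host) plus Base and Repo. A construction sketch, assuming node.New keeps its (ctx, options...) shape returning (StopFunc, error) and that Repo applies the repo's stored config as in the lotus daemon:

package nodeexample

import (
	"context"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/repo"
)

// buildFullNode wires the chain-node options: FullAPI marks the node type and
// turns the libp2p host on, Base applies ChainNode via ApplyIf, and Repo binds
// the locked repository.
func buildFullNode(ctx context.Context, r repo.Repo) (api.FullNode, node.StopFunc, error) {
	var full api.FullNode
	stop, err := node.New(ctx,
		node.FullAPI(&full, node.Lite(false)),
		node.Base(),
		node.Repo(r),
	)
	if err != nil {
		return nil, nil, err
	}
	return full, stop, nil
}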
223
node/builder_miner.go
Normal file
@ -0,0 +1,223 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
storage2 "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/gen"
|
||||
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/markets/dealfilter"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/storage"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
)
|
||||
|
||||
var MinerNode = Options(
|
||||
Override(new(sectorstorage.StorageAuth), modules.StorageAuth),
|
||||
|
||||
// Actor config
|
||||
Override(new(dtypes.MinerAddress), modules.MinerAddress),
|
||||
Override(new(dtypes.MinerID), modules.MinerID),
|
||||
Override(new(abi.RegisteredSealProof), modules.SealProofType),
|
||||
Override(new(dtypes.NetworkName), modules.StorageNetworkName),
|
||||
|
||||
// Mining / proving
|
||||
Override(new(*storage.AddressSelector), modules.AddressSelector(nil)),
|
||||
)
|
||||
|
||||
func ConfigStorageMiner(c interface{}) Option {
|
||||
cfg, ok := c.(*config.StorageMiner)
|
||||
if !ok {
|
||||
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
|
||||
}
|
||||
|
||||
pricingConfig := cfg.Dealmaking.RetrievalPricing
|
||||
if pricingConfig.Strategy == config.RetrievalPricingExternalMode {
|
||||
if pricingConfig.External == nil {
|
||||
return Error(xerrors.New("retrieval pricing policy has been to set to external but external policy config is nil"))
|
||||
}
|
||||
|
||||
if pricingConfig.External.Path == "" {
|
||||
return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty"))
|
||||
}
|
||||
} else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode {
|
||||
return Error(xerrors.New("retrieval pricing policy must be either default or external"))
|
||||
}
|
||||
|
||||
enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't
|
||||
|
||||
return Options(
|
||||
ConfigCommon(&cfg.Common, enableLibp2pNode),
|
||||
|
||||
Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
|
||||
Override(new(*stores.Local), modules.LocalStorage),
|
||||
Override(new(*stores.Remote), modules.RemoteStorage),
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),
|
||||
|
||||
If(!cfg.Subsystems.EnableMining,
|
||||
If(cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))),
|
||||
If(cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))),
|
||||
),
|
||||
If(cfg.Subsystems.EnableMining,
|
||||
If(!cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))),
|
||||
If(!cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))),
|
||||
|
||||
// Sector storage: Proofs
|
||||
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
|
||||
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver),
|
||||
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),
|
||||
|
||||
// Sealing (todo should be under EnableSealing, but storagefsm is currently bundled with storage.Miner)
|
||||
Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),
|
||||
Override(GetParamsKey, modules.GetParams),
|
||||
|
||||
Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc),
|
||||
Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc),
|
||||
|
||||
// Mining / proving
|
||||
Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter),
|
||||
Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)),
|
||||
Override(new(*miner.Miner), modules.SetupBlockProducer),
|
||||
Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver),
|
||||
Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)),
|
||||
Override(new(sectorblocks.SectorBuilder), From(new(*storage.Miner))),
|
||||
),
|
||||
|
||||
If(cfg.Subsystems.EnableSectorStorage,
|
||||
// Sector storage
|
||||
Override(new(*stores.Index), stores.NewIndex),
|
||||
Override(new(stores.SectorIndex), From(new(*stores.Index))),
|
||||
Override(new(*sectorstorage.Manager), modules.SectorStorage),
|
||||
Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))),
|
||||
Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))),
|
||||
Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))),
|
||||
),
|
||||
|
||||
If(!cfg.Subsystems.EnableSectorStorage,
|
||||
Override(new(sectorstorage.StorageAuth), modules.StorageAuthWithURL(cfg.Subsystems.SectorIndexApiInfo)),
|
||||
Override(new(modules.MinerStorageService), modules.ConnectStorageService(cfg.Subsystems.SectorIndexApiInfo)),
|
||||
Override(new(sectorstorage.Unsealer), From(new(modules.MinerStorageService))),
|
||||
Override(new(sectorblocks.SectorBuilder), From(new(modules.MinerStorageService))),
|
||||
),
|
||||
If(!cfg.Subsystems.EnableSealing,
|
||||
Override(new(modules.MinerSealingService), modules.ConnectSealingService(cfg.Subsystems.SealerApiInfo)),
|
||||
Override(new(stores.SectorIndex), From(new(modules.MinerSealingService))),
|
||||
),
|
||||
|
||||
If(cfg.Subsystems.EnableMarkets,
|
||||
// Markets
|
||||
Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore),
|
||||
Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore),
|
||||
Override(new(dtypes.StagingDAG), modules.StagingDAG),
|
||||
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)),
|
||||
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore),
|
||||
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
|
||||
|
||||
// Markets (retrieval deps)
|
||||
Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider),
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{
|
||||
RetrievalPricing: &config.RetrievalPricing{
|
||||
Strategy: config.RetrievalPricingDefaultMode,
|
||||
Default: &config.RetrievalPricingDefault{},
|
||||
},
|
||||
})),
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode),
|
||||
Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork),
|
||||
Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider),
|
||||
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)),
|
||||
Override(HandleRetrievalKey, modules.HandleRetrieval),
|
||||
|
||||
// Markets (storage)
|
||||
Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer),
|
||||
Override(new(*storedask.StoredAsk), modules.NewStorageAsk),
|
||||
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)),
|
||||
Override(new(storagemarket.StorageProvider), modules.StorageProvider),
|
||||
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})),
|
||||
Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds),
|
||||
Override(HandleDealsKey, modules.HandleDeals),
|
||||
|
||||
// Config (todo: get a real property system)
|
||||
Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
|
||||
Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
|
||||
Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc),
|
||||
Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc),
|
||||
Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc),
|
||||
Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc),
|
||||
Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc),
|
||||
Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc),
|
||||
Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc),
|
||||
Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc),
|
||||
|
||||
If(cfg.Dealmaking.Filter != "",
|
||||
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))),
|
||||
),
|
||||
|
||||
If(cfg.Dealmaking.RetrievalFilter != "",
|
||||
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))),
|
||||
),
|
||||
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{
|
||||
Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod),
|
||||
MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg,
|
||||
})),
|
||||
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)),
|
||||
),
|
||||
|
||||
Override(new(sectorstorage.SealerConfig), cfg.Storage),
|
||||
Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)),
|
||||
)
|
||||
}
|
||||
|
||||
func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option {
|
||||
return Options(
|
||||
ApplyIf(func(s *Settings) bool { return s.Config },
|
||||
Error(errors.New("the StorageMiner option must be set before Config option")),
|
||||
),
|
||||
|
||||
func(s *Settings) error {
|
||||
s.nodeType = repo.StorageMiner
|
||||
s.enableLibp2pNode = subsystemsCfg.EnableMarkets
|
||||
return nil
|
||||
},
|
||||
|
||||
func(s *Settings) error {
|
||||
resAPI := &impl.StorageMinerAPI{}
|
||||
s.invokes[ExtractApiKey] = fx.Populate(resAPI)
|
||||
*out = resAPI
|
||||
return nil
|
||||
},
|
||||
)
|
||||
}
|
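The StorageMiner option now receives the subsystem configuration up front, so the builder knows before Config is applied whether this process needs a libp2p host (markets enabled) or not. A construction sketch under the same assumptions about node.New and Repo as above:

package nodeexample

import (
	"context"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/repo"
)

// buildMiner constructs a miner whose enabled services are driven entirely by
// the MinerSubsystemConfig passed to the StorageMiner option.
func buildMiner(ctx context.Context, r repo.Repo, subsystems config.MinerSubsystemConfig) (api.StorageMiner, node.StopFunc, error) {
	var minerAPI api.StorageMiner
	stop, err := node.New(ctx,
		node.StorageMiner(&minerAPI, subsystems),
		node.Base(),
		node.Repo(r),
	)
	if err != nil {
		return nil, nil, err
	}
	return minerAPI, stop, nil
}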
@ -50,6 +50,7 @@ type Backup struct {
|
||||
type StorageMiner struct {
|
||||
Common
|
||||
|
||||
Subsystems MinerSubsystemConfig
|
||||
Dealmaking DealmakingConfig
|
||||
Sealing SealingConfig
|
||||
Storage sectorstorage.SealerConfig
|
||||
@ -57,6 +58,16 @@ type StorageMiner struct {
|
||||
Addresses MinerAddressConfig
|
||||
}
|
||||
|
||||
type MinerSubsystemConfig struct {
|
||||
EnableMining bool
|
||||
EnableSealing bool
|
||||
EnableSectorStorage bool
|
||||
EnableMarkets bool
|
||||
|
||||
SealerApiInfo string // if EnableSealing == false
|
||||
SectorIndexApiInfo string // if EnableSectorStorage == false
|
||||
}
|
||||
|
||||
type DealmakingConfig struct {
|
||||
ConsiderOnlineStorageDeals bool
|
||||
ConsiderOfflineStorageDeals bool
|
||||
@ -384,6 +395,13 @@ func DefaultStorageMiner() *StorageMiner {
|
||||
},
|
||||
},
|
||||
|
||||
Subsystems: MinerSubsystemConfig{
|
||||
EnableMining: true,
|
||||
EnableSealing: true,
|
||||
EnableSectorStorage: true,
|
||||
EnableMarkets: true,
|
||||
},
|
||||
|
||||
Fees: MinerFeeConfig{
|
||||
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
|
||||
MaxCommitGasFee: types.MustParseFIL("0.05"),
|
||||
|
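DefaultStorageMiner enables all four subsystems, preserving the existing single-process behaviour. Trimming that default down to a standalone markets process means disabling the local mining, sealing and sector-storage subsystems and pointing the node at the sealing node's API; a sketch using only fields from this config (the endpoint strings are placeholders for token:multiaddr API info):

package configexample

import "github.com/filecoin-project/lotus/node/config"

// marketsOnlyConfig disables everything except the markets subsystem and
// points sealing / sector-index lookups at a separate process.
func marketsOnlyConfig(sealerAPIInfo, sectorIndexAPIInfo string) *config.StorageMiner {
	cfg := config.DefaultStorageMiner()

	cfg.Subsystems.EnableMining = false
	cfg.Subsystems.EnableSealing = false
	cfg.Subsystems.EnableSectorStorage = false
	cfg.Subsystems.EnableMarkets = true

	// Required when sealing and sector storage run elsewhere.
	cfg.Subsystems.SealerApiInfo = sealerAPIInfo
	cfg.Subsystems.SectorIndexApiInfo = sectorIndexAPIInfo

	return cfg
}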
@ -2,32 +2,18 @@ package common

import (
"context"
"sort"
"strings"

"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/fx"
"golang.org/x/xerrors"

logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/host"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
swarm "github.com/libp2p/go-libp2p-swarm"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
ma "github.com/multiformats/go-multiaddr"

"github.com/filecoin-project/go-jsonrpc/auth"

"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
)

var session = uuid.New()
@ -35,13 +21,9 @@ var session = uuid.New()
type CommonAPI struct {
fx.In

NetAPI

APISecret *dtypes.APIAlg
RawHost lp2p.RawHost
Host host.Host
Router lp2p.BaseIpfsRouting
ConnGater *conngater.BasicConnectionGater
Reporter metrics.Reporter
Sk *dtypes.ScoreKeeper
ShutdownChan dtypes.ShutdownChan
}

@ -66,170 +48,6 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt
return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret))
}

func (a *CommonAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return a.Host.Network().Connectedness(pid), nil
}
func (a *CommonAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
scores := a.Sk.Get()
out := make([]api.PubsubScore, len(scores))
i := 0
for k, v := range scores {
out[i] = api.PubsubScore{ID: k, Score: v}
i++
}

sort.Slice(out, func(i, j int) bool {
return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0
})

return out, nil
}

func (a *CommonAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
conns := a.Host.Network().Conns()
out := make([]peer.AddrInfo, len(conns))

for i, conn := range conns {
out[i] = peer.AddrInfo{
ID: conn.RemotePeer(),
Addrs: []ma.Multiaddr{
conn.RemoteMultiaddr(),
},
}
}

return out, nil
}

func (a *CommonAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
info := &api.ExtendedPeerInfo{ID: p}

agent, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err == nil {
info.Agent = agent.(string)
}

for _, a := range a.Host.Peerstore().Addrs(p) {
info.Addrs = append(info.Addrs, a.String())
}
sort.Strings(info.Addrs)

protocols, err := a.Host.Peerstore().GetProtocols(p)
if err == nil {
sort.Strings(protocols)
info.Protocols = protocols
}

if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil {
info.ConnMgrMeta = &api.ConnMgrInfo{
FirstSeen: cm.FirstSeen,
Value: cm.Value,
Tags: cm.Tags,
Conns: cm.Conns,
}
}

return info, nil
}

func (a *CommonAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
if swrm, ok := a.Host.Network().(*swarm.Swarm); ok {
swrm.Backoff().Clear(p.ID)
}

return a.Host.Connect(ctx, p)
}

func (a *CommonAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
return peer.AddrInfo{
ID: a.Host.ID(),
Addrs: a.Host.Addrs(),
}, nil
}

func (a *CommonAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return a.Host.Network().ClosePeer(p)
}

func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
return a.Router.FindPeer(ctx, p)
}

func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat()

if autonat == nil {
return api.NatInfo{
Reachability: network.ReachabilityUnknown,
}, nil
}

var maddr string
if autonat.Status() == network.ReachabilityPublic {
pa, err := autonat.PublicAddr()
if err != nil {
return api.NatInfo{}, err
}
maddr = pa.String()
}

return api.NatInfo{
Reachability: autonat.Status(),
PublicAddr: maddr,
}, nil
}

func (a *CommonAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
ag, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err != nil {
return "", err
}

if ag == nil {
return "unknown", nil
}

return ag.(string), nil
}

func (a *CommonAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
return a.Reporter.GetBandwidthTotals(), nil
}

func (a *CommonAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
out := make(map[string]metrics.Stats)
for p, s := range a.Reporter.GetBandwidthByPeer() {
out[p.String()] = s
}
return out, nil
}

func (a *CommonAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return a.Reporter.GetBandwidthByProtocol(), nil
}

func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}

func (a *CommonAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}

func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}

return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,

BlockDelay: build.BlockDelaySecs,
}, nil
}

func (a *CommonAPI) LogList(context.Context) ([]string, error) {
return logging.GetSubsystems(), nil
}
@ -251,4 +69,16 @@ func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil // relies on jsonrpc closing
}

var _ api.Common = &CommonAPI{}
func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}

return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,

BlockDelay: build.BlockDelaySecs,
}, nil
}
101
node/impl/common/mock/net.go
Normal file
101
node/impl/common/mock/net.go
Normal file
@ -0,0 +1,101 @@
package mock

import (
"context"
"errors"

"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"go.uber.org/fx"
)

var (
errNotImplemented = errors.New("not implemented")
)

type MockNetAPI struct {
fx.In
}

func (a *MockNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
return "", errNotImplemented
}

func (a *MockNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (conn network.Connectedness, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return errNotImplemented
}

func (a *MockNetAPI) NetAddrsListen(context.Context) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return errNotImplemented
}

func (a *MockNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetBandwidthStats(ctx context.Context) (s metrics.Stats, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return nil, errNotImplemented
}

func (a *MockNetAPI) ID(context.Context) (p peer.ID, err error) {
err = errNotImplemented
return
}

func (a *MockNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}

func (a *MockNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}

func (a *MockNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
err = errNotImplemented
return
}
34
node/impl/common/net.go
Normal file
34
node/impl/common/net.go
Normal file
@ -0,0 +1,34 @@
package common

import (
"context"

metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"

"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
)

type NetAPI interface {
NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error)
NetPubsubScores(context.Context) ([]api.PubsubScore, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error)
NetConnect(ctx context.Context, p peer.AddrInfo) error
NetAddrsListen(context.Context) (peer.AddrInfo, error)
NetDisconnect(ctx context.Context, p peer.ID) error
NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error)
NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error)
NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error)
ID(context.Context) (peer.ID, error)
NetBlockAdd(ctx context.Context, acl api.NetBlockList) error
NetBlockRemove(ctx context.Context, acl api.NetBlockList) error
NetBlockList(ctx context.Context) (api.NetBlockList, error)
}
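With the networking surface pulled out into NetAPI, both the libp2p-backed implementation and the mock introduced elsewhere in this change can be checked against the interface at compile time. A minimal sketch of such assertions (not part of this diff):

package common

import "github.com/filecoin-project/lotus/node/impl/common/mock"

// Compile-time assertions that the two implementations satisfy NetAPI.
var (
	_ NetAPI = (*Libp2pNetAPI)(nil)
	_ NetAPI = (*mock.MockNetAPI)(nil)
)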
@ -14,7 +14,7 @@ import (

var cLog = logging.Logger("conngater")

func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.BlockPeer(p)
if err != nil {
@ -89,7 +89,7 @@ func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error
return nil
}

func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.UnblockPeer(p)
if err != nil {
@ -124,7 +124,7 @@ func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) er
return nil
}

func (a *CommonAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
func (a *Libp2pNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
result.Peers = a.ConnGater.ListBlockedPeers()
for _, ip := range a.ConnGater.ListBlockedAddrs() {
result.IPAddrs = append(result.IPAddrs, ip.String())
186
node/impl/common/net_libp2p.go
Normal file
186
node/impl/common/net_libp2p.go
Normal file
@ -0,0 +1,186 @@
package common

import (
"context"
"sort"
"strings"

"github.com/libp2p/go-libp2p-core/host"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
swarm "github.com/libp2p/go-libp2p-swarm"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/fx"

"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
)

type Libp2pNetAPI struct {
fx.In

RawHost lp2p.RawHost
Host host.Host
Router lp2p.BaseIpfsRouting
ConnGater *conngater.BasicConnectionGater
Reporter metrics.Reporter
Sk *dtypes.ScoreKeeper
}

func (a *Libp2pNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return a.Host.Network().Connectedness(pid), nil
}

func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
scores := a.Sk.Get()
out := make([]api.PubsubScore, len(scores))
i := 0
for k, v := range scores {
out[i] = api.PubsubScore{ID: k, Score: v}
i++
}

sort.Slice(out, func(i, j int) bool {
return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0
})

return out, nil
}

func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
conns := a.Host.Network().Conns()
out := make([]peer.AddrInfo, len(conns))

for i, conn := range conns {
out[i] = peer.AddrInfo{
ID: conn.RemotePeer(),
Addrs: []ma.Multiaddr{
conn.RemoteMultiaddr(),
},
}
}

return out, nil
}

func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
info := &api.ExtendedPeerInfo{ID: p}

agent, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err == nil {
info.Agent = agent.(string)
}

for _, a := range a.Host.Peerstore().Addrs(p) {
info.Addrs = append(info.Addrs, a.String())
}
sort.Strings(info.Addrs)

protocols, err := a.Host.Peerstore().GetProtocols(p)
if err == nil {
sort.Strings(protocols)
info.Protocols = protocols
}

if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil {
info.ConnMgrMeta = &api.ConnMgrInfo{
FirstSeen: cm.FirstSeen,
Value: cm.Value,
Tags: cm.Tags,
Conns: cm.Conns,
}
}

return info, nil
}

func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
if swrm, ok := a.Host.Network().(*swarm.Swarm); ok {
swrm.Backoff().Clear(p.ID)
}

return a.Host.Connect(ctx, p)
}

func (a *Libp2pNetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
return peer.AddrInfo{
ID: a.Host.ID(),
Addrs: a.Host.Addrs(),
}, nil
}

func (a *Libp2pNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return a.Host.Network().ClosePeer(p)
}

func (a *Libp2pNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
return a.Router.FindPeer(ctx, p)
}

func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat()

if autonat == nil {
return api.NatInfo{
Reachability: network.ReachabilityUnknown,
}, nil
}

var maddr string
if autonat.Status() == network.ReachabilityPublic {
pa, err := autonat.PublicAddr()
if err != nil {
return api.NatInfo{}, err
}
maddr = pa.String()
}

return api.NatInfo{
Reachability: autonat.Status(),
PublicAddr: maddr,
}, nil
}

func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
ag, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err != nil {
return "", err
}

if ag == nil {
return "unknown", nil
}

return ag.(string), nil
}

func (a *Libp2pNetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
return a.Reporter.GetBandwidthTotals(), nil
}

func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
out := make(map[string]metrics.Stats)
for p, s := range a.Reporter.GetBandwidthByPeer() {
out[p.String()] = s
}
return out, nil
}

func (a *Libp2pNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return a.Reporter.GetBandwidthByProtocol(), nil
}

func (a *Libp2pNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}

func (a *Libp2pNetAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}
@ -8,6 +8,7 @@ import (
"strconv"
"time"

"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/gen"

@ -23,9 +24,7 @@ import (
"github.com/filecoin-project/go-fil-markets/piecestore"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"

sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@ -49,54 +48,63 @@ import (
type StorageMinerAPI struct {
common.CommonAPI

SectorBlocks *sectorblocks.SectorBlocks
Full api.FullNode
LocalStore *stores.Local
RemoteStore *stores.Remote

PieceStore dtypes.ProviderPieceStore
StorageProvider storagemarket.StorageProvider
RetrievalProvider retrievalmarket.RetrievalProvider
Miner *storage.Miner
BlockMiner *miner.Miner
Full api.FullNode
StorageMgr *sectorstorage.Manager `optional:"true"`
IStorageMgr sectorstorage.SectorManager
*stores.Index
storiface.WorkerReturn
DataTransfer dtypes.ProviderDataTransfer
Host host.Host
AddrSel *storage.AddressSelector
DealPublisher *storageadapter.DealPublisher
// Markets
PieceStore dtypes.ProviderPieceStore `optional:"true"`
StorageProvider storagemarket.StorageProvider `optional:"true"`
RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"`
DataTransfer dtypes.ProviderDataTransfer `optional:"true"`
DealPublisher *storageadapter.DealPublisher `optional:"true"`
SectorBlocks *sectorblocks.SectorBlocks `optional:"true"`
Host host.Host `optional:"true"`

Epp gen.WinningPoStProver
// Miner / storage
Miner *storage.Miner `optional:"true"`
BlockMiner *miner.Miner `optional:"true"`
StorageMgr *sectorstorage.Manager `optional:"true"`
IStorageMgr sectorstorage.SectorManager `optional:"true"`
stores.SectorIndex
storiface.WorkerReturn `optional:"true"`
AddrSel *storage.AddressSelector

Epp gen.WinningPoStProver `optional:"true"`
DS dtypes.MetadataDS

ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc
SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc
ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc
SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc
StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc
SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc
ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc
SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc
ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc
SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc
ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc
SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc
ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc
SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc
SetSealingConfigFunc dtypes.SetSealingConfigFunc
GetSealingConfigFunc dtypes.GetSealingConfigFunc
GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc
SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc
ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc `optional:"true"`
SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"`
ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"`
SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"`
StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"`
SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"`
ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"`
SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"`
ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"`
SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"`
ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"`
SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"`
ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"`
SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"`
SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"`
GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"`
GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"`
SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"`
}

func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
if !auth.HasPerm(r.Context(), nil, api.PermAdmin) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
return
}
func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if perm == true {
if !auth.HasPerm(r.Context(), nil, api.PermAdmin) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
return
}
}

sm.StorageMgr.ServeHTTP(w, r)
sm.StorageMgr.ServeHTTP(w, r)
}
}

func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
@ -136,12 +144,12 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro
// wait for the sector to enter the Packing state
// TODO: instead of polling implement some pubsub-type thing in storagefsm
for {
info, err := sm.Miner.GetSectorInfo(sr.ID.Number)
info, err := sm.Miner.SectorsStatus(ctx, sr.ID.Number, false)
if err != nil {
return abi.SectorID{}, xerrors.Errorf("getting pledged sector info: %w", err)
}

if info.State != sealing.UndefinedSectorState {
if info.State != api.SectorState(sealing.UndefinedSectorState) {
return sr.ID, nil
}

@ -154,62 +162,11 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro
}

func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
info, err := sm.Miner.GetSectorInfo(sid)
sInfo, err := sm.Miner.SectorsStatus(ctx, sid, false)
if err != nil {
return api.SectorInfo{}, err
}

deals := make([]abi.DealID, len(info.Pieces))
for i, piece := range info.Pieces {
if piece.DealInfo == nil {
continue
}
deals[i] = piece.DealInfo.DealID
}

log := make([]api.SectorLog, len(info.Log))
for i, l := range info.Log {
log[i] = api.SectorLog{
Kind: l.Kind,
Timestamp: l.Timestamp,
Trace: l.Trace,
Message: l.Message,
}
}

sInfo := api.SectorInfo{
SectorID: sid,
State: api.SectorState(info.State),
CommD: info.CommD,
CommR: info.CommR,
Proof: info.Proof,
Deals: deals,
Ticket: api.SealTicket{
Value: info.TicketValue,
Epoch: info.TicketEpoch,
},
Seed: api.SealSeed{
Value: info.SeedValue,
Epoch: info.SeedEpoch,
},
PreCommitMsg: info.PreCommitMessage,
CommitMsg: info.CommitMessage,
Retries: info.InvalidProofs,
ToUpgrade: sm.Miner.IsMarkedForUpgrade(sid),

LastErr: info.LastErr,
Log: log,
// on chain info
SealProof: 0,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),
VerifiedDealWeight: big.Zero(),
InitialPledge: big.Zero(),
OnTime: 0,
Early: 0,
}

if !showOnChainInfo {
return sInfo, nil
}
@ -238,6 +195,14 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb
return sInfo, nil
}

func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r sto.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
return sm.Miner.SectorAddPieceToAny(ctx, size, r, d)
}

func (sm *StorageMinerAPI) SectorsUnsealPiece(ctx context.Context, sector sto.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error {
return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, offset, size, randomness, commd)
}
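SectorAddPieceToAny and SectorsUnsealPiece expose the sealing side of deal handling over the miner RPC API, so a markets-only process can hand pieces to a remote sealing service. A minimal client-side sketch, assuming a connected api.StorageMiner client and a caller-supplied PieceDealInfo (none of this is part of the diff):

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
)

// addPiece hands a deal piece to the (possibly remote) sealing service and
// reports where it landed. miner, data and deal are assumed to be provided
// by the caller; this is an illustrative sketch, not code from the PR.
func addPiece(ctx context.Context, miner api.StorageMiner, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) error {
	so, err := miner.SectorAddPieceToAny(ctx, size, data, deal)
	if err != nil {
		return err
	}
	fmt.Printf("piece added to sector %d at offset %d\n", so.Sector, so.Offset)
	return nil
}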
// List all staged sectors
func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, error) {
sectors, err := sm.Miner.ListSectors()
@ -300,7 +265,17 @@ func (sm *StorageMinerAPI) SectorsSummary(ctx context.Context) (map[api.SectorSt
}

func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
return sm.StorageMgr.StorageLocal(ctx)
l, err := sm.LocalStore.Local(ctx)
if err != nil {
return nil, err
}

out := map[stores.ID]string{}
for _, st := range l {
out[st.ID] = st.LocalPath
}

return out, nil
}

func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) {
@ -320,7 +295,7 @@ func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.Sealed
}

func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
return sm.StorageMgr.FsStat(ctx, id)
return sm.RemoteStore.FsStat(ctx, id)
}

func (sm *StorageMinerAPI) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error {
@ -688,7 +663,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP
var rg storiface.RGetter
if expensive {
rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) {
si, err := sm.Miner.GetSectorInfo(id.Number)
si, err := sm.Miner.SectorsStatus(ctx, id.Number, false)
if err != nil {
return cid.Undef, err
}
@ -86,6 +86,7 @@ type ClientDataTransfer datatransfer.Manager

type ProviderDealStore *statestore.StateStore
type ProviderPieceStore piecestore.PieceStore

type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator

// ProviderDataTransfer is a data transfer manager for the provider
@ -8,6 +8,7 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"time"

"github.com/filecoin-project/lotus/markets/pricing"
@ -44,7 +45,7 @@ import (
smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-statestore"
"github.com/filecoin-project/go-storedcounter"
@ -67,7 +68,6 @@ import (
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/markets"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
@ -207,7 +207,6 @@ type StorageMinerParams struct {
Lifecycle fx.Lifecycle
MetricsCtx helpers.MetricsCtx
API v1api.FullNode
Host host.Host
MetadataDS dtypes.MetadataDS
Sealer sectorstorage.SectorManager
SectorIDCounter sealing.SectorIDCounter
@ -226,7 +225,6 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
lc = params.Lifecycle
api = params.API
sealer = params.Sealer
h = params.Host
sc = params.SectorIDCounter
verif = params.Verifier
prover = params.Prover
@ -247,7 +245,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
return nil, err
}

sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as)
sm, err := storage.NewMiner(api, maddr, ds, sealer, sc, verif, prover, gsd, fc, j, as)
if err != nil {
return nil, err
}
@ -652,6 +650,10 @@ func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dt
}
}

func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork {
return rmnet.NewFromLibp2pHost(h)
}

// RetrievalPricingFunc configures the pricing function to use for retrieval deals.
func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc {
@ -667,35 +669,26 @@ func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnl
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host,
miner *storage.Miner,
full v1api.FullNode,
func RetrievalProvider(
maddr dtypes.MinerAddress,
adapter retrievalmarket.RetrievalProviderNode,
netwk rmnet.RetrievalMarketNetwork,
ds dtypes.MetadataDS,
pieceStore dtypes.ProviderPieceStore,
mds dtypes.StagingMultiDstore,
dt dtypes.ProviderDataTransfer,
pieceProvider sectorstorage.PieceProvider,
pricingFnc dtypes.RetrievalPricingFunc,
userFilter dtypes.RetrievalDealFilter,
) (retrievalmarket.RetrievalProvider, error) {
adapter := retrievaladapter.NewRetrievalProviderNode(miner, pieceProvider, full)

maddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}

netwk := rmnet.NewFromLibp2pHost(h)
opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter))

return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
return retrievalimpl.NewProvider(address.Address(maddr), adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
retrievalimpl.RetrievalPricingFunc(pricingFnc), opt)
}

var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")

func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) {
func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls stores.URLs) (*stores.Local, error) {
ctx := helpers.LifecycleCtx(mctx, lc)
return stores.NewLocal(ctx, ls, si, urls)
}
@ -733,6 +726,18 @@ func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.Storage
return sectorstorage.StorageAuth(headers), nil
}

func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) {
return func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) {
s := strings.Split(apiInfo, ":")
if len(s) != 2 {
return nil, errors.New("unexpected format of `apiInfo`")
}
headers := http.Header{}
headers.Add("Authorization", "Bearer "+s[0])
return sectorstorage.StorageAuth(headers), nil
}
}
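StorageAuthWithURL derives the storage-auth header from a static API-info string instead of asking the local Common API for a token, which is what a split sealing/storage process needs. A small hedged sketch of the expected input shape (placeholder values, not part of this diff):

package example

import "github.com/filecoin-project/lotus/node/modules"

// The string must contain exactly one ":"; the left part becomes the Bearer
// token, the right part is the dial address. Both values below are placeholders.
var storageAuth = modules.StorageAuthWithURL("PLACEHOLDER_TOKEN:/ip4/127.0.0.1/tcp/2345/http")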
func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) {
return func() (out bool, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
71
node/modules/storageminer_svc.go
Normal file
71
node/modules/storageminer_svc.go
Normal file
@ -0,0 +1,71 @@
package modules

import (
"context"

"github.com/filecoin-project/lotus/storage/sectorblocks"

"go.uber.org/fx"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/node/modules/helpers"
)

type MinerSealingService api.StorageMiner
type MinerStorageService api.StorageMiner

var _ sectorblocks.SectorBuilder = *new(MinerSealingService)

func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) {
ctx := helpers.LifecycleCtx(mctx, lc)
info := cliutil.ParseApiInfo(apiInfo)
addr, err := info.DialArgs("v0")
if err != nil {
return nil, xerrors.Errorf("could not get DialArgs: %w", err)
}

log.Infof("Checking (svc) api version of %s", addr)

mapi, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
v, err := mapi.Version(ctx)
if err != nil {
return xerrors.Errorf("checking version: %w", err)
}

if !v.APIVersion.EqMajorMinor(api.MinerAPIVersion0) {
return xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", api.MinerAPIVersion0, v.APIVersion)
}

return nil
},
OnStop: func(context.Context) error {
closer()
return nil
}})

return mapi, nil
}
}

func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) {
log.Info("Connecting sealing service to miner")
return connectMinerService(apiInfo)(mctx, lc)
}
}

func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) {
log.Info("Connecting storage service to miner")
return connectMinerService(apiInfo)(mctx, lc)
}
}
@ -23,6 +23,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/lib/rpcenc"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl"
)
@ -117,18 +118,20 @@ func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) {
mapi = api.PermissionedStorMinerAPI(mapi)
}

rpcServer := jsonrpc.NewServer()
readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
rpcServer := jsonrpc.NewServer(readerServerOpt)
rpcServer.Register("Filecoin", mapi)

m.Handle("/rpc/v0", rpcServer)
m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote)
m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned))

// debugging
m.Handle("/debug/metrics", metrics.Exporter())
m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

if !permissioned {
return rpcServer, nil
return m, nil
}

ah := &auth.Handler{
@ -10,8 +10,8 @@ import (

func MockHost(mn mocknet.Mocknet) Option {
return Options(
ApplyIf(func(s *Settings) bool { return !s.Online },
Error(errors.New("MockHost must be specified after Online")),
ApplyIf(func(s *Settings) bool { return !s.Base },
Error(errors.New("MockHost must be specified after Base")),
),

Override(new(lp2p.RawHost), lp2p.MockHost),
@ -6,11 +6,9 @@ import (
"time"

"github.com/filecoin-project/go-bitfield"

"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/host"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
@ -52,7 +50,6 @@ var log = logging.Logger("storageminer")
type Miner struct {
api fullNodeFilteredAPI
feeCfg config.MinerFeeConfig
h host.Host
sealer sectorstorage.SectorManager
ds datastore.Batching
sc sealing.SectorIDCounter
@ -130,7 +127,6 @@ type fullNodeFilteredAPI interface {
// NewMiner creates a new Miner object.
func NewMiner(api fullNodeFilteredAPI,
maddr address.Address,
h host.Host,
ds datastore.Batching,
sealer sectorstorage.SectorManager,
sc sealing.SectorIDCounter,
@ -143,7 +139,6 @@ func NewMiner(api fullNodeFilteredAPI,
m := &Miner{
api: api,
feeCfg: feeCfg,
h: h,
sealer: sealer,
ds: ds,
sc: sc,
@ -2,16 +2,19 @@ package storage

import (
"context"
"io"

"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-storage/storage"

"github.com/filecoin-project/lotus/api"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/storage/sectorblocks"
)

// TODO: refactor this to be direct somehow
@ -20,10 +23,6 @@ func (m *Miner) Address() address.Address {
return m.sealing.Address()
}

func (m *Miner) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
return m.sealing.AddPieceToAnySector(ctx, size, r, d)
}

func (m *Miner) StartPackingSector(sectorNum abi.SectorNumber) error {
return m.sealing.StartPacking(sectorNum)
}
@ -32,10 +31,6 @@ func (m *Miner) ListSectors() ([]sealing.SectorInfo, error) {
return m.sealing.ListSectors()
}

func (m *Miner) GetSectorInfo(sid abi.SectorNumber) (sealing.SectorInfo, error) {
return m.sealing.GetSectorInfo(sid)
}

func (m *Miner) PledgeSector(ctx context.Context) (storage.SectorRef, error) {
return m.sealing.PledgeSector(ctx)
}
@ -83,3 +78,73 @@ func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error {
func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool {
return m.sealing.IsMarkedForUpgrade(id)
}

func (m *Miner) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
return m.sealing.SectorAddPieceToAny(ctx, size, r, d)
}

func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
if showOnChainInfo {
return api.SectorInfo{}, xerrors.Errorf("on-chain info not supported")
}

info, err := m.sealing.GetSectorInfo(sid)
if err != nil {
return api.SectorInfo{}, err
}

deals := make([]abi.DealID, len(info.Pieces))
for i, piece := range info.Pieces {
if piece.DealInfo == nil {
continue
}
deals[i] = piece.DealInfo.DealID
}

log := make([]api.SectorLog, len(info.Log))
for i, l := range info.Log {
log[i] = api.SectorLog{
Kind: l.Kind,
Timestamp: l.Timestamp,
Trace: l.Trace,
Message: l.Message,
}
}

sInfo := api.SectorInfo{
SectorID: sid,
State: api.SectorState(info.State),
CommD: info.CommD,
CommR: info.CommR,
Proof: info.Proof,
Deals: deals,
Ticket: api.SealTicket{
Value: info.TicketValue,
Epoch: info.TicketEpoch,
},
Seed: api.SealSeed{
Value: info.SeedValue,
Epoch: info.SeedEpoch,
},
PreCommitMsg: info.PreCommitMessage,
CommitMsg: info.CommitMessage,
Retries: info.InvalidProofs,
ToUpgrade: m.IsMarkedForUpgrade(sid),

LastErr: info.LastErr,
Log: log,
// on chain info
SealProof: 0,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),
VerifiedDealWeight: big.Zero(),
InitialPledge: big.Zero(),
OnTime: 0,
Early: 0,
}

return sInfo, nil
}

var _ sectorblocks.SectorBuilder = &Miner{}
@ -16,11 +16,10 @@ import (

cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/specs-storage/storage"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage"
)

type SealSerialization uint8
@ -48,17 +47,22 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) {
return dealID, nil
}

type SectorBuilder interface {
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error)
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error)
}

type SectorBlocks struct {
*storage.Miner
SectorBuilder

keys datastore.Batching
keyLk sync.Mutex
}

func NewSectorBlocks(miner *storage.Miner, ds dtypes.MetadataDS) *SectorBlocks {
func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks {
sbc := &SectorBlocks{
Miner: miner,
keys: namespace.Wrap(ds, dsPrefix),
SectorBuilder: sb,
keys: namespace.Wrap(ds, dsPrefix),
}

return sbc
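Because NewSectorBlocks now only needs the narrow SectorBuilder interface, any implementation will do, e.g. a remote sealing-service client or a test stub. A hypothetical in-memory stub, purely illustrative and not part of this diff:

package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
)

// stubSectorBuilder is enough to exercise SectorBlocks without a full
// storage.Miner: it hands out increasing sector numbers at offset 0 and
// echoes back an empty status for any sector.
type stubSectorBuilder struct {
	next abi.SectorNumber
}

func (s *stubSectorBuilder) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
	s.next++
	return api.SectorOffset{Sector: s.next, Offset: 0}, nil
}

func (s *stubSectorBuilder) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
	return api.SectorInfo{SectorID: sid}, nil
}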
@ -96,19 +100,19 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o
return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow
}

func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
sn, offset, err := st.Miner.AddPieceToAnySector(ctx, size, r, d)
func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d)
if err != nil {
return 0, 0, err
}

// TODO: DealID has very low finality here
err = st.writeRef(d.DealID, sn, offset, size)
err = st.writeRef(d.DealID, so.Sector, so.Offset, size)
if err != nil {
return 0, 0, xerrors.Errorf("writeRef: %w", err)
}

return sn, offset, nil
return so.Sector, so.Offset, nil
}

func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) {
@ -9,11 +9,11 @@ require (
|
||||
github.com/drand/drand v1.2.1
|
||||
github.com/filecoin-project/go-address v0.0.5
|
||||
github.com/filecoin-project/go-data-transfer v1.6.0
|
||||
github.com/filecoin-project/go-fil-markets v1.4.0
|
||||
github.com/filecoin-project/go-fil-markets v1.5.0
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
|
||||
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
|
||||
github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4
|
||||
github.com/filecoin-project/specs-actors v0.9.14
|
||||
github.com/google/uuid v1.1.2
|
||||
github.com/gorilla/mux v1.7.4
|
||||
|
@ -286,18 +286,14 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod
|
||||
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
|
||||
github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o=
|
||||
github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc=
|
||||
github.com/filecoin-project/go-data-transfer v1.4.3 h1:ECEw69NOfmEZ7XN1NSBvj3KTbbH2mIczQs+Z2w4bD7c=
|
||||
github.com/filecoin-project/go-data-transfer v1.4.3/go.mod h1:n8kbDQXWrY1c4UgfMa9KERxNCWbOTDwdNhf2MpN9dpo=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
|
||||
github.com/filecoin-project/go-fil-markets v1.2.5 h1:bQgtXbwxKyPxSEQoUI5EaTHJ0qfzyd5NosspuADCm6Y=
|
||||
github.com/filecoin-project/go-fil-markets v1.2.5/go.mod h1:7JIqNBmFvOyBzk/EiPYnweVdQnWhshixb5B9b1653Ag=
|
||||
github.com/filecoin-project/go-fil-markets v1.4.0 h1:J4L6o+FVOmS7ZWV6wxLPiuoDzGC7iS3S5NRFL1enEr0=
|
||||
github.com/filecoin-project/go-fil-markets v1.4.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk=
|
||||
github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k=
|
||||
github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
@ -327,10 +323,8 @@ github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/
|
||||
github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
|
||||
github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3 h1:oeVa5wjoNx888oIs83L+LqAG75yqa5DCj94I2dRK+Ms=
|
||||
github.com/filecoin-project/lotus v1.10.0-rc3.0.20210616215353-9c7db6d305e3/go.mod h1:a4kSO7IY58nxXhc29lpZwgZksbdTQFQ4nhBscFYPAjw=
|
||||
github.com/filecoin-project/lotus v1.9.0 h1:TDKDLbmgYTL8M0mlfd9HmJVEYRlSSOQnakg4+9rfyWM=
|
||||
github.com/filecoin-project/lotus v1.9.0/go.mod h1:4YC/8rizrrp2wKOYvHQEjCxZbziXi68BhrzvI+FCye0=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 h1:u5/uky+PdeaGuEGsExtVP8UUB8No/e873xjqcb7h3CM=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4/go.mod h1:8ooe5Rzw80rJL0br81A8NNiwZ4BUVzPRwAnDxUG4E7g=
|
||||
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
|
||||
github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
@ -338,21 +332,18 @@ github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsq
|
||||
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb h1:orr/sMzrDZUPAveRE+paBdu1kScIUO5zm+HYeh+VlhA=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc=
github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7K9cTZdM1rTiSonHrg=
github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww=
github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E=
github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww=
github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s=
github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c h1:GnDJ6q3QEm2ytTKjPFQSvczAltgCSb3j9F1FeynwvPA=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo=
github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
@ -659,8 +650,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
github.com/ipfs/go-graphsync v0.6.0 h1:x6UvDUGA7wjaKNqx5Vbo7FGT8aJ5ryYA0dMQ5jN3dF0=
github.com/ipfs/go-graphsync v0.6.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 h1:rOoF88dVuDGbIx7idSdimN7JvXriyOIT96WD3eX9sHA=
github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg=
@ -744,6 +733,7 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY=
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
@ -761,6 +751,7 @@ github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUn
github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4=
github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8=
github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo=
github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
@ -771,13 +762,16 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo=
github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg=
github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA=
github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs=
github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g=
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ=
github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0=
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0=
github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70=
@ -1575,6 +1569,7 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
@ -120,10 +120,11 @@ func PrepareBootstrapper(t *TestEnvironment) (*Bootstrapper, error) {
bootstrapperIP := t.NetClient.MustGetDataNetworkIP().String()

n := &LotusNode{}
r := repo.NewMemory(nil)
stop, err := node.New(context.Background(),
node.FullAPI(&n.FullApi),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.Base(),
node.Repo(r),
node.Override(new(modules.Genesis), modtest.MakeGenesisMem(&genesisBuffer, genesisTemplate)),
withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))),
withListenAddress(bootstrapperIP),
@ -66,7 +66,7 @@ func PrepareClient(t *TestEnvironment) (*LotusClient, error) {
n := &LotusNode{}
stop, err := node.New(context.Background(),
node.FullAPI(&n.FullApi),
node.Online(),
node.Base(),
node.Repo(nodeRepo),
withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))),
withGenesis(genesisMsg.Genesis),
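For readability, here is a minimal sketch (not taken verbatim from the PR) of how the harness assembles a full node after this change, assuming the surrounding variables (n, nodeRepo, genesisMsg, t) from the hunks above; node.Online() is simply dropped in favour of node.Base():

n := &LotusNode{}
stop, err := node.New(context.Background(),
	node.FullAPI(&n.FullApi),
	node.Base(), // replaces node.Online()
	node.Repo(nodeRepo),
	withGenesis(genesisMsg.Genesis),
	withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))),
)
if err != nil {
	return nil, err
}
// stop is kept on the LotusNode for teardown (not shown in this hunk)
_ = stop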
@ -27,6 +27,7 @@ import (
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/repo"
@ -52,6 +53,7 @@ type LotusMiner struct {
NodeRepo repo.Repo
FullNetAddrs []peer.AddrInfo
GenesisMsg *GenesisMsg
Subsystems config.MinerSubsystemConfig

t *TestEnvironment
}
@ -141,12 +143,22 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) {
return nil, err
}

var subsystems config.MinerSubsystemConfig

{
lr, err := minerRepo.Lock(repo.StorageMiner)
if err != nil {
return nil, err
}

c, err := lr.Config()
if err != nil {
return nil, err
}

cfg := c.(*config.StorageMiner)
subsystems = cfg.Subsystems

ks, err := lr.KeyStore()
if err != nil {
return nil, err
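To make the new flow easier to follow, a self-contained sketch (not the PR's code) of reading the miner's subsystem configuration out of its locked repo, using the node/config and node/repo packages imported above; the helper name loadSubsystems is hypothetical, while the repo and config calls are the ones shown in the hunk:

// loadSubsystems mirrors the block above: lock the miner repo, read its
// config, and pull out the MinerSubsystemConfig section.
func loadSubsystems(minerRepo repo.Repo) (config.MinerSubsystemConfig, error) {
	var subsystems config.MinerSubsystemConfig

	lr, err := minerRepo.Lock(repo.StorageMiner)
	if err != nil {
		return subsystems, err
	}
	defer lr.Close() // the harness keeps the lock open longer; closed here for completeness

	c, err := lr.Config()
	if err != nil {
		return subsystems, err
	}

	cfg := c.(*config.StorageMiner) // Config() returns an interface{}
	subsystems = cfg.Subsystems
	return subsystems, nil
}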
@ -239,7 +251,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) {

stop1, err := node.New(context.Background(),
node.FullAPI(&n.FullApi),
node.Online(),
node.Base(),
node.Repo(nodeRepo),
withGenesis(genesisMsg.Genesis),
withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))),
@ -260,8 +272,8 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) {
}

minerOpts := []node.Option{
node.StorageMiner(&n.MinerApi),
node.Online(),
node.StorageMiner(&n.MinerApi, subsystems),
node.Base(),
node.Repo(minerRepo),
node.Override(new(api.FullNode), n.FullApi),
node.Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
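The key change in this hunk is that node.StorageMiner now takes the subsystem configuration, so the caller decides which miner services get wired into the process. A condensed sketch of the resulting constructor call, under the assumption of a hypothetical wrapper name buildMinerNode and with the DealPublisher override elided; the options themselves are the ones shown above:

// buildMinerNode is a hypothetical helper; it condenses the post-change
// option list from the hunk above into one call.
func buildMinerNode(ctx context.Context, n *LotusNode, minerRepo repo.Repo,
	subsystems config.MinerSubsystemConfig) (node.StopFunc, error) {
	return node.New(ctx,
		node.StorageMiner(&n.MinerApi, subsystems), // was node.StorageMiner(&n.MinerApi) + node.Online()
		node.Base(),
		node.Repo(minerRepo),
		node.Override(new(api.FullNode), n.FullApi), // wire the miner to the in-process full node API
	)
}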
@ -416,7 +428,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) {
return err.ErrorOrNil()
}

m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t}
m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, subsystems, t}

return m, nil
}
@ -443,7 +455,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) {

stop1, err := node.New(context.Background(),
node.FullAPI(&n.FullApi),
node.Online(),
node.Base(),
node.Repo(nodeRepo),
//withGenesis(genesisMsg.Genesis),
withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))),
@ -457,8 +469,8 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) {
}

minerOpts := []node.Option{
node.StorageMiner(&n.MinerApi),
node.Online(),
node.StorageMiner(&n.MinerApi, m.Subsystems),
node.Base(),
node.Repo(minerRepo),
node.Override(new(api.FullNode), n.FullApi),
withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("miner_rpc", "0"))),
@ -501,7 +513,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) {
t.RecordMessage("connected to full node of miner %d on %v", i, fullNetAddrs[i])
}

pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t}
pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, m.Subsystems, t}

return pm, err
}
@ -600,7 +612,7 @@ func startStorageMinerAPIServer(t *TestEnvironment, repo repo.Repo, minerApi api
rpcServer.Register("Filecoin", minerApi)

mux.Handle("/rpc/v0", rpcServer)
mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote)
mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote(true))
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
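The last hunk shows that ServeRemote is no longer passed as a bare handler: it is now called with a flag and returns the handler. A hedged sketch of what that call-site change implies about the method's shape (an assumption on my part; the actual definition lives in node/impl and is not part of this diff):

// Assumed shape of the method, inferred only from the call site above.
func (sm *StorageMinerAPI) ServeRemote(perm bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if perm {
			// verify the request token carries admin permission before serving
		}
		// ... delegate to the sector storage remote handler (elided)
	}
}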