Merge branch 'master' into raulk/merge-gs-fix

This commit is contained in:
Raúl Kripalani 2021-07-16 18:19:50 +01:00
commit 9042429abd
144 changed files with 6051 additions and 3169 deletions

View File

@ -806,6 +806,11 @@ workflows:
suite: itest-gateway suite: itest-gateway
target: "./itests/gateway_test.go" target: "./itests/gateway_test.go"
- test:
name: test-itest-get_messages_in_ts
suite: itest-get_messages_in_ts
target: "./itests/get_messages_in_ts_test.go"
- test: - test:
name: test-itest-multisig name: test-itest-multisig
suite: itest-multisig suite: itest-multisig
@ -836,6 +841,11 @@ workflows:
suite: itest-sector_finalize_early suite: itest-sector_finalize_early
target: "./itests/sector_finalize_early_test.go" target: "./itests/sector_finalize_early_test.go"
- test:
name: test-itest-sector_miner_collateral
suite: itest-sector_miner_collateral
target: "./itests/sector_miner_collateral_test.go"
- test: - test:
name: test-itest-sector_pledge name: test-itest-sector_pledge
suite: itest-sector_pledge suite: itest-sector_pledge

27
.github/workflows/stale.yml vendored Normal file
View File

@ -0,0 +1,27 @@
name: Close and mark stale issue
on:
schedule:
- cron: '0 0 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
close-issue-message: 'This issue was closed because it is missing author input.'
stale-issue-label: 'kind/stale'
any-of-labels: 'hint/needs-author-input'
days-before-issue-stale: 5
days-before-issue-close: 1
enable-statistics: true

View File

@ -18,6 +18,8 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
## Building & Documentation ## Building & Documentation
> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases).
For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme. For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme.
## Reporting a Vulnerability ## Reporting a Vulnerability

View File

@ -104,6 +104,9 @@ type FullNode interface {
// specified block. // specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read
// ChainGetMessagesInTipset returns message stores in current tipset
ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch // If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned. // will be returned.

View File

@ -55,6 +55,13 @@ type StorageMiner interface {
// Get the status of a given sector by ID // Get the status of a given sector by ID
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
// Add piece to an open sector. If no sectors with enough space are open,
// either a new sector will be created, or this call will block until more
// sectors can be created.
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
// List all staged sectors // List all staged sectors
SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read
@ -135,8 +142,8 @@ type StorageMiner interface {
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
@ -302,3 +309,25 @@ type PendingDealInfo struct {
PublishPeriodStart time.Time PublishPeriodStart time.Time
PublishPeriod time.Duration PublishPeriod time.Duration
} }
type SectorOffset struct {
Sector abi.SectorNumber
Offset abi.PaddedPieceSize
}
// DealInfo is a tuple of deal identity and its schedule
type PieceDealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}
// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}

View File

@ -8,6 +8,7 @@ import (
"sort" "sort"
abi "github.com/filecoin-project/go-state-types/abi" abi "github.com/filecoin-project/go-state-types/abi"
market "github.com/filecoin-project/specs-actors/actors/builtin/market"
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -738,3 +739,381 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
return nil return nil
} }
func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{165}); err != nil {
return err
}
scratch := make([]byte, 9)
// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("PublishCid")); err != nil {
return err
}
if t.PublishCid == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
}
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
return err
}
if err := t.DealProposal.MarshalCBOR(w); err != nil {
return err
}
// t.DealSchedule (api.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
return err
}
if err := t.DealSchedule.MarshalCBOR(w); err != nil {
return err
}
// t.KeepUnsealed (bool) (bool)
if len("KeepUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
return err
}
if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
return err
}
return nil
}
func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
*t = PieceDealInfo{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
}
t.PublishCid = &c
}
}
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.DealProposal = new(market.DealProposal)
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
}
}
}
// t.DealSchedule (api.DealSchedule) (struct)
case "DealSchedule":
{
if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
}
}
// t.KeepUnsealed (bool) (bool)
case "KeepUnsealed":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.KeepUnsealed = false
case 21:
t.KeepUnsealed = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{162}); err != nil {
return err
}
scratch := make([]byte, 9)
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
return err
}
if t.EndEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
return err
}
}
return nil
}
func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
*t = DealSchedule{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.EndEpoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}

View File

@ -52,8 +52,30 @@ func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Heade
return &res, closer, err return &res, closer, err
} }
func getPushUrl(addr string) (string, error) {
pushUrl, err := url.Parse(addr)
if err != nil {
return "", err
}
switch pushUrl.Scheme {
case "ws":
pushUrl.Scheme = "http"
case "wss":
pushUrl.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push
pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push")
return pushUrl.String(), nil
}
// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner // NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) { func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
pushUrl, err := getPushUrl(addr)
if err != nil {
return nil, nil, err
}
var res v0api.StorageMinerStruct var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{ []interface{}{
@ -61,26 +83,19 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H
&res.Internal, &res.Internal,
}, },
requestHeader, requestHeader,
opts..., append([]jsonrpc.Option{
rpcenc.ReaderParamEncoder(pushUrl),
}, opts...)...,
) )
return &res, closer, err return &res, closer, err
} }
func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Worker, jsonrpc.ClientCloser, error) { func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) {
u, err := url.Parse(addr) pushUrl, err := getPushUrl(addr)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push
u.Path = path.Join(u.Path, "../streams/v0/push")
var res api.WorkerStruct var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
@ -88,7 +103,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
&res.Internal, &res.Internal,
}, },
requestHeader, requestHeader,
rpcenc.ReaderParamEncoder(u.String()), rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(), jsonrpc.WithNoReconnect(),
jsonrpc.WithTimeout(30*time.Second), jsonrpc.WithTimeout(30*time.Second),
) )

View File

@ -194,6 +194,21 @@ func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1)
} }
// ChainGetMessagesInTipset mocks base method.
func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1)
ret0, _ := ret[0].([]api.Message)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset.
func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1)
}
// ChainGetNode mocks base method. // ChainGetNode mocks base method.
func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()

View File

@ -127,6 +127,8 @@ type FullNodeStruct struct {
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]Message, error) `perm:"read"`
ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"` ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"`
ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"`
@ -665,6 +667,8 @@ type StorageMinerStruct struct {
SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
@ -703,6 +707,8 @@ type StorageMinerStruct struct {
SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"` SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"`
SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"`
SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"` SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"`
StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
@ -1097,6 +1103,14 @@ func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.M
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) {
return s.Internal.ChainGetMessagesInTipset(p0, p1)
}
func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) {
return *new([]Message), xerrors.New("method not supported")
}
func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) { func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) {
return s.Internal.ChainGetNode(p0, p1) return s.Internal.ChainGetNode(p0, p1)
} }
@ -3153,6 +3167,14 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)
}
func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
return *new(SectorOffset), xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
return s.Internal.SectorCommitFlush(p0) return s.Internal.SectorCommitFlush(p0)
} }
@ -3305,6 +3327,14 @@ func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]i
return *new(map[SectorState]int), xerrors.New("method not supported") return *new(map[SectorState]int), xerrors.New("method not supported")
} }
func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5)
}
func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
return xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error { func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error {
return s.Internal.SectorsUpdate(p0, p1, p2) return s.Internal.SectorsUpdate(p0, p1, p2)
} }

View File

@ -92,6 +92,9 @@ type FullNode interface {
// specified block. // specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read
// ChainGetMessagesInTipset returns message stores in current tipset
ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch // If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned. // will be returned.

View File

@ -45,6 +45,8 @@ type FullNodeStruct struct {
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"`
ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"` ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"`
ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"`
@ -514,6 +516,14 @@ func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.M
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) {
return s.Internal.ChainGetMessagesInTipset(p0, p1)
}
func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) {
return *new([]api.Message), xerrors.New("method not supported")
}
func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) {
return s.Internal.ChainGetNode(p0, p1) return s.Internal.ChainGetNode(p0, p1)
} }

View File

@ -194,6 +194,21 @@ func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1)
} }
// ChainGetMessagesInTipset mocks base method.
func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1)
ret0, _ := ret[0].([]api.Message)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset.
func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1)
}
// ChainGetNode mocks base method. // ChainGetNode mocks base method.
func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()

View File

@ -57,7 +57,7 @@ var (
FullAPIVersion0 = newVer(1, 3, 0) FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0) FullAPIVersion1 = newVer(2, 1, 0)
MinerAPIVersion0 = newVer(1, 1, 0) MinerAPIVersion0 = newVer(1, 2, 0)
WorkerAPIVersion0 = newVer(1, 1, 0) WorkerAPIVersion0 = newVer(1, 1, 0)
) )

View File

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"runtime" "runtime"
"sync/atomic" "sync"
"github.com/dgraph-io/badger/v2" "github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/options" "github.com/dgraph-io/badger/v2/options"
@ -73,20 +73,16 @@ func (b *badgerLogger) Warningf(format string, args ...interface{}) {
} }
const ( const (
stateOpen int64 = iota stateOpen = iota
stateClosing stateClosing
stateClosed stateClosed
) )
// Blockstore is a badger-backed IPLD blockstore. // Blockstore is a badger-backed IPLD blockstore.
//
// NOTE: once Close() is called, methods will try their best to return
// ErrBlockstoreClosed. This will guaranteed to happen for all subsequent
// operation calls after Close() has returned, but it may not happen for
// operations in progress. Those are likely to fail with a different error.
type Blockstore struct { type Blockstore struct {
// state is accessed atomically stateLk sync.RWMutex
state int64 state int
viewers sync.WaitGroup
DB *badger.DB DB *badger.DB
@ -97,6 +93,8 @@ type Blockstore struct {
var _ blockstore.Blockstore = (*Blockstore)(nil) var _ blockstore.Blockstore = (*Blockstore)(nil)
var _ blockstore.Viewer = (*Blockstore)(nil) var _ blockstore.Viewer = (*Blockstore)(nil)
var _ blockstore.BlockstoreIterator = (*Blockstore)(nil)
var _ blockstore.BlockstoreGC = (*Blockstore)(nil)
var _ io.Closer = (*Blockstore)(nil) var _ io.Closer = (*Blockstore)(nil)
// Open creates a new badger-backed blockstore, with the supplied options. // Open creates a new badger-backed blockstore, with the supplied options.
@ -124,53 +122,82 @@ func Open(opts Options) (*Blockstore, error) {
// Close closes the store. If the store has already been closed, this noops and // Close closes the store. If the store has already been closed, this noops and
// returns an error, even if the first closure resulted in error. // returns an error, even if the first closure resulted in error.
func (b *Blockstore) Close() error { func (b *Blockstore) Close() error {
if !atomic.CompareAndSwapInt64(&b.state, stateOpen, stateClosing) { b.stateLk.Lock()
if b.state != stateOpen {
b.stateLk.Unlock()
return nil return nil
} }
b.state = stateClosing
b.stateLk.Unlock()
defer func() {
b.stateLk.Lock()
b.state = stateClosed
b.stateLk.Unlock()
}()
// wait for all accesses to complete
b.viewers.Wait()
defer atomic.StoreInt64(&b.state, stateClosed)
return b.DB.Close() return b.DB.Close()
} }
func (b *Blockstore) access() error {
b.stateLk.RLock()
defer b.stateLk.RUnlock()
if b.state != stateOpen {
return ErrBlockstoreClosed
}
b.viewers.Add(1)
return nil
}
func (b *Blockstore) isOpen() bool {
b.stateLk.RLock()
defer b.stateLk.RUnlock()
return b.state == stateOpen
}
// CollectGarbage runs garbage collection on the value log // CollectGarbage runs garbage collection on the value log
func (b *Blockstore) CollectGarbage() error { func (b *Blockstore) CollectGarbage() error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
}
defer b.viewers.Done()
// compact first to gather the necessary statistics for GC
nworkers := runtime.NumCPU() / 2
if nworkers < 2 {
nworkers = 2
}
err := b.DB.Flatten(nworkers)
if err != nil {
return err
} }
var err error
for err == nil { for err == nil {
err = b.DB.RunValueLogGC(0.125) err = b.DB.RunValueLogGC(0.125)
} }
if err == badger.ErrNoRewrite { if err == badger.ErrNoRewrite {
// not really an error in this case // not really an error in this case, it signals the end of GC
return nil return nil
} }
return err return err
} }
// Compact runs a synchronous compaction
func (b *Blockstore) Compact() error {
if atomic.LoadInt64(&b.state) != stateOpen {
return ErrBlockstoreClosed
}
nworkers := runtime.NumCPU() / 2
if nworkers < 2 {
nworkers = 2
}
return b.DB.Flatten(nworkers)
}
// View implements blockstore.Viewer, which leverages zero-copy read-only // View implements blockstore.Viewer, which leverages zero-copy read-only
// access to values. // access to values.
func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(cid) k, pooled := b.PooledStorageKey(cid)
if pooled { if pooled {
@ -191,9 +218,10 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
// Has implements Blockstore.Has. // Has implements Blockstore.Has.
func (b *Blockstore) Has(cid cid.Cid) (bool, error) { func (b *Blockstore) Has(cid cid.Cid) (bool, error) {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return false, ErrBlockstoreClosed return false, err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(cid) k, pooled := b.PooledStorageKey(cid)
if pooled { if pooled {
@ -221,9 +249,10 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
return nil, blockstore.ErrNotFound return nil, blockstore.ErrNotFound
} }
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return nil, ErrBlockstoreClosed return nil, err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(cid) k, pooled := b.PooledStorageKey(cid)
if pooled { if pooled {
@ -250,9 +279,10 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
// GetSize implements Blockstore.GetSize. // GetSize implements Blockstore.GetSize.
func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return -1, ErrBlockstoreClosed return 0, err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(cid) k, pooled := b.PooledStorageKey(cid)
if pooled { if pooled {
@ -279,9 +309,10 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
// Put implements Blockstore.Put. // Put implements Blockstore.Put.
func (b *Blockstore) Put(block blocks.Block) error { func (b *Blockstore) Put(block blocks.Block) error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(block.Cid()) k, pooled := b.PooledStorageKey(block.Cid())
if pooled { if pooled {
@ -299,9 +330,10 @@ func (b *Blockstore) Put(block blocks.Block) error {
// PutMany implements Blockstore.PutMany. // PutMany implements Blockstore.PutMany.
func (b *Blockstore) PutMany(blocks []blocks.Block) error { func (b *Blockstore) PutMany(blocks []blocks.Block) error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
} }
defer b.viewers.Done()
// toReturn tracks the byte slices to return to the pool, if we're using key // toReturn tracks the byte slices to return to the pool, if we're using key
// prefixing. we can't return each slice to the pool after each Set, because // prefixing. we can't return each slice to the pool after each Set, because
@ -338,9 +370,10 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error {
// DeleteBlock implements Blockstore.DeleteBlock. // DeleteBlock implements Blockstore.DeleteBlock.
func (b *Blockstore) DeleteBlock(cid cid.Cid) error { func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
} }
defer b.viewers.Done()
k, pooled := b.PooledStorageKey(cid) k, pooled := b.PooledStorageKey(cid)
if pooled { if pooled {
@ -353,9 +386,10 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
} }
func (b *Blockstore) DeleteMany(cids []cid.Cid) error { func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return ErrBlockstoreClosed return err
} }
defer b.viewers.Done()
// toReturn tracks the byte slices to return to the pool, if we're using key // toReturn tracks the byte slices to return to the pool, if we're using key
// prefixing. we can't return each slice to the pool after each Set, because // prefixing. we can't return each slice to the pool after each Set, because
@ -392,8 +426,8 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
// AllKeysChan implements Blockstore.AllKeysChan. // AllKeysChan implements Blockstore.AllKeysChan.
func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
if atomic.LoadInt64(&b.state) != stateOpen { if err := b.access(); err != nil {
return nil, ErrBlockstoreClosed return nil, err
} }
txn := b.DB.NewTransaction(false) txn := b.DB.NewTransaction(false)
@ -405,6 +439,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
ch := make(chan cid.Cid) ch := make(chan cid.Cid)
go func() { go func() {
defer b.viewers.Done()
defer close(ch) defer close(ch)
defer iter.Close() defer iter.Close()
@ -415,7 +450,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
if ctx.Err() != nil { if ctx.Err() != nil {
return // context has fired. return // context has fired.
} }
if atomic.LoadInt64(&b.state) != stateOpen { if !b.isOpen() {
// open iterators will run even after the database is closed... // open iterators will run even after the database is closed...
return // closing, yield. return // closing, yield.
} }
@ -442,6 +477,56 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return ch, nil return ch, nil
} }
// Implementation of BlockstoreIterator interface
func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()
txn := b.DB.NewTransaction(false)
defer txn.Discard()
opts := badger.IteratorOptions{PrefetchSize: 100}
if b.prefixing {
opts.Prefix = b.prefix
}
iter := txn.NewIterator(opts)
defer iter.Close()
var buf []byte
for iter.Rewind(); iter.Valid(); iter.Next() {
if !b.isOpen() {
return ErrBlockstoreClosed
}
k := iter.Item().Key()
if b.prefixing {
k = k[b.prefixLen:]
}
klen := base32.RawStdEncoding.DecodedLen(len(k))
if klen > len(buf) {
buf = make([]byte, klen)
}
n, err := base32.RawStdEncoding.Decode(buf, k)
if err != nil {
return err
}
c := cid.NewCidV1(cid.Raw, buf[:n])
err = f(c)
if err != nil {
return err
}
}
return nil
}
// HashOnRead implements Blockstore.HashOnRead. It is not supported by this // HashOnRead implements Blockstore.HashOnRead. It is not supported by this
// blockstore. // blockstore.
func (b *Blockstore) HashOnRead(_ bool) { func (b *Blockstore) HashOnRead(_ bool) {

View File

@ -30,6 +30,16 @@ type BatchDeleter interface {
DeleteMany(cids []cid.Cid) error DeleteMany(cids []cid.Cid) error
} }
// BlockstoreIterator is a trait for efficient iteration
type BlockstoreIterator interface {
ForEachKey(func(cid.Cid) error) error
}
// BlockstoreGC is a trait for blockstores that support online garbage collection
type BlockstoreGC interface {
CollectGarbage() error
}
// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. // WrapIDStore wraps the underlying blockstore in an "identity" blockstore.
// The ID store filters out all puts for blocks with CIDs using the "identity" // The ID store filters out all puts for blocks with CIDs using the "identity"
// hash function. It also extracts inlined blocks from CIDs using the identity // hash function. It also extracts inlined blocks from CIDs using the identity

66
blockstore/discard.go Normal file
View File

@ -0,0 +1,66 @@
package blockstore
import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
)
var _ Blockstore = (*discardstore)(nil)
type discardstore struct {
bs Blockstore
}
func NewDiscardStore(bs Blockstore) Blockstore {
return &discardstore{bs: bs}
}
func (b *discardstore) Has(cid cid.Cid) (bool, error) {
return b.bs.Has(cid)
}
func (b *discardstore) HashOnRead(hor bool) {
b.bs.HashOnRead(hor)
}
func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) {
return b.bs.Get(cid)
}
func (b *discardstore) GetSize(cid cid.Cid) (int, error) {
return b.bs.GetSize(cid)
}
func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error {
return b.bs.View(cid, f)
}
func (b *discardstore) Put(blk blocks.Block) error {
return nil
}
func (b *discardstore) PutMany(blks []blocks.Block) error {
return nil
}
func (b *discardstore) DeleteBlock(cid cid.Cid) error {
return nil
}
func (b *discardstore) DeleteMany(cids []cid.Cid) error {
return nil
}
func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return b.bs.AllKeysChan(ctx)
}
func (b *discardstore) Close() error {
if c, ok := b.bs.(io.Closer); ok {
return c.Close()
}
return nil
}

View File

@ -0,0 +1,72 @@
# SplitStore: An actively scalable blockstore for the Filecoin chain
The SplitStore was first introduced in lotus v1.5.1, as an experiment
in reducing the performance impact of large blockstores.
With lotus v1.11.1, we introduce the next iteration in design and
implementation, which we call SplitStore v1.
The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474)
evolves the splitstore to be a freestanding compacting blockstore that
allows us to keep a small (60-100GB) working set in a hot blockstore
and reliably archive out of scope objects in a coldstore. The
coldstore can also be a discard store, whereby out of scope objects
are discarded or a regular badger blockstore (the default), which can
be periodically garbage collected according to configurable user
retention policies.
To enable the splitstore, edit `.lotus/config.toml` and add the following:
```
[Chainstore]
EnableSplitstore = true
```
If you intend to use the discard coldstore, your also need to add the following:
```
[Chainstore.Splitstore]
ColdStoreType = "discard"
```
In general you _should not_ have to use the discard store, unless you
are running a network booster or have very constrained hardware with
not enough disk space to maintain a coldstore, even with garbage
collection.
## Operation
When the splitstore is first enabled, the existing blockstore becomes
the coldstore and a fresh hotstore is initialized.
The hotstore is warmed up on first startup so as to load all chain
headers and state roots in the current head. This allows us to
immediately gain the performance benefits of a smallerblockstore which
can be substantial for full archival nodes.
All new writes are directed to the hotstore, while reads first hit the
hotstore, with fallback to the coldstore.
Once 5 finalities have ellapsed, and every finality henceforth, the
blockstore _compacts_. Compaction is the process of moving all
unreachable objects within the last 4 finalities from the hotstore to
the coldstore. If the system is configured with a discard coldstore,
these objects are discarded. Note that chain headers, all the way to
genesis, are considered reachable. Stateroots and messages are
considered reachable only within the last 4 finalities, unless there
is a live reference to them.
## Compaction
Compaction works transactionally with the following algorithm:
- We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references.
- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge.
- When running with a coldstore, we next copy all cold objects to the coldstore.
- At this point we are ready to begin purging:
- We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references)
- We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
- We then end the transaction and compact/gc the hotstore.
## Coldstore Garbage Collection
TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)

View File

@ -0,0 +1,273 @@
package splitstore
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime/debug"
"strings"
"sync"
"time"
"go.uber.org/multierr"
"golang.org/x/xerrors"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
)
type debugLog struct {
readLog, writeLog, deleteLog, stackLog *debugLogOp
stackMx sync.Mutex
stackMap map[string]string
}
type debugLogOp struct {
path string
mx sync.Mutex
log *os.File
count int
}
func openDebugLog(path string) (*debugLog, error) {
basePath := filepath.Join(path, "debug")
err := os.MkdirAll(basePath, 0755)
if err != nil {
return nil, err
}
readLog, err := openDebugLogOp(basePath, "read.log")
if err != nil {
return nil, err
}
writeLog, err := openDebugLogOp(basePath, "write.log")
if err != nil {
_ = readLog.Close()
return nil, err
}
deleteLog, err := openDebugLogOp(basePath, "delete.log")
if err != nil {
_ = readLog.Close()
_ = writeLog.Close()
return nil, err
}
stackLog, err := openDebugLogOp(basePath, "stack.log")
if err != nil {
_ = readLog.Close()
_ = writeLog.Close()
_ = deleteLog.Close()
return nil, xerrors.Errorf("error opening stack log: %w", err)
}
return &debugLog{
readLog: readLog,
writeLog: writeLog,
deleteLog: deleteLog,
stackLog: stackLog,
stackMap: make(map[string]string),
}, nil
}
func (d *debugLog) LogReadMiss(cid cid.Cid) {
if d == nil {
return
}
stack := d.getStack()
err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack)
if err != nil {
log.Warnf("error writing read log: %s", err)
}
}
func (d *debugLog) LogWrite(blk blocks.Block) {
if d == nil {
return
}
var stack string
if enableDebugLogWriteTraces {
stack = " " + d.getStack()
}
err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack)
if err != nil {
log.Warnf("error writing write log: %s", err)
}
}
func (d *debugLog) LogWriteMany(blks []blocks.Block) {
if d == nil {
return
}
var stack string
if enableDebugLogWriteTraces {
stack = " " + d.getStack()
}
now := d.timestamp()
for _, blk := range blks {
err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack)
if err != nil {
log.Warnf("error writing write log: %s", err)
break
}
}
}
func (d *debugLog) LogDelete(cids []cid.Cid) {
if d == nil {
return
}
now := d.timestamp()
for _, c := range cids {
err := d.deleteLog.Log("%s %s\n", now, c)
if err != nil {
log.Warnf("error writing delete log: %s", err)
break
}
}
}
func (d *debugLog) Flush() {
if d == nil {
return
}
// rotate non-empty logs
d.readLog.Rotate()
d.writeLog.Rotate()
d.deleteLog.Rotate()
d.stackLog.Rotate()
}
func (d *debugLog) Close() error {
if d == nil {
return nil
}
err1 := d.readLog.Close()
err2 := d.writeLog.Close()
err3 := d.deleteLog.Close()
err4 := d.stackLog.Close()
return multierr.Combine(err1, err2, err3, err4)
}
func (d *debugLog) getStack() string {
sk := d.getNormalizedStackTrace()
hash := sha256.Sum256([]byte(sk))
key := string(hash[:])
d.stackMx.Lock()
repr, ok := d.stackMap[key]
if !ok {
repr = hex.EncodeToString(hash[:])
d.stackMap[key] = repr
err := d.stackLog.Log("%s\n%s\n", repr, sk)
if err != nil {
log.Warnf("error writing stack trace for %s: %s", repr, err)
}
}
d.stackMx.Unlock()
return repr
}
func (d *debugLog) getNormalizedStackTrace() string {
sk := string(debug.Stack())
// Normalization for deduplication
// skip first line -- it's the goroutine
// for each line that ends in a ), remove the call args -- these are the registers
lines := strings.Split(sk, "\n")[1:]
for i, line := range lines {
if len(line) > 0 && line[len(line)-1] == ')' {
idx := strings.LastIndex(line, "(")
if idx < 0 {
continue
}
lines[i] = line[:idx]
}
}
return strings.Join(lines, "\n")
}
func (d *debugLog) timestamp() string {
ts, _ := time.Now().MarshalText()
return string(ts)
}
func openDebugLogOp(basePath, name string) (*debugLogOp, error) {
path := filepath.Join(basePath, name)
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return nil, xerrors.Errorf("error opening %s: %w", name, err)
}
return &debugLogOp{path: path, log: file}, nil
}
func (d *debugLogOp) Close() error {
d.mx.Lock()
defer d.mx.Unlock()
return d.log.Close()
}
func (d *debugLogOp) Log(template string, arg ...interface{}) error {
d.mx.Lock()
defer d.mx.Unlock()
d.count++
_, err := fmt.Fprintf(d.log, template, arg...)
return err
}
func (d *debugLogOp) Rotate() {
d.mx.Lock()
defer d.mx.Unlock()
if d.count == 0 {
return
}
err := d.log.Close()
if err != nil {
log.Warnf("error closing log (file: %s): %s", d.path, err)
return
}
arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix())
err = os.Rename(d.path, arxivPath)
if err != nil {
log.Warnf("error moving log (file: %s): %s", d.path, err)
return
}
go func() {
cmd := exec.Command("gzip", arxivPath)
err := cmd.Run()
if err != nil {
log.Warnf("error compressing log (file: %s): %s", arxivPath, err)
}
}()
d.count = 0
d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
log.Warnf("error opening log (file: %s): %s", d.path, err)
return
}
}

View File

@ -1,26 +1,26 @@
package splitstore package splitstore
import ( import (
"path/filepath" "errors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
) )
var errMarkSetClosed = errors.New("markset closed")
// MarkSet is a utility to keep track of seen CID, and later query for them. // MarkSet is a utility to keep track of seen CID, and later query for them.
// //
// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). // * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
// * If a probabilistic result is acceptable, it can be backed by a bloom filter (default). // * If a probabilistic result is acceptable, it can be backed by a bloom filter
type MarkSet interface { type MarkSet interface {
Mark(cid.Cid) error Mark(cid.Cid) error
Has(cid.Cid) (bool, error) Has(cid.Cid) (bool, error)
Close() error Close() error
SetConcurrent()
} }
// markBytes is deliberately a non-nil empty byte slice for serialization.
var markBytes = []byte{}
type MarkSetEnv interface { type MarkSetEnv interface {
Create(name string, sizeHint int64) (MarkSet, error) Create(name string, sizeHint int64) (MarkSet, error)
Close() error Close() error
@ -28,10 +28,10 @@ type MarkSetEnv interface {
func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
switch mtype { switch mtype {
case "", "bloom": case "bloom":
return NewBloomMarkSetEnv() return NewBloomMarkSetEnv()
case "bolt": case "map":
return NewBoltMarkSetEnv(filepath.Join(path, "markset.bolt")) return NewMapMarkSetEnv()
default: default:
return nil, xerrors.Errorf("unknown mark set type %s", mtype) return nil, xerrors.Errorf("unknown mark set type %s", mtype)
} }

View File

@ -3,6 +3,7 @@ package splitstore
import ( import (
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"sync"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -21,7 +22,9 @@ var _ MarkSetEnv = (*BloomMarkSetEnv)(nil)
type BloomMarkSet struct { type BloomMarkSet struct {
salt []byte salt []byte
mx sync.RWMutex
bf *bbloom.Bloom bf *bbloom.Bloom
ts bool
} }
var _ MarkSet = (*BloomMarkSet)(nil) var _ MarkSet = (*BloomMarkSet)(nil)
@ -64,14 +67,41 @@ func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte {
} }
func (s *BloomMarkSet) Mark(cid cid.Cid) error { func (s *BloomMarkSet) Mark(cid cid.Cid) error {
if s.ts {
s.mx.Lock()
defer s.mx.Unlock()
}
if s.bf == nil {
return errMarkSetClosed
}
s.bf.Add(s.saltedKey(cid)) s.bf.Add(s.saltedKey(cid))
return nil return nil
} }
func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) { func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) {
if s.ts {
s.mx.RLock()
defer s.mx.RUnlock()
}
if s.bf == nil {
return false, errMarkSetClosed
}
return s.bf.Has(s.saltedKey(cid)), nil return s.bf.Has(s.saltedKey(cid)), nil
} }
func (s *BloomMarkSet) Close() error { func (s *BloomMarkSet) Close() error {
if s.ts {
s.mx.Lock()
defer s.mx.Unlock()
}
s.bf = nil
return nil return nil
} }
func (s *BloomMarkSet) SetConcurrent() {
s.ts = true
}

View File

@ -1,81 +0,0 @@
package splitstore
import (
"time"
"golang.org/x/xerrors"
cid "github.com/ipfs/go-cid"
bolt "go.etcd.io/bbolt"
)
type BoltMarkSetEnv struct {
db *bolt.DB
}
var _ MarkSetEnv = (*BoltMarkSetEnv)(nil)
type BoltMarkSet struct {
db *bolt.DB
bucketId []byte
}
var _ MarkSet = (*BoltMarkSet)(nil)
func NewBoltMarkSetEnv(path string) (*BoltMarkSetEnv, error) {
db, err := bolt.Open(path, 0644,
&bolt.Options{
Timeout: 1 * time.Second,
NoSync: true,
})
if err != nil {
return nil, err
}
return &BoltMarkSetEnv{db: db}, nil
}
func (e *BoltMarkSetEnv) Create(name string, hint int64) (MarkSet, error) {
bucketId := []byte(name)
err := e.db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucketId)
if err != nil {
return xerrors.Errorf("error creating bolt db bucket %s: %w", name, err)
}
return nil
})
if err != nil {
return nil, err
}
return &BoltMarkSet{db: e.db, bucketId: bucketId}, nil
}
func (e *BoltMarkSetEnv) Close() error {
return e.db.Close()
}
func (s *BoltMarkSet) Mark(cid cid.Cid) error {
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
return b.Put(cid.Hash(), markBytes)
})
}
func (s *BoltMarkSet) Has(cid cid.Cid) (result bool, err error) {
err = s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
v := b.Get(cid.Hash())
result = v != nil
return nil
})
return result, err
}
func (s *BoltMarkSet) Close() error {
return s.db.Update(func(tx *bolt.Tx) error {
return tx.DeleteBucket(s.bucketId)
})
}

View File

@ -0,0 +1,75 @@
package splitstore
import (
"sync"
cid "github.com/ipfs/go-cid"
)
type MapMarkSetEnv struct{}
var _ MarkSetEnv = (*MapMarkSetEnv)(nil)
type MapMarkSet struct {
mx sync.RWMutex
set map[string]struct{}
ts bool
}
var _ MarkSet = (*MapMarkSet)(nil)
func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
return &MapMarkSetEnv{}, nil
}
func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
return &MapMarkSet{
set: make(map[string]struct{}, sizeHint),
}, nil
}
func (e *MapMarkSetEnv) Close() error {
return nil
}
func (s *MapMarkSet) Mark(cid cid.Cid) error {
if s.ts {
s.mx.Lock()
defer s.mx.Unlock()
}
if s.set == nil {
return errMarkSetClosed
}
s.set[string(cid.Hash())] = struct{}{}
return nil
}
func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
if s.ts {
s.mx.RLock()
defer s.mx.RUnlock()
}
if s.set == nil {
return false, errMarkSetClosed
}
_, ok := s.set[string(cid.Hash())]
return ok, nil
}
func (s *MapMarkSet) Close() error {
if s.ts {
s.mx.Lock()
defer s.mx.Unlock()
}
s.set = nil
return nil
}
func (s *MapMarkSet) SetConcurrent() {
s.ts = true
}

View File

@ -8,8 +8,8 @@ import (
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
) )
func TestBoltMarkSet(t *testing.T) { func TestMapMarkSet(t *testing.T) {
testMarkSet(t, "bolt") testMarkSet(t, "map")
} }
func TestBloomMarkSet(t *testing.T) { func TestBloomMarkSet(t *testing.T) {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,114 @@
package splitstore
import (
"context"
"errors"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
bstore "github.com/filecoin-project/lotus/blockstore"
)
type exposedSplitStore struct {
s *SplitStore
}
var _ bstore.Blockstore = (*exposedSplitStore)(nil)
func (s *SplitStore) Expose() bstore.Blockstore {
return &exposedSplitStore{s: s}
}
func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error {
return errors.New("DeleteBlock: operation not supported")
}
func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error {
return errors.New("DeleteMany: operation not supported")
}
func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) {
if isIdentiyCid(c) {
return true, nil
}
has, err := es.s.hot.Has(c)
if has || err != nil {
return has, err
}
return es.s.cold.Has(c)
}
func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) {
if isIdentiyCid(c) {
data, err := decodeIdentityCid(c)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(data, c)
}
blk, err := es.s.hot.Get(c)
switch err {
case bstore.ErrNotFound:
return es.s.cold.Get(c)
default:
return blk, err
}
}
func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) {
if isIdentiyCid(c) {
data, err := decodeIdentityCid(c)
if err != nil {
return 0, err
}
return len(data), nil
}
size, err := es.s.hot.GetSize(c)
switch err {
case bstore.ErrNotFound:
return es.s.cold.GetSize(c)
default:
return size, err
}
}
func (es *exposedSplitStore) Put(blk blocks.Block) error {
return es.s.Put(blk)
}
func (es *exposedSplitStore) PutMany(blks []blocks.Block) error {
return es.s.PutMany(blks)
}
func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return es.s.AllKeysChan(ctx)
}
func (es *exposedSplitStore) HashOnRead(enabled bool) {}
func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error {
if isIdentiyCid(c) {
data, err := decodeIdentityCid(c)
if err != nil {
return err
}
return f(data)
}
err := es.s.hot.View(c, f)
switch err {
case bstore.ErrNotFound:
return es.s.cold.View(c, f)
default:
return err
}
}

View File

@ -0,0 +1,30 @@
package splitstore
import (
"fmt"
"time"
bstore "github.com/filecoin-project/lotus/blockstore"
)
func (s *SplitStore) gcHotstore() {
if err := s.gcBlockstoreOnline(s.hot); err != nil {
log.Warnf("error garbage collecting hostore: %s", err)
}
}
func (s *SplitStore) gcBlockstoreOnline(b bstore.Blockstore) error {
if gc, ok := b.(bstore.BlockstoreGC); ok {
log.Info("garbage collecting blockstore")
startGC := time.Now()
if err := gc.CollectGarbage(); err != nil {
return err
}
log.Infow("garbage collecting hotstore done", "took", time.Since(startGC))
return nil
}
return fmt.Errorf("blockstore doesn't support online gc: %T", b)
}

View File

@ -2,6 +2,7 @@ package splitstore
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -13,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/types/mock"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
datastore "github.com/ipfs/go-datastore" datastore "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync" dssync "github.com/ipfs/go-datastore/sync"
@ -21,22 +23,34 @@ import (
func init() { func init() {
CompactionThreshold = 5 CompactionThreshold = 5
CompactionCold = 1
CompactionBoundary = 2 CompactionBoundary = 2
logging.SetLogLevel("splitstore", "DEBUG") logging.SetLogLevel("splitstore", "DEBUG")
} }
func testSplitStore(t *testing.T, cfg *Config) { func testSplitStore(t *testing.T, cfg *Config) {
chain := &mockChain{t: t} chain := &mockChain{t: t}
// genesis
genBlock := mock.MkBlock(nil, 0, 0)
genTs := mock.TipSet(genBlock)
chain.push(genTs)
// the myriads of stores // the myriads of stores
ds := dssync.MutexWrap(datastore.NewMapDatastore()) ds := dssync.MutexWrap(datastore.NewMapDatastore())
hot := blockstore.NewMemorySync() hot := newMockStore()
cold := blockstore.NewMemorySync() cold := newMockStore()
// this is necessary to avoid the garbage mock puts in the blocks
garbage := blocks.NewBlock([]byte{1, 2, 3})
err := cold.Put(garbage)
if err != nil {
t.Fatal(err)
}
// genesis
genBlock := mock.MkBlock(nil, 0, 0)
genBlock.Messages = garbage.Cid()
genBlock.ParentMessageReceipts = garbage.Cid()
genBlock.ParentStateRoot = garbage.Cid()
genBlock.Timestamp = uint64(time.Now().Unix())
genTs := mock.TipSet(genBlock)
chain.push(genTs)
// put the genesis block to cold store // put the genesis block to cold store
blk, err := genBlock.ToStorageBlock() blk, err := genBlock.ToStorageBlock()
@ -62,12 +76,22 @@ func testSplitStore(t *testing.T, cfg *Config) {
} }
// make some tipsets, but not enough to cause compaction // make some tipsets, but not enough to cause compaction
mkBlock := func(curTs *types.TipSet, i int) *types.TipSet { mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet {
blk := mock.MkBlock(curTs, uint64(i), uint64(i)) blk := mock.MkBlock(curTs, uint64(i), uint64(i))
blk.Messages = garbage.Cid()
blk.ParentMessageReceipts = garbage.Cid()
blk.ParentStateRoot = stateRoot.Cid()
blk.Timestamp = uint64(time.Now().Unix())
sblk, err := blk.ToStorageBlock() sblk, err := blk.ToStorageBlock()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
err = ss.Put(stateRoot)
if err != nil {
t.Fatal(err)
}
err = ss.Put(sblk) err = ss.Put(sblk)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -78,18 +102,6 @@ func testSplitStore(t *testing.T, cfg *Config) {
return ts return ts
} }
mkGarbageBlock := func(curTs *types.TipSet, i int) {
blk := mock.MkBlock(curTs, uint64(i), uint64(i))
sblk, err := blk.ToStorageBlock()
if err != nil {
t.Fatal(err)
}
err = ss.Put(sblk)
if err != nil {
t.Fatal(err)
}
}
waitForCompaction := func() { waitForCompaction := func() {
for atomic.LoadInt32(&ss.compacting) == 1 { for atomic.LoadInt32(&ss.compacting) == 1 {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
@ -98,105 +110,63 @@ func testSplitStore(t *testing.T, cfg *Config) {
curTs := genTs curTs := genTs
for i := 1; i < 5; i++ { for i := 1; i < 5; i++ {
curTs = mkBlock(curTs, i) stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
curTs = mkBlock(curTs, i, stateRoot)
waitForCompaction() waitForCompaction()
} }
mkGarbageBlock(genTs, 1)
// count objects in the cold and hot stores // count objects in the cold and hot stores
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
countBlocks := func(bs blockstore.Blockstore) int { countBlocks := func(bs blockstore.Blockstore) int {
count := 0 count := 0
ch, err := bs.AllKeysChan(ctx) _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error {
if err != nil {
t.Fatal(err)
}
for range ch {
count++ count++
} return nil
})
return count return count
} }
coldCnt := countBlocks(cold) coldCnt := countBlocks(cold)
hotCnt := countBlocks(hot) hotCnt := countBlocks(hot)
if coldCnt != 1 { if coldCnt != 2 {
t.Errorf("expected %d blocks, but got %d", 1, coldCnt) t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
} }
if hotCnt != 5 { if hotCnt != 10 {
t.Errorf("expected %d blocks, but got %d", 5, hotCnt) t.Errorf("expected %d blocks, but got %d", 10, hotCnt)
} }
// trigger a compaction // trigger a compaction
for i := 5; i < 10; i++ { for i := 5; i < 10; i++ {
curTs = mkBlock(curTs, i) stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
curTs = mkBlock(curTs, i, stateRoot)
waitForCompaction() waitForCompaction()
} }
coldCnt = countBlocks(cold) coldCnt = countBlocks(cold)
hotCnt = countBlocks(hot) hotCnt = countBlocks(hot)
if !cfg.EnableFullCompaction { if coldCnt != 5 {
if coldCnt != 5 { t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt)
t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt)
}
if hotCnt != 5 {
t.Errorf("expected %d hot blocks, but got %d", 5, hotCnt)
}
} }
if cfg.EnableFullCompaction && !cfg.EnableGC { if hotCnt != 17 {
if coldCnt != 3 { t.Errorf("expected %d hot blocks, but got %d", 17, hotCnt)
t.Errorf("expected %d cold blocks, but got %d", 3, coldCnt)
}
if hotCnt != 7 {
t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt)
}
}
if cfg.EnableFullCompaction && cfg.EnableGC {
if coldCnt != 2 {
t.Errorf("expected %d cold blocks, but got %d", 2, coldCnt)
}
if hotCnt != 7 {
t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt)
}
} }
// Make sure we can revert without panicking. // Make sure we can revert without panicking.
chain.revert(2) chain.revert(2)
} }
func TestSplitStoreSimpleCompaction(t *testing.T) { func TestSplitStoreCompaction(t *testing.T) {
testSplitStore(t, &Config{TrackingStoreType: "mem"}) testSplitStore(t, &Config{MarkSetType: "map"})
}
func TestSplitStoreFullCompactionWithoutGC(t *testing.T) {
testSplitStore(t, &Config{
TrackingStoreType: "mem",
EnableFullCompaction: true,
})
}
func TestSplitStoreFullCompactionWithGC(t *testing.T) {
testSplitStore(t, &Config{
TrackingStoreType: "mem",
EnableFullCompaction: true,
EnableGC: true,
})
} }
type mockChain struct { type mockChain struct {
t testing.TB t testing.TB
sync.Mutex sync.Mutex
genesis *types.BlockHeader
tipsets []*types.TipSet tipsets []*types.TipSet
listener func(revert []*types.TipSet, apply []*types.TipSet) error listener func(revert []*types.TipSet, apply []*types.TipSet) error
} }
@ -204,6 +174,9 @@ type mockChain struct {
func (c *mockChain) push(ts *types.TipSet) { func (c *mockChain) push(ts *types.TipSet) {
c.Lock() c.Lock()
c.tipsets = append(c.tipsets, ts) c.tipsets = append(c.tipsets, ts)
if c.genesis == nil {
c.genesis = ts.Blocks()[0]
}
c.Unlock() c.Unlock()
if c.listener != nil { if c.listener != nil {
@ -242,7 +215,7 @@ func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _
return nil, fmt.Errorf("bad epoch %d", epoch) return nil, fmt.Errorf("bad epoch %d", epoch)
} }
return c.tipsets[iEpoch-1], nil return c.tipsets[iEpoch], nil
} }
func (c *mockChain) GetHeaviestTipSet() *types.TipSet { func (c *mockChain) GetHeaviestTipSet() *types.TipSet {
@ -256,24 +229,105 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app
c.listener = change c.listener = change
} }
func (c *mockChain) WalkSnapshot(_ context.Context, ts *types.TipSet, epochs abi.ChainEpoch, _ bool, _ bool, f func(cid.Cid) error) error { type mockStore struct {
c.Lock() mx sync.Mutex
defer c.Unlock() set map[cid.Cid]blocks.Block
}
start := int(ts.Height()) - 1 func newMockStore() *mockStore {
end := start - int(epochs) return &mockStore{set: make(map[cid.Cid]blocks.Block)}
if end < 0 { }
end = -1
func (b *mockStore) Has(cid cid.Cid) (bool, error) {
b.mx.Lock()
defer b.mx.Unlock()
_, ok := b.set[cid]
return ok, nil
}
func (b *mockStore) HashOnRead(hor bool) {}
func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) {
b.mx.Lock()
defer b.mx.Unlock()
blk, ok := b.set[cid]
if !ok {
return nil, blockstore.ErrNotFound
} }
for i := start; i > end; i-- { return blk, nil
ts := c.tipsets[i] }
for _, cid := range ts.Cids() {
err := f(cid) func (b *mockStore) GetSize(cid cid.Cid) (int, error) {
if err != nil { blk, err := b.Get(cid)
return err if err != nil {
} return 0, err
}
} }
return len(blk.RawData()), nil
}
func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error {
blk, err := b.Get(cid)
if err != nil {
return err
}
return f(blk.RawData())
}
func (b *mockStore) Put(blk blocks.Block) error {
b.mx.Lock()
defer b.mx.Unlock()
b.set[blk.Cid()] = blk
return nil
}
func (b *mockStore) PutMany(blks []blocks.Block) error {
b.mx.Lock()
defer b.mx.Unlock()
for _, blk := range blks {
b.set[blk.Cid()] = blk
}
return nil
}
func (b *mockStore) DeleteBlock(cid cid.Cid) error {
b.mx.Lock()
defer b.mx.Unlock()
delete(b.set, cid)
return nil
}
func (b *mockStore) DeleteMany(cids []cid.Cid) error {
b.mx.Lock()
defer b.mx.Unlock()
for _, c := range cids {
delete(b.set, c)
}
return nil
}
func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, errors.New("not implemented")
}
func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
b.mx.Lock()
defer b.mx.Unlock()
for c := range b.set {
err := f(c)
if err != nil {
return err
}
}
return nil
}
func (b *mockStore) Close() error {
return nil return nil
} }

View File

@ -0,0 +1,67 @@
package splitstore
import (
"encoding/binary"
"golang.org/x/xerrors"
cid "github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/filecoin-project/go-state-types/abi"
)
func epochToBytes(epoch abi.ChainEpoch) []byte {
return uint64ToBytes(uint64(epoch))
}
func bytesToEpoch(buf []byte) abi.ChainEpoch {
return abi.ChainEpoch(bytesToUint64(buf))
}
func int64ToBytes(i int64) []byte {
return uint64ToBytes(uint64(i))
}
func bytesToInt64(buf []byte) int64 {
return int64(bytesToUint64(buf))
}
func uint64ToBytes(i uint64) []byte {
buf := make([]byte, 16)
n := binary.PutUvarint(buf, i)
return buf[:n]
}
func bytesToUint64(buf []byte) uint64 {
i, _ := binary.Uvarint(buf)
return i
}
func isUnitaryObject(c cid.Cid) bool {
pre := c.Prefix()
switch pre.Codec {
case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed:
return true
default:
return pre.MhType == mh.IDENTITY
}
}
func isIdentiyCid(c cid.Cid) bool {
return c.Prefix().MhType == mh.IDENTITY
}
func decodeIdentityCid(c cid.Cid) ([]byte, error) {
dmh, err := mh.Decode(c.Hash())
if err != nil {
return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err)
}
// sanity check
if dmh.Code != mh.IDENTITY {
return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c)
}
return dmh.Digest, nil
}

View File

@ -0,0 +1,126 @@
package splitstore
import (
"sync/atomic"
"time"
"golang.org/x/xerrors"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/types"
)
// warmup acuiqres the compaction lock and spawns a goroutine to warm up the hotstore;
// this is necessary when we sync from a snapshot or when we enable the splitstore
// on top of an existing blockstore (which becomes the coldstore).
func (s *SplitStore) warmup(curTs *types.TipSet) error {
if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
return xerrors.Errorf("error locking compaction")
}
go func() {
defer atomic.StoreInt32(&s.compacting, 0)
log.Info("warming up hotstore")
start := time.Now()
err := s.doWarmup(curTs)
if err != nil {
log.Errorf("error warming up hotstore: %s", err)
return
}
log.Infow("warm up done", "took", time.Since(start))
}()
return nil
}
// the actual warmup procedure; it walks the chain loading all state roots at the boundary
// and headers all the way up to genesis.
// objects are written in batches so as to minimize overhead.
func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
epoch := curTs.Height()
batchHot := make([]blocks.Block, 0, batchSize)
count := int64(0)
xcount := int64(0)
missing := int64(0)
err := s.walkChain(curTs, epoch, false,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
}
count++
has, err := s.hot.Has(c)
if err != nil {
return err
}
if has {
return nil
}
blk, err := s.cold.Get(c)
if err != nil {
if err == bstore.ErrNotFound {
missing++
return nil
}
return err
}
xcount++
batchHot = append(batchHot, blk)
if len(batchHot) == batchSize {
err = s.hot.PutMany(batchHot)
if err != nil {
return err
}
batchHot = batchHot[:0]
}
return nil
})
if err != nil {
return err
}
if len(batchHot) > 0 {
err = s.hot.PutMany(batchHot)
if err != nil {
return err
}
}
log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)
s.markSetSize = count + count>>2 // overestimate a bit
err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
if err != nil {
log.Warnf("error saving mark set size: %s", err)
}
// save the warmup epoch
err = s.ds.Put(warmupEpochKey, epochToBytes(epoch))
if err != nil {
return xerrors.Errorf("error saving warm up epoch: %w", err)
}
s.mx.Lock()
s.warmupEpoch = epoch
s.mx.Unlock()
// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
if err != nil {
return xerrors.Errorf("error saving compaction index: %w", err)
}
return nil
}

View File

@ -1,109 +0,0 @@
package splitstore
import (
"path/filepath"
"sync"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
cid "github.com/ipfs/go-cid"
)
// TrackingStore is a persistent store that tracks blocks that are added
// to the hotstore, tracking the epoch at which they are written.
type TrackingStore interface {
Put(cid.Cid, abi.ChainEpoch) error
PutBatch([]cid.Cid, abi.ChainEpoch) error
Get(cid.Cid) (abi.ChainEpoch, error)
Delete(cid.Cid) error
DeleteBatch([]cid.Cid) error
ForEach(func(cid.Cid, abi.ChainEpoch) error) error
Sync() error
Close() error
}
// OpenTrackingStore opens a tracking store of the specified type in the
// specified path.
func OpenTrackingStore(path string, ttype string) (TrackingStore, error) {
switch ttype {
case "", "bolt":
return OpenBoltTrackingStore(filepath.Join(path, "tracker.bolt"))
case "mem":
return NewMemTrackingStore(), nil
default:
return nil, xerrors.Errorf("unknown tracking store type %s", ttype)
}
}
// NewMemTrackingStore creates an in-memory tracking store.
// This is only useful for test or situations where you don't want to open the
// real tracking store (eg concurrent read only access on a node's datastore)
func NewMemTrackingStore() *MemTrackingStore {
return &MemTrackingStore{tab: make(map[cid.Cid]abi.ChainEpoch)}
}
// MemTrackingStore is a simple in-memory tracking store
type MemTrackingStore struct {
sync.Mutex
tab map[cid.Cid]abi.ChainEpoch
}
var _ TrackingStore = (*MemTrackingStore)(nil)
func (s *MemTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error {
s.Lock()
defer s.Unlock()
s.tab[cid] = epoch
return nil
}
func (s *MemTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error {
s.Lock()
defer s.Unlock()
for _, cid := range cids {
s.tab[cid] = epoch
}
return nil
}
func (s *MemTrackingStore) Get(cid cid.Cid) (abi.ChainEpoch, error) {
s.Lock()
defer s.Unlock()
epoch, ok := s.tab[cid]
if ok {
return epoch, nil
}
return 0, xerrors.Errorf("missing tracking epoch for %s", cid)
}
func (s *MemTrackingStore) Delete(cid cid.Cid) error {
s.Lock()
defer s.Unlock()
delete(s.tab, cid)
return nil
}
func (s *MemTrackingStore) DeleteBatch(cids []cid.Cid) error {
s.Lock()
defer s.Unlock()
for _, cid := range cids {
delete(s.tab, cid)
}
return nil
}
func (s *MemTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error {
s.Lock()
defer s.Unlock()
for cid, epoch := range s.tab {
err := f(cid, epoch)
if err != nil {
return err
}
}
return nil
}
func (s *MemTrackingStore) Sync() error { return nil }
func (s *MemTrackingStore) Close() error { return nil }

View File

@ -1,120 +0,0 @@
package splitstore
import (
"time"
"golang.org/x/xerrors"
cid "github.com/ipfs/go-cid"
bolt "go.etcd.io/bbolt"
"github.com/filecoin-project/go-state-types/abi"
)
type BoltTrackingStore struct {
db *bolt.DB
bucketId []byte
}
var _ TrackingStore = (*BoltTrackingStore)(nil)
func OpenBoltTrackingStore(path string) (*BoltTrackingStore, error) {
opts := &bolt.Options{
Timeout: 1 * time.Second,
NoSync: true,
}
db, err := bolt.Open(path, 0644, opts)
if err != nil {
return nil, err
}
bucketId := []byte("tracker")
err = db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucketId)
if err != nil {
return xerrors.Errorf("error creating bolt db bucket %s: %w", string(bucketId), err)
}
return nil
})
if err != nil {
_ = db.Close()
return nil, err
}
return &BoltTrackingStore{db: db, bucketId: bucketId}, nil
}
func (s *BoltTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error {
val := epochToBytes(epoch)
return s.db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
return b.Put(cid.Hash(), val)
})
}
func (s *BoltTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error {
val := epochToBytes(epoch)
return s.db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
for _, cid := range cids {
err := b.Put(cid.Hash(), val)
if err != nil {
return err
}
}
return nil
})
}
func (s *BoltTrackingStore) Get(cid cid.Cid) (epoch abi.ChainEpoch, err error) {
err = s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
val := b.Get(cid.Hash())
if val == nil {
return xerrors.Errorf("missing tracking epoch for %s", cid)
}
epoch = bytesToEpoch(val)
return nil
})
return epoch, err
}
func (s *BoltTrackingStore) Delete(cid cid.Cid) error {
return s.db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
return b.Delete(cid.Hash())
})
}
func (s *BoltTrackingStore) DeleteBatch(cids []cid.Cid) error {
return s.db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
for _, cid := range cids {
err := b.Delete(cid.Hash())
if err != nil {
return xerrors.Errorf("error deleting %s", cid)
}
}
return nil
})
}
func (s *BoltTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error {
return s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(s.bucketId)
return b.ForEach(func(k, v []byte) error {
cid := cid.NewCidV1(cid.Raw, k)
epoch := bytesToEpoch(v)
return f(cid, epoch)
})
})
}
func (s *BoltTrackingStore) Sync() error {
return s.db.Sync()
}
func (s *BoltTrackingStore) Close() error {
return s.db.Close()
}

View File

@ -1,130 +0,0 @@
package splitstore
import (
"io/ioutil"
"testing"
cid "github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/filecoin-project/go-state-types/abi"
)
func TestBoltTrackingStore(t *testing.T) {
testTrackingStore(t, "bolt")
}
func testTrackingStore(t *testing.T, tsType string) {
t.Helper()
makeCid := func(key string) cid.Cid {
h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
mustHave := func(s TrackingStore, cid cid.Cid, epoch abi.ChainEpoch) {
val, err := s.Get(cid)
if err != nil {
t.Fatal(err)
}
if val != epoch {
t.Fatal("epoch mismatch")
}
}
mustNotHave := func(s TrackingStore, cid cid.Cid) {
_, err := s.Get(cid)
if err == nil {
t.Fatal("expected error")
}
}
path, err := ioutil.TempDir("", "snoop-test.*")
if err != nil {
t.Fatal(err)
}
s, err := OpenTrackingStore(path, tsType)
if err != nil {
t.Fatal(err)
}
k1 := makeCid("a")
k2 := makeCid("b")
k3 := makeCid("c")
k4 := makeCid("d")
s.Put(k1, 1) //nolint
s.Put(k2, 2) //nolint
s.Put(k3, 3) //nolint
s.Put(k4, 4) //nolint
mustHave(s, k1, 1)
mustHave(s, k2, 2)
mustHave(s, k3, 3)
mustHave(s, k4, 4)
s.Delete(k1) // nolint
s.Delete(k2) // nolint
mustNotHave(s, k1)
mustNotHave(s, k2)
mustHave(s, k3, 3)
mustHave(s, k4, 4)
s.PutBatch([]cid.Cid{k1}, 1) //nolint
s.PutBatch([]cid.Cid{k2}, 2) //nolint
mustHave(s, k1, 1)
mustHave(s, k2, 2)
mustHave(s, k3, 3)
mustHave(s, k4, 4)
allKeys := map[string]struct{}{
k1.String(): {},
k2.String(): {},
k3.String(): {},
k4.String(): {},
}
err = s.ForEach(func(k cid.Cid, _ abi.ChainEpoch) error {
_, ok := allKeys[k.String()]
if !ok {
t.Fatal("unexpected key")
}
delete(allKeys, k.String())
return nil
})
if err != nil {
t.Fatal(err)
}
if len(allKeys) != 0 {
t.Fatal("not all keys were returned")
}
// no close and reopen and ensure the keys still exist
err = s.Close()
if err != nil {
t.Fatal(err)
}
s, err = OpenTrackingStore(path, tsType)
if err != nil {
t.Fatal(err)
}
mustHave(s, k1, 1)
mustHave(s, k2, 2)
mustHave(s, k3, 3)
mustHave(s, k4, 4)
s.Close() //nolint:errcheck
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -297,22 +297,26 @@ func TestForkRefuseCall(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents())
require.NoError(t, err)
parentHeight := pts.Height()
currentHeight := ts.TipSet.TipSet().Height()
// CallWithGas calls _at_ the current tipset.
ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet()) ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet())
switch ts.TipSet.TipSet().Height() { if parentHeight <= testForkHeight && currentHeight >= testForkHeight {
case testForkHeight, testForkHeight + 1:
// If I had a fork, or I _will_ have a fork, it should fail. // If I had a fork, or I _will_ have a fork, it should fail.
require.Equal(t, ErrExpensiveFork, err) require.Equal(t, ErrExpensiveFork, err)
default: } else {
require.NoError(t, err) require.NoError(t, err)
require.True(t, ret.MsgRct.ExitCode.IsSuccess()) require.True(t, ret.MsgRct.ExitCode.IsSuccess())
} }
// Call just runs on the parent state for a tipset, so we only
// expect an error at the fork height. // Call always applies the message to the "next block" after the tipset's parent state.
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet()) ret, err = sm.Call(ctx, m, ts.TipSet.TipSet())
switch ts.TipSet.TipSet().Height() { if parentHeight == testForkHeight {
case testForkHeight + 1:
require.Equal(t, ErrExpensiveFork, err) require.Equal(t, ErrExpensiveFork, err)
default: } else {
require.NoError(t, err) require.NoError(t, err)
require.True(t, ret.MsgRct.ExitCode.IsSuccess()) require.True(t, ret.MsgRct.ExitCode.IsSuccess())
} }

View File

@ -279,7 +279,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
stop, err := node.New(tu.ctx, stop, err := node.New(tu.ctx,
node.FullAPI(&out), node.FullAPI(&out),
node.Online(), node.Base(),
node.Repo(sourceRepo), node.Repo(sourceRepo),
node.MockHost(tu.mn), node.MockHost(tu.mn),
node.Test(), node.Test(),
@ -310,10 +310,11 @@ func (tu *syncTestUtil) addClientNode() int {
var out api.FullNode var out api.FullNode
r := repo.NewMemory(nil)
stop, err := node.New(tu.ctx, stop, err := node.New(tu.ctx,
node.FullAPI(&out), node.FullAPI(&out),
node.Online(), node.Base(),
node.Repo(repo.NewMemory(nil)), node.Repo(r),
node.MockHost(tu.mn), node.MockHost(tu.mn),
node.Test(), node.Test(),

View File

@ -45,7 +45,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/tablewriter" "github.com/filecoin-project/lotus/lib/tablewriter"
) )
@ -1234,7 +1233,6 @@ var clientListRetrievalsCmd = &cli.Command{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Usage: "use color in display output", Usage: "use color in display output",
Value: cliutil.DefaultColorUse,
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.BoolFlag{ &cli.BoolFlag{
@ -1252,6 +1250,10 @@ var clientListRetrievalsCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -1260,7 +1262,6 @@ var clientListRetrievalsCmd = &cli.Command{
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
verbose := cctx.Bool("verbose") verbose := cctx.Bool("verbose")
color := cctx.Bool("color")
watch := cctx.Bool("watch") watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed") showFailed := cctx.Bool("show-failed")
completed := cctx.Bool("completed") completed := cctx.Bool("completed")
@ -1280,7 +1281,7 @@ var clientListRetrievalsCmd = &cli.Command{
tm.Clear() tm.Clear()
tm.MoveCursor(1, 1) tm.MoveCursor(1, 1)
err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, color, showFailed, completed) err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed)
if err != nil { if err != nil {
return err return err
} }
@ -1306,7 +1307,7 @@ var clientListRetrievalsCmd = &cli.Command{
} }
} }
return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, color, showFailed, completed) return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, showFailed, completed)
}, },
} }
@ -1314,7 +1315,7 @@ func isTerminalError(status retrievalmarket.DealStatus) bool {
// should patch this in go-fil-markets but to solve the problem immediate and not have buggy output // should patch this in go-fil-markets but to solve the problem immediate and not have buggy output
return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled
} }
func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, color bool, showFailed bool, completed bool) error { func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error {
var deals []api.RetrievalInfo var deals []api.RetrievalInfo
for _, deal := range localDeals { for _, deal := range localDeals {
if !showFailed && isTerminalError(deal.Status) { if !showFailed && isTerminalError(deal.Status) {
@ -1350,13 +1351,13 @@ func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.
w := tablewriter.New(tableColumns...) w := tablewriter.New(tableColumns...)
for _, d := range deals { for _, d := range deals {
w.Write(toRetrievalOutput(d, color, verbose)) w.Write(toRetrievalOutput(d, verbose))
} }
return w.Flush(out) return w.Flush(out)
} }
func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string]interface{} { func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} {
payloadCID := d.PayloadCID.String() payloadCID := d.PayloadCID.String()
provider := d.Provider.String() provider := d.Provider.String()
@ -1369,7 +1370,7 @@ func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string
"PayloadCID": payloadCID, "PayloadCID": payloadCID,
"DealId": d.ID, "DealId": d.ID,
"Provider": provider, "Provider": provider,
"Status": retrievalStatusString(color, d.Status), "Status": retrievalStatusString(d.Status),
"PricePerByte": types.FIL(d.PricePerByte), "PricePerByte": types.FIL(d.PricePerByte),
"Received": units.BytesSize(float64(d.BytesReceived)), "Received": units.BytesSize(float64(d.BytesReceived)),
"TotalPaid": types.FIL(d.TotalPaid), "TotalPaid": types.FIL(d.TotalPaid),
@ -1399,19 +1400,17 @@ func toRetrievalOutput(d api.RetrievalInfo, color bool, verbose bool) map[string
return retrievalOutput return retrievalOutput
} }
func retrievalStatusString(c bool, status retrievalmarket.DealStatus) string { func retrievalStatusString(status retrievalmarket.DealStatus) string {
s := retrievalmarket.DealStatuses[status] s := retrievalmarket.DealStatuses[status]
if !c {
switch {
case isTerminalError(status):
return color.RedString(s)
case retrievalmarket.IsTerminalSuccess(status):
return color.GreenString(s)
default:
return s return s
} }
if isTerminalError(status) {
return color.RedString(s)
}
if retrievalmarket.IsTerminalSuccess(status) {
return color.GreenString(s)
}
return s
} }
var clientInspectDealCmd = &cli.Command{ var clientInspectDealCmd = &cli.Command{
@ -1808,7 +1807,6 @@ var clientListDeals = &cli.Command{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Usage: "use color in display output", Usage: "use color in display output",
Value: cliutil.DefaultColorUse,
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.BoolFlag{ &cli.BoolFlag{
@ -1821,6 +1819,10 @@ var clientListDeals = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -1829,7 +1831,6 @@ var clientListDeals = &cli.Command{
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
verbose := cctx.Bool("verbose") verbose := cctx.Bool("verbose")
color := cctx.Bool("color")
watch := cctx.Bool("watch") watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed") showFailed := cctx.Bool("show-failed")
@ -1848,7 +1849,7 @@ var clientListDeals = &cli.Command{
tm.Clear() tm.Clear()
tm.MoveCursor(1, 1) tm.MoveCursor(1, 1)
err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed) err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed)
if err != nil { if err != nil {
return err return err
} }
@ -1874,7 +1875,7 @@ var clientListDeals = &cli.Command{
} }
} }
return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, color, showFailed) return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed)
}, },
} }
@ -1897,7 +1898,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS
} }
} }
func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error { func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error {
sort.Slice(localDeals, func(i, j int) bool { sort.Slice(localDeals, func(i, j int) bool {
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime) return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
}) })
@ -1949,7 +1950,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode,
d.LocalDeal.ProposalCid, d.LocalDeal.ProposalCid,
d.LocalDeal.DealID, d.LocalDeal.DealID,
d.LocalDeal.Provider, d.LocalDeal.Provider,
dealStateString(color, d.LocalDeal.State), dealStateString(d.LocalDeal.State),
onChain, onChain,
slashed, slashed,
d.LocalDeal.PieceCID, d.LocalDeal.PieceCID,
@ -1998,7 +1999,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode,
"DealCid": propcid, "DealCid": propcid,
"DealId": d.LocalDeal.DealID, "DealId": d.LocalDeal.DealID,
"Provider": d.LocalDeal.Provider, "Provider": d.LocalDeal.Provider,
"State": dealStateString(color, d.LocalDeal.State), "State": dealStateString(d.LocalDeal.State),
"On Chain?": onChain, "On Chain?": onChain,
"Slashed?": slashed, "Slashed?": slashed,
"PieceCID": piece, "PieceCID": piece,
@ -2013,12 +2014,8 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode,
return w.Flush(out) return w.Flush(out)
} }
func dealStateString(c bool, state storagemarket.StorageDealStatus) string { func dealStateString(state storagemarket.StorageDealStatus) string {
s := storagemarket.DealStates[state] s := storagemarket.DealStates[state]
if !c {
return s
}
switch state { switch state {
case storagemarket.StorageDealError, storagemarket.StorageDealExpired: case storagemarket.StorageDealError, storagemarket.StorageDealExpired:
return color.RedString(s) return color.RedString(s)
@ -2339,7 +2336,6 @@ var clientListTransfers = &cli.Command{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Usage: "use color in display output", Usage: "use color in display output",
Value: cliutil.DefaultColorUse,
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.BoolFlag{ &cli.BoolFlag{
@ -2356,6 +2352,10 @@ var clientListTransfers = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -2370,7 +2370,6 @@ var clientListTransfers = &cli.Command{
verbose := cctx.Bool("verbose") verbose := cctx.Bool("verbose")
completed := cctx.Bool("completed") completed := cctx.Bool("completed")
color := cctx.Bool("color")
watch := cctx.Bool("watch") watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed") showFailed := cctx.Bool("show-failed")
if watch { if watch {
@ -2384,7 +2383,7 @@ var clientListTransfers = &cli.Command{
tm.MoveCursor(1, 1) tm.MoveCursor(1, 1)
OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed)
tm.Flush() tm.Flush()
@ -2409,13 +2408,13 @@ var clientListTransfers = &cli.Command{
} }
} }
} }
OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed)
return nil return nil
}, },
} }
// OutputDataTransferChannels generates table output for a list of channels // OutputDataTransferChannels generates table output for a list of channels
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) { func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) {
sort.Slice(channels, func(i, j int) bool { sort.Slice(channels, func(i, j int) bool {
return channels[i].TransferID < channels[j].TransferID return channels[i].TransferID < channels[j].TransferID
}) })
@ -2445,7 +2444,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
tablewriter.Col("Voucher"), tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message")) tablewriter.NewLineCol("Message"))
for _, channel := range sendingChannels { for _, channel := range sendingChannels {
w.Write(toChannelOutput(color, "Sending To", channel, verbose)) w.Write(toChannelOutput("Sending To", channel, verbose))
} }
w.Flush(out) //nolint:errcheck w.Flush(out) //nolint:errcheck
@ -2459,17 +2458,13 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
tablewriter.Col("Voucher"), tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message")) tablewriter.NewLineCol("Message"))
for _, channel := range receivingChannels { for _, channel := range receivingChannels {
w.Write(toChannelOutput(color, "Receiving From", channel, verbose)) w.Write(toChannelOutput("Receiving From", channel, verbose))
} }
w.Flush(out) //nolint:errcheck w.Flush(out) //nolint:errcheck
} }
func channelStatusString(useColor bool, status datatransfer.Status) string { func channelStatusString(status datatransfer.Status) string {
s := datatransfer.Statuses[status] s := datatransfer.Statuses[status]
if !useColor {
return s
}
switch status { switch status {
case datatransfer.Failed, datatransfer.Cancelled: case datatransfer.Failed, datatransfer.Cancelled:
return color.RedString(s) return color.RedString(s)
@ -2480,7 +2475,7 @@ func channelStatusString(useColor bool, status datatransfer.Status) string {
} }
} }
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} {
rootCid := channel.BaseCID.String() rootCid := channel.BaseCID.String()
otherParty := channel.OtherPeer.String() otherParty := channel.OtherPeer.String()
if !verbose { if !verbose {
@ -2500,7 +2495,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
return map[string]interface{}{ return map[string]interface{}{
"ID": channel.TransferID, "ID": channel.TransferID,
"Status": channelStatusString(useColor, channel.Status), "Status": channelStatusString(channel.Status),
otherPartyColumn: otherParty, otherPartyColumn: otherParty,
"Root Cid": rootCid, "Root Cid": rootCid,
"Initiated?": initiated, "Initiated?": initiated,

View File

@ -3,10 +3,13 @@ package cli
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"time" "time"
"github.com/fatih/color"
"github.com/hako/durafmt" "github.com/hako/durafmt"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/mattn/go-isatty"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -15,6 +18,13 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
// Set the global default, to be overridden by individual cli flags in order
func init() {
color.NoColor = os.Getenv("GOLOG_LOG_FMT") != "color" &&
!isatty.IsTerminal(os.Stdout.Fd()) &&
!isatty.IsCygwinTerminal(os.Stdout.Fd())
}
func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) { func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) {
var headers []*types.BlockHeader var headers []*types.BlockHeader
for _, c := range vals { for _, c := range vals {

View File

@ -1,14 +0,0 @@
package cliutil
import (
"os"
"github.com/mattn/go-isatty"
)
// DefaultColorUse is the globally referenced variable for all Lotus CLI tools
// It sets to `true` if STDOUT is a TTY or if the variable GOLOG_LOG_FMT is set
// to color (as recognizd by github.com/ipfs/go-log/v2)
var DefaultColorUse = os.Getenv("GOLOG_LOG_FMT") == "color" ||
isatty.IsTerminal(os.Stdout.Fd()) ||
isatty.IsCygwinTerminal(os.Stdout.Fd())

View File

@ -20,7 +20,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/tablewriter" "github.com/filecoin-project/lotus/lib/tablewriter"
) )
@ -267,12 +266,14 @@ var actorControlList = &cli.Command{
}, },
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
var maddr address.Address var maddr address.Address
if act := cctx.String("actor"); act != "" { if act := cctx.String("actor"); act != "" {

View File

@ -1,10 +1,16 @@
package main package main
import ( import (
"context"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"math/rand" "math/rand"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -18,6 +24,7 @@ var electionCmd = &cli.Command{
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
electionRunDummy, electionRunDummy,
electionEstimate, electionEstimate,
electionBacktest,
}, },
} }
@ -124,3 +131,97 @@ var electionEstimate = &cli.Command{
return nil return nil
}, },
} }
var electionBacktest = &cli.Command{
Name: "backtest",
Usage: "Backtest elections with given miner",
ArgsUsage: "[minerAddress]",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "height",
Usage: "blockchain head height",
},
&cli.IntFlag{
Name: "count",
Usage: "number of won elections to look for",
Value: 120,
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("GetFullNodeAPI: %w", err)
}
defer closer()
ctx := lcli.ReqContext(cctx)
var head *types.TipSet
if cctx.IsSet("height") {
head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("height")), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("ChainGetTipSetByHeight: %w", err)
}
} else {
head, err = api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("ChainHead: %w", err)
}
}
miner, err := address.NewFromString(cctx.Args().First())
if err != nil {
return xerrors.Errorf("miner address: %w", err)
}
count := cctx.Int("count")
if count < 1 {
return xerrors.Errorf("count: %d", count)
}
fmt.Println("height, winCount")
roundEnd := head.Height() + abi.ChainEpoch(1)
for i := 0; i < count; {
for round := head.Height() + abi.ChainEpoch(1); round <= roundEnd; round++ {
i++
win, err := backTestWinner(ctx, miner, round, head, api)
if err == nil && win != nil {
fmt.Printf("%d, %d\n", round, win.WinCount)
}
}
roundEnd = head.Height()
head, err = api.ChainGetTipSet(ctx, head.Parents())
if err != nil {
break
}
}
return nil
},
}
func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainEpoch, ts *types.TipSet, api v0api.FullNode) (*types.ElectionProof, error) {
mbi, err := api.MinerGetBaseInfo(ctx, miner, round, ts.Key())
if err != nil {
return nil, xerrors.Errorf("failed to get mining base info: %w", err)
}
if mbi == nil {
return nil, nil
}
if !mbi.EligibleForMining {
return nil, nil
}
brand := mbi.PrevBeaconEntry
bvals := mbi.BeaconEntries
if len(bvals) > 0 {
brand = bvals[len(bvals)-1]
}
winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api)
if err != nil {
return nil, xerrors.Errorf("failed to check if we win next round: %w", err)
}
return winner, nil
}

View File

@ -2,10 +2,12 @@ package main
import ( import (
"encoding/json" "encoding/json"
corebig "math/big"
"os" "os"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
filbig "github.com/filecoin-project/go-state-types/big"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -21,13 +23,16 @@ type networkTotalsOutput struct {
} }
type networkTotals struct { type networkTotals struct {
UniqueCids int `json:"total_unique_cids"` QaNetworkPower filbig.Int `json:"total_qa_power"`
UniqueProviders int `json:"total_unique_providers"` RawNetworkPower filbig.Int `json:"total_raw_capacity"`
UniqueClients int `json:"total_unique_clients"` CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"`
TotalDeals int `json:"total_num_deals"` UniqueCids int `json:"total_unique_cids"`
TotalBytes int64 `json:"total_stored_data_size"` UniqueProviders int `json:"total_unique_providers"`
FilplusTotalDeals int `json:"filplus_total_num_deals"` UniqueClients int `json:"total_unique_clients"`
FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` TotalDeals int `json:"total_num_deals"`
TotalBytes int64 `json:"total_stored_data_size"`
FilplusTotalDeals int `json:"filplus_total_num_deals"`
FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"`
seenClient map[address.Address]bool seenClient map[address.Address]bool
seenProvider map[address.Address]bool seenProvider map[address.Address]bool
@ -66,10 +71,17 @@ var storageStatsCmd = &cli.Command{
return err return err
} }
power, err := api.StateMinerPower(ctx, address.Address{}, head.Key())
if err != nil {
return err
}
netTotals := networkTotals{ netTotals := networkTotals{
seenClient: make(map[address.Address]bool), QaNetworkPower: power.TotalPower.QualityAdjPower,
seenProvider: make(map[address.Address]bool), RawNetworkPower: power.TotalPower.RawBytePower,
seenPieceCid: make(map[cid.Cid]bool), seenClient: make(map[address.Address]bool),
seenProvider: make(map[address.Address]bool),
seenPieceCid: make(map[cid.Cid]bool),
} }
deals, err := api.StateMarketDeals(ctx, head.Key()) deals, err := api.StateMarketDeals(ctx, head.Key())
@ -103,6 +115,11 @@ var storageStatsCmd = &cli.Command{
netTotals.UniqueClients = len(netTotals.seenClient) netTotals.UniqueClients = len(netTotals.seenClient)
netTotals.UniqueProviders = len(netTotals.seenProvider) netTotals.UniqueProviders = len(netTotals.seenProvider)
netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac(
corebig.NewInt(netTotals.TotalBytes),
netTotals.RawNetworkPower.Int,
).Float64()
return json.NewEncoder(os.Stdout).Encode( return json.NewEncoder(os.Stdout).Encode(
networkTotalsOutput{ networkTotalsOutput{
Epoch: int64(head.Height()), Epoch: int64(head.Height()),

View File

@ -26,7 +26,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/tablewriter" "github.com/filecoin-project/lotus/lib/tablewriter"
) )
@ -390,12 +389,14 @@ var actorControlList = &cli.Command{
}, },
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {

View File

@ -49,7 +49,7 @@ func TestMinerAllInfo(t *testing.T) {
t.Run("pre-info-all", run) t.Run("pre-info-all", run)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath) kit.AssertFilesEqual(t, inPath, outPath)

View File

@ -5,7 +5,10 @@ import (
"fmt" "fmt"
"math" "math"
corebig "math/big" corebig "math/big"
"os"
"sort" "sort"
"strings"
"text/tabwriter"
"time" "time"
"github.com/fatih/color" "github.com/fatih/color"
@ -14,6 +17,7 @@ import (
cbor "github.com/ipfs/go-ipld-cbor" cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
@ -45,8 +49,6 @@ var infoCmd = &cli.Command{
} }
func infoCmdAct(cctx *cli.Context) error { func infoCmdAct(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color")
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -222,29 +224,89 @@ func infoCmdAct(cctx *cli.Context) error {
return err return err
} }
var nactiveDeals, nVerifDeals, ndeals uint64 type dealStat struct {
var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize count, verifCount int
for _, deal := range deals { bytes, verifBytes uint64
if deal.State == storagemarket.StorageDealError { }
continue dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) {
} ds.count++
ds.bytes += uint64(deal.Proposal.PieceSize)
ndeals++ if deal.Proposal.VerifiedDeal {
dealBytes += deal.Proposal.PieceSize ds.verifCount++
ds.verifBytes += uint64(deal.Proposal.PieceSize)
if deal.State == storagemarket.StorageDealActive {
nactiveDeals++
activeDealBytes += deal.Proposal.PieceSize
if deal.Proposal.VerifiedDeal {
nVerifDeals++
activeVerifDealBytes += deal.Proposal.PieceSize
}
} }
} }
fmt.Printf("Deals: %d, %s\n", ndeals, types.SizeStr(types.NewInt(uint64(dealBytes)))) showDealStates := map[storagemarket.StorageDealStatus]struct{}{
fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes)))) storagemarket.StorageDealActive: {},
storagemarket.StorageDealTransferring: {},
storagemarket.StorageDealStaged: {},
storagemarket.StorageDealAwaitingPreCommit: {},
storagemarket.StorageDealSealing: {},
storagemarket.StorageDealPublish: {},
storagemarket.StorageDealCheckForAcceptance: {},
storagemarket.StorageDealPublishing: {},
}
var total dealStat
perState := map[storagemarket.StorageDealStatus]*dealStat{}
for _, deal := range deals {
if _, ok := showDealStates[deal.State]; !ok {
continue
}
if perState[deal.State] == nil {
perState[deal.State] = new(dealStat)
}
dsAdd(&total, deal)
dsAdd(perState[deal.State], deal)
}
type wstr struct {
str string
status storagemarket.StorageDealStatus
}
sorted := make([]wstr, 0, len(perState))
for status, stat := range perState {
st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal")
sorted = append(sorted, wstr{
str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))),
status: status,
},
)
}
sort.Slice(sorted, func(i, j int) bool {
if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive {
return sorted[i].status == storagemarket.StorageDealActive
}
return sorted[i].status > sorted[j].status
})
fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes)))
tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
for _, e := range sorted {
_, _ = tw.Write([]byte(e.str))
}
_ = tw.Flush()
fmt.Println()
retrievals, err := nodeApi.MarketListRetrievalDeals(ctx)
if err != nil {
return xerrors.Errorf("getting retrieval deal list: %w", err)
}
var retrComplete dealStat
for _, retrieval := range retrievals {
if retrieval.Status == retrievalmarket.DealStatusCompleted {
retrComplete.count++
retrComplete.bytes += retrieval.TotalSent
}
}
fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, types.SizeStr(types.NewInt(retrComplete.bytes)))
fmt.Println() fmt.Println()
spendable := big.Zero() spendable := big.Zero()

View File

@ -121,7 +121,8 @@ var initCmd = &cli.Command{
}, },
}, },
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
initRestoreCmd, restoreCmd,
serviceCmd,
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner") log.Info("Initializing lotus miner")
@ -317,10 +318,10 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string
Size: abi.PaddedPieceSize(meta.SectorSize), Size: abi.PaddedPieceSize(meta.SectorSize),
PieceCID: commD, PieceCID: commD,
}, },
DealInfo: &sealing.DealInfo{ DealInfo: &lapi.PieceDealInfo{
DealID: dealID, DealID: dealID,
DealProposal: &sector.Deal, DealProposal: &sector.Deal,
DealSchedule: sealing.DealSchedule{ DealSchedule: lapi.DealSchedule{
StartEpoch: sector.Deal.StartEpoch, StartEpoch: sector.Deal.StartEpoch,
EndEpoch: sector.Deal.EndEpoch, EndEpoch: sector.Deal.EndEpoch,
}, },
@ -470,7 +471,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
AllowCommit: true, AllowCommit: true,
AllowUnseal: true, AllowUnseal: true,
}, wsts, smsts) }, wsts, smsts)
if err != nil { if err != nil {
return err return err
} }
@ -734,6 +734,8 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID,
return retval.IDAddress, nil return retval.IDAddress, nil
} }
// checkV1ApiSupport uses v0 api version to signal support for v1 API
// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons
func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error { func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error {
// check v0 api version to make sure it supports v1 api // check v0 api version to make sure it supports v1 api
api0, closer, err := lcli.GetFullNodeAPI(cctx) api0, closer, err := lcli.GetFullNodeAPI(cctx)

View File

@ -22,6 +22,7 @@ import (
lapi "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
@ -30,7 +31,7 @@ import (
"github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo"
) )
var initRestoreCmd = &cli.Command{ var restoreCmd = &cli.Command{
Name: "restore", Name: "restore",
Usage: "Initialize a lotus miner repo from a backup", Usage: "Initialize a lotus miner repo from a backup",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -49,129 +50,11 @@ var initRestoreCmd = &cli.Command{
}, },
ArgsUsage: "[backupFile]", ArgsUsage: "[backupFile]",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner using a backup")
if cctx.Args().Len() != 1 {
return xerrors.Errorf("expected 1 argument")
}
ctx := lcli.ReqContext(cctx) ctx := lcli.ReqContext(cctx)
log.Info("Initializing lotus miner using a backup")
log.Info("Trying to connect to full node RPC") var storageCfg *stores.StorageConfig
if err := checkV1ApiSupport(ctx, cctx); err != nil {
return err
}
api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
defer closer()
log.Info("Checking full node version")
v, err := api.Version(ctx)
if err != nil {
return err
}
if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
}
if !cctx.Bool("nosync") {
if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}
bf, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expand backup file path: %w", err)
}
st, err := os.Stat(bf)
if err != nil {
return xerrors.Errorf("stat backup file (%s): %w", bf, err)
}
f, err := os.Open(bf)
if err != nil {
return xerrors.Errorf("opening backup file: %w", err)
}
defer f.Close() // nolint:errcheck
log.Info("Checking if repo exists")
repoPath := cctx.String(FlagMinerRepo)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if ok {
return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
}
log.Info("Initializing repo")
if err := r.Init(repo.StorageMiner); err != nil {
return err
}
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
if cctx.IsSet("config") {
log.Info("Restoring config")
cf, err := homedir.Expand(cctx.String("config"))
if err != nil {
return xerrors.Errorf("expanding config path: %w", err)
}
_, err = os.Stat(cf)
if err != nil {
return xerrors.Errorf("stat config file (%s): %w", cf, err)
}
var cerr error
err = lr.SetConfig(func(raw interface{}) {
rcfg, ok := raw.(*config.StorageMiner)
if !ok {
cerr = xerrors.New("expected miner config")
return
}
ff, err := config.FromFile(cf, rcfg)
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return
}
*rcfg = *ff.(*config.StorageMiner)
})
if cerr != nil {
return cerr
}
if err != nil {
return xerrors.Errorf("setting config: %w", err)
}
} else {
log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
}
if cctx.IsSet("storage-config") { if cctx.IsSet("storage-config") {
log.Info("Restoring storage path config")
cf, err := homedir.Expand(cctx.String("storage-config")) cf, err := homedir.Expand(cctx.String("storage-config"))
if err != nil { if err != nil {
return xerrors.Errorf("expanding storage config path: %w", err) return xerrors.Errorf("expanding storage config path: %w", err)
@ -182,101 +65,233 @@ var initRestoreCmd = &cli.Command{
return xerrors.Errorf("reading storage config: %w", err) return xerrors.Errorf("reading storage config: %w", err)
} }
var cerr error storageCfg = &stores.StorageConfig{}
err = lr.SetStorage(func(scfg *stores.StorageConfig) { err = json.Unmarshal(cfb, storageCfg)
cerr = json.Unmarshal(cfb, scfg)
})
if cerr != nil {
return xerrors.Errorf("unmarshalling storage config: %w", cerr)
}
if err != nil { if err != nil {
return xerrors.Errorf("setting storage config: %w", err) return xerrors.Errorf("cannot unmarshal json for storage config: %w", err)
} }
} else {
log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
} }
log.Info("Restoring metadata backup") if err := restore(ctx, cctx, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
log.Info("Checking proof parameters")
mds, err := lr.Datastore(context.TODO(), "/metadata") if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err)
return err }
}
bar := pb.New64(st.Size()) log.Info("Configuring miner actor")
br := bar.NewProxyReader(f)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
bar.Start() if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
err = backupds.RestoreInto(br, mds) return err
bar.Finish() }
if err != nil { return nil
return xerrors.Errorf("restoring metadata: %w", err) }); err != nil {
}
log.Info("Checking actor metadata")
abytes, err := mds.Get(datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
}
maddr, err := address.NewFromBytes(abytes)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}
log.Info("ACTOR ADDRESS: ", maddr.String())
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))
wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("resolving worker key: %w", err)
}
has, err := api.WalletHas(ctx, wk)
if err != nil {
return xerrors.Errorf("checking worker address: %w", err)
}
if !has {
return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
}
log.Info("Checking proof parameters")
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
log.Info("Initializing libp2p identity")
p2pSk, err := makeHostKey(lr)
if err != nil {
return xerrors.Errorf("make host key: %w", err)
}
peerid, err := peer.IDFromPrivateKey(p2pSk)
if err != nil {
return xerrors.Errorf("peer ID from private key: %w", err)
}
log.Info("Configuring miner actor")
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
return err return err
} }
return nil return nil
}, },
} }
func restore(ctx context.Context, cctx *cli.Context, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error {
if cctx.Args().Len() != 1 {
return xerrors.Errorf("expected 1 argument")
}
log.Info("Trying to connect to full node RPC")
api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
defer closer()
log.Info("Checking full node version")
v, err := api.Version(ctx)
if err != nil {
return err
}
if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
}
if !cctx.Bool("nosync") {
if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}
bf, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expand backup file path: %w", err)
}
st, err := os.Stat(bf)
if err != nil {
return xerrors.Errorf("stat backup file (%s): %w", bf, err)
}
f, err := os.Open(bf)
if err != nil {
return xerrors.Errorf("opening backup file: %w", err)
}
defer f.Close() // nolint:errcheck
log.Info("Checking if repo exists")
repoPath := cctx.String(FlagMinerRepo)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if ok {
return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
}
log.Info("Initializing repo")
if err := r.Init(repo.StorageMiner); err != nil {
return err
}
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
if cctx.IsSet("config") {
log.Info("Restoring config")
cf, err := homedir.Expand(cctx.String("config"))
if err != nil {
return xerrors.Errorf("expanding config path: %w", err)
}
_, err = os.Stat(cf)
if err != nil {
return xerrors.Errorf("stat config file (%s): %w", cf, err)
}
var cerr error
err = lr.SetConfig(func(raw interface{}) {
rcfg, ok := raw.(*config.StorageMiner)
if !ok {
cerr = xerrors.New("expected miner config")
return
}
ff, err := config.FromFile(cf, rcfg)
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return
}
*rcfg = *ff.(*config.StorageMiner)
if manageConfig != nil {
cerr = manageConfig(rcfg)
}
})
if cerr != nil {
return cerr
}
if err != nil {
return xerrors.Errorf("setting config: %w", err)
}
} else {
log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
}
if strConfig != nil {
log.Info("Restoring storage path config")
err = lr.SetStorage(func(scfg *stores.StorageConfig) {
*scfg = *strConfig
})
if err != nil {
return xerrors.Errorf("setting storage config: %w", err)
}
} else {
log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
}
log.Info("Restoring metadata backup")
mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}
bar := pb.New64(st.Size())
br := bar.NewProxyReader(f)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
bar.Start()
err = backupds.RestoreInto(br, mds)
bar.Finish()
if err != nil {
return xerrors.Errorf("restoring metadata: %w", err)
}
log.Info("Checking actor metadata")
abytes, err := mds.Get(datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
}
maddr, err := address.NewFromBytes(abytes)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}
log.Info("ACTOR ADDRESS: ", maddr.String())
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))
wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("resolving worker key: %w", err)
}
has, err := api.WalletHas(ctx, wk)
if err != nil {
return xerrors.Errorf("checking worker address: %w", err)
}
if !has {
return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
}
log.Info("Initializing libp2p identity")
p2pSk, err := makeHostKey(lr)
if err != nil {
return xerrors.Errorf("make host key: %w", err)
}
peerid, err := peer.IDFromPrivateKey(p2pSk)
if err != nil {
return xerrors.Errorf("peer ID from private key: %w", err)
}
return after(api, maddr, peerid, mi)
}

View File

@ -0,0 +1,152 @@
package main
import (
"context"
"strings"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/node/config"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
const (
MarketsService = "markets"
)
var serviceCmd = &cli.Command{
Name: "service",
Usage: "Initialize a lotus miner sub-service",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "config",
Usage: "config file (config.toml)",
Required: true,
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
&cli.StringSliceFlag{
Name: "type",
Usage: "type of service to be enabled",
},
&cli.StringFlag{
Name: "api-sealer",
Usage: "sealer API info (lotus-miner auth api-info --perm=admin)",
},
&cli.StringFlag{
Name: "api-sector-index",
Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)",
},
},
ArgsUsage: "[backupFile]",
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
log.Info("Initializing lotus miner service")
es := EnabledServices(cctx.StringSlice("type"))
if len(es) == 0 {
return xerrors.Errorf("at least one module must be enabled")
}
// we should remove this as soon as we have more service types and not just `markets`
if !es.Contains(MarketsService) {
return xerrors.Errorf("markets module must be enabled")
}
if !cctx.IsSet("api-sealer") {
return xerrors.Errorf("--api-sealer is required without the sealer module enabled")
}
if !cctx.IsSet("api-sector-index") {
return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled")
}
if err := restore(ctx, cctx, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error {
cfg.Subsystems.EnableMarkets = es.Contains(MarketsService)
cfg.Subsystems.EnableMining = false
cfg.Subsystems.EnableSealing = false
cfg.Subsystems.EnableSectorStorage = false
if !cfg.Subsystems.EnableSealing {
ai, err := checkApiInfo(ctx, cctx.String("api-sealer"))
if err != nil {
return xerrors.Errorf("checking sealer API: %w", err)
}
cfg.Subsystems.SealerApiInfo = ai
}
if !cfg.Subsystems.EnableSectorStorage {
ai, err := checkApiInfo(ctx, cctx.String("api-sector-index"))
if err != nil {
return xerrors.Errorf("checking sector index API: %w", err)
}
cfg.Subsystems.SectorIndexApiInfo = ai
}
return nil
}, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
if es.Contains(MarketsService) {
log.Info("Configuring miner actor")
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
return nil
},
}
type EnabledServices []string
func (es EnabledServices) Contains(name string) bool {
for _, s := range es {
if s == name {
return true
}
}
return false
}
func checkApiInfo(ctx context.Context, ai string) (string, error) {
ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=")
info := cliutil.ParseApiInfo(ai)
addr, err := info.DialArgs("v0")
if err != nil {
return "", xerrors.Errorf("could not get DialArgs: %w", err)
}
log.Infof("Checking api version of %s", addr)
api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
if err != nil {
return "", err
}
defer closer()
v, err := api.Version(ctx)
if err != nil {
return "", xerrors.Errorf("checking version: %w", err)
}
if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) {
return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion)
}
return ai, nil
}

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/fatih/color"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"go.opencensus.io/trace" "go.opencensus.io/trace"
@ -13,7 +14,6 @@ import (
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/lib/tracing" "github.com/filecoin-project/lotus/lib/tracing"
"github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo"
@ -62,9 +62,14 @@ func main() {
trace.UnregisterExporter(jaeger) trace.UnregisterExporter(jaeger)
jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name)
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
if originBefore != nil { if originBefore != nil {
return originBefore(cctx) return originBefore(cctx)
} }
return nil return nil
} }
} }
@ -82,8 +87,9 @@ func main() {
Aliases: []string{"a"}, Aliases: []string{"a"},
}, },
&cli.BoolFlag{ &cli.BoolFlag{
// examined in the Before above
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.StringFlag{ &cli.StringFlag{

View File

@ -15,6 +15,7 @@ import (
tm "github.com/buger/goterm" tm "github.com/buger/goterm"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/fatih/color"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc" "github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
@ -30,7 +31,6 @@ import (
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
) )
var CidBaseFlag = cli.StringFlag{ var CidBaseFlag = cli.StringFlag{
@ -755,7 +755,6 @@ var transfersListCmd = &cli.Command{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Usage: "use color in display output", Usage: "use color in display output",
Value: cliutil.DefaultColorUse,
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.BoolFlag{ &cli.BoolFlag{
@ -772,6 +771,10 @@ var transfersListCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
api, closer, err := lcli.GetStorageMinerAPI(cctx) api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -786,7 +789,6 @@ var transfersListCmd = &cli.Command{
verbose := cctx.Bool("verbose") verbose := cctx.Bool("verbose")
completed := cctx.Bool("completed") completed := cctx.Bool("completed")
color := cctx.Bool("color")
watch := cctx.Bool("watch") watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed") showFailed := cctx.Bool("show-failed")
if watch { if watch {
@ -800,7 +802,7 @@ var transfersListCmd = &cli.Command{
tm.MoveCursor(1, 1) tm.MoveCursor(1, 1)
lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed)
tm.Flush() tm.Flush()
@ -825,7 +827,7 @@ var transfersListCmd = &cli.Command{
} }
} }
} }
lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed)
return nil return nil
}, },
} }

View File

@ -36,8 +36,6 @@ var provingFaultsCmd = &cli.Command{
Name: "faults", Name: "faults",
Usage: "View the currently known proving faulty sectors information", Usage: "View the currently known proving faulty sectors information",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color")
api, acloser, err := lcli.GetFullNodeAPI(cctx) api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -90,8 +88,6 @@ var provingInfoCmd = &cli.Command{
Name: "info", Name: "info",
Usage: "View current state information", Usage: "View current state information",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color")
api, acloser, err := lcli.GetFullNodeAPI(cctx) api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -197,8 +193,6 @@ var provingDeadlinesCmd = &cli.Command{
Name: "deadlines", Name: "deadlines",
Usage: "View the current proving period deadlines information", Usage: "View the current proving period deadlines information",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color")
api, acloser, err := lcli.GetFullNodeAPI(cctx) api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err

View File

@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo"
) )
@ -118,13 +119,33 @@ var runCmd = &cli.Command{
return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath) return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath)
} }
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
}
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.StorageMiner)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
bootstrapLibP2P := cfg.Subsystems.EnableMarkets
err = lr.Close()
if err != nil {
return err
}
shutdownChan := make(chan struct{}) shutdownChan := make(chan struct{})
var minerapi api.StorageMiner var minerapi api.StorageMiner
stop, err := node.New(ctx, stop, err := node.New(ctx,
node.StorageMiner(&minerapi), node.StorageMiner(&minerapi, cfg.Subsystems),
node.Override(new(dtypes.ShutdownChan), shutdownChan), node.Override(new(dtypes.ShutdownChan), shutdownChan),
node.Online(), node.Base(),
node.Repo(r), node.Repo(r),
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") }, node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") },
@ -142,14 +163,18 @@ var runCmd = &cli.Command{
return xerrors.Errorf("getting API endpoint: %w", err) return xerrors.Errorf("getting API endpoint: %w", err)
} }
// Bootstrap with full node if bootstrapLibP2P {
remoteAddrs, err := nodeApi.NetAddrsListen(ctx) log.Infof("Bootstrapping libp2p network with full node")
if err != nil {
return xerrors.Errorf("getting full node libp2p address: %w", err)
}
if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { // Bootstrap with full node
return xerrors.Errorf("connecting to full node (libp2p): %w", err) remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
if err != nil {
return xerrors.Errorf("getting full node libp2p address: %w", err)
}
if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
return xerrors.Errorf("connecting to full node (libp2p): %w", err)
}
} }
log.Infof("Remote version %s", v) log.Infof("Remote version %s", v)

View File

@ -19,7 +19,6 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
) )
var sealingCmd = &cli.Command{ var sealingCmd = &cli.Command{
@ -39,12 +38,14 @@ var sealingWorkersCmd = &cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
@ -134,7 +135,7 @@ var sealingJobsCmd = &cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
&cli.BoolFlag{ &cli.BoolFlag{
@ -143,7 +144,9 @@ var sealingJobsCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {

View File

@ -26,7 +26,6 @@ import (
"github.com/filecoin-project/lotus/lib/tablewriter" "github.com/filecoin-project/lotus/lib/tablewriter"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
) )
@ -163,9 +162,9 @@ var sectorsListCmd = &cli.Command{
}, },
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Aliases: []string{"c"}, Usage: "use color in display output",
Value: cliutil.DefaultColorUse,
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
Aliases: []string{"c"},
}, },
&cli.BoolFlag{ &cli.BoolFlag{
Name: "fast", Name: "fast",
@ -185,7 +184,9 @@ var sectorsListCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
@ -438,6 +439,12 @@ var sectorsExtendCmd = &cli.Command{
Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs", Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs",
Required: false, Required: false,
}, },
&cli.Int64Flag{
Name: "expiration-ignore",
Value: 120,
Usage: "when extending v1 sectors, skip sectors whose current expiration is less than <ignore> epochs from now",
Required: false,
},
&cli.Int64Flag{ &cli.Int64Flag{
Name: "expiration-cutoff", Name: "expiration-cutoff",
Usage: "when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified)", Usage: "when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified)",
@ -496,6 +503,10 @@ var sectorsExtendCmd = &cli.Command{
continue continue
} }
if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) {
continue
}
if cctx.IsSet("expiration-cutoff") { if cctx.IsSet("expiration-cutoff") {
if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) { if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) {
continue continue
@ -510,6 +521,10 @@ var sectorsExtendCmd = &cli.Command{
// Set the new expiration to 48 hours less than the theoretical maximum lifetime // Set the new expiration to 48 hours less than the theoretical maximum lifetime
newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation
if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp {
continue
}
p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK) p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK)
if err != nil { if err != nil {
return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err) return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err)
@ -527,7 +542,7 @@ var sectorsExtendCmd = &cli.Command{
} else { } else {
added := false added := false
for exp := range es { for exp := range es {
if withinTolerance(exp, newExp) { if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration {
es[exp] = append(es[exp], uint64(si.SectorNumber)) es[exp] = append(es[exp], uint64(si.SectorNumber))
added = true added = true
break break

View File

@ -27,7 +27,6 @@ import (
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -169,7 +168,7 @@ var storageListCmd = &cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
}, },
@ -177,7 +176,9 @@ var storageListCmd = &cli.Command{
storageListSectorsCmd, storageListSectorsCmd,
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
@ -484,12 +485,14 @@ var storageListSectorsCmd = &cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "color", Name: "color",
Value: cliutil.DefaultColorUse, Usage: "use color in display output",
DefaultText: "depends on output being a TTY", DefaultText: "depends on output being a TTY",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color") if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {

View File

@ -314,7 +314,7 @@ var DaemonCmd = &cli.Command{
stop, err := node.New(ctx, stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)), node.FullAPI(&api, node.Lite(isLite)),
node.Online(), node.Base(),
node.Repo(r), node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper), node.Override(new(dtypes.Bootstrapper), isBootstrapper),

View File

@ -98,6 +98,7 @@
* [SealingAbort](#SealingAbort) * [SealingAbort](#SealingAbort)
* [SealingSchedDiag](#SealingSchedDiag) * [SealingSchedDiag](#SealingSchedDiag)
* [Sector](#Sector) * [Sector](#Sector)
* [SectorAddPieceToAny](#SectorAddPieceToAny)
* [SectorCommitFlush](#SectorCommitFlush) * [SectorCommitFlush](#SectorCommitFlush)
* [SectorCommitPending](#SectorCommitPending) * [SectorCommitPending](#SectorCommitPending)
* [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
@ -118,6 +119,7 @@
* [SectorsRefs](#SectorsRefs) * [SectorsRefs](#SectorsRefs)
* [SectorsStatus](#SectorsStatus) * [SectorsStatus](#SectorsStatus)
* [SectorsSummary](#SectorsSummary) * [SectorsSummary](#SectorsSummary)
* [SectorsUnsealPiece](#SectorsUnsealPiece)
* [SectorsUpdate](#SectorsUpdate) * [SectorsUpdate](#SectorsUpdate)
* [Storage](#Storage) * [Storage](#Storage)
* [StorageAddLocal](#StorageAddLocal) * [StorageAddLocal](#StorageAddLocal)
@ -1561,6 +1563,54 @@ Response: `{}`
## Sector ## Sector
### SectorAddPieceToAny
Add piece to an open sector. If no sectors with enough space are open,
either a new sector will be created, or this call will block until more
sectors can be created.
Perms: admin
Inputs:
```json
[
1024,
{},
{
"PublishCid": null,
"DealID": 5432,
"DealProposal": {
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceSize": 1032,
"VerifiedDeal": true,
"Client": "f01234",
"Provider": "f01234",
"Label": "string value",
"StartEpoch": 10101,
"EndEpoch": 10101,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealSchedule": {
"StartEpoch": 10101,
"EndEpoch": 10101
},
"KeepUnsealed": true
}
]
```
Response:
```json
{
"Sector": 9,
"Offset": 1032
}
```
### SectorCommitFlush ### SectorCommitFlush
SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
Returns null if message wasn't sent Returns null if message wasn't sent
@ -1861,6 +1911,30 @@ Response:
} }
``` ```
### SectorsUnsealPiece
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
1040384,
1024,
null,
null
]
```
Response: `{}`
### SectorsUpdate ### SectorsUpdate

View File

@ -17,6 +17,7 @@
* [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetBlockMessages](#ChainGetBlockMessages)
* [ChainGetGenesis](#ChainGetGenesis) * [ChainGetGenesis](#ChainGetGenesis)
* [ChainGetMessage](#ChainGetMessage) * [ChainGetMessage](#ChainGetMessage)
* [ChainGetMessagesInTipset](#ChainGetMessagesInTipset)
* [ChainGetNode](#ChainGetNode) * [ChainGetNode](#ChainGetNode)
* [ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentMessages](#ChainGetParentMessages)
* [ChainGetParentReceipts](#ChainGetParentReceipts) * [ChainGetParentReceipts](#ChainGetParentReceipts)
@ -533,6 +534,28 @@ Response:
} }
``` ```
### ChainGetMessagesInTipset
ChainGetMessagesInTipset returns message stores in current tipset
Perms: read
Inputs:
```json
[
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `null`
### ChainGetNode ### ChainGetNode

View File

@ -17,6 +17,7 @@
* [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetBlockMessages](#ChainGetBlockMessages)
* [ChainGetGenesis](#ChainGetGenesis) * [ChainGetGenesis](#ChainGetGenesis)
* [ChainGetMessage](#ChainGetMessage) * [ChainGetMessage](#ChainGetMessage)
* [ChainGetMessagesInTipset](#ChainGetMessagesInTipset)
* [ChainGetNode](#ChainGetNode) * [ChainGetNode](#ChainGetNode)
* [ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentMessages](#ChainGetParentMessages)
* [ChainGetParentReceipts](#ChainGetParentReceipts) * [ChainGetParentReceipts](#ChainGetParentReceipts)
@ -535,6 +536,28 @@ Response:
} }
``` ```
### ChainGetMessagesInTipset
ChainGetMessagesInTipset returns message stores in current tipset
Perms: read
Inputs:
```json
[
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `null`
### ChainGetNode ### ChainGetNode

View File

@ -41,7 +41,7 @@ COMMANDS:
GLOBAL OPTIONS: GLOBAL OPTIONS:
--actor value, -a value specify other actor to check state for (read only) --actor value, -a value specify other actor to check state for (read only)
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
--help, -h show help (default: false) --help, -h show help (default: false)
--version, -v print the version (default: false) --version, -v print the version (default: false)
@ -57,6 +57,7 @@ USAGE:
COMMANDS: COMMANDS:
restore Initialize a lotus miner repo from a backup restore Initialize a lotus miner repo from a backup
service Initialize a lotus miner sub-service
help, h Shows a list of commands or help for one command help, h Shows a list of commands or help for one command
OPTIONS: OPTIONS:
@ -93,6 +94,24 @@ OPTIONS:
``` ```
### lotus-miner init service
```
NAME:
lotus-miner init service - Initialize a lotus miner sub-service
USAGE:
lotus-miner init service [command options] [backupFile]
OPTIONS:
--config value config file (config.toml)
--nosync don't check full-node sync status (default: false)
--type value type of service to be enabled
--api-sealer value sealer API info (lotus-miner auth api-info --perm=admin)
--api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin)
--help, -h show help (default: false)
```
## lotus-miner run ## lotus-miner run
``` ```
NAME: NAME:
@ -295,7 +314,7 @@ USAGE:
OPTIONS: OPTIONS:
--verbose (default: false) --verbose (default: false)
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--help, -h show help (default: false) --help, -h show help (default: false)
``` ```
@ -1344,7 +1363,7 @@ USAGE:
OPTIONS: OPTIONS:
--show-removed show removed sectors (default: false) --show-removed show removed sectors (default: false)
--color, -c (default: depends on output being a TTY) --color, -c use color in display output (default: depends on output being a TTY)
--fast don't show on-chain info for better performance (default: false) --fast don't show on-chain info for better performance (default: false)
--events display number of events the sector has received (default: false) --events display number of events the sector has received (default: false)
--seal-time display how long it took for the sector to be sealed (default: false) --seal-time display how long it took for the sector to be sealed (default: false)
@ -1405,6 +1424,7 @@ OPTIONS:
--new-expiration value new expiration epoch (default: 0) --new-expiration value new expiration epoch (default: 0)
--v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false) --v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false)
--tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160) --tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160)
--expiration-ignore value when extending v1 sectors, skip sectors whose current expiration is less than <ignore> epochs from now (default: 120)
--expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified) (default: 0) --expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified) (default: 0)
--help, -h show help (default: false) --help, -h show help (default: false)
@ -1739,7 +1759,7 @@ COMMANDS:
help, h Shows a list of commands or help for one command help, h Shows a list of commands or help for one command
OPTIONS: OPTIONS:
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--help, -h show help (default: false) --help, -h show help (default: false)
--version, -v print the version (default: false) --version, -v print the version (default: false)
@ -1754,7 +1774,7 @@ USAGE:
lotus-miner storage list sectors [command options] [arguments...] lotus-miner storage list sectors [command options] [arguments...]
OPTIONS: OPTIONS:
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--help, -h show help (default: false) --help, -h show help (default: false)
``` ```
@ -1816,7 +1836,7 @@ USAGE:
lotus-miner sealing jobs [command options] [arguments...] lotus-miner sealing jobs [command options] [arguments...]
OPTIONS: OPTIONS:
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--show-ret-done show returned but not consumed calls (default: false) --show-ret-done show returned but not consumed calls (default: false)
--help, -h show help (default: false) --help, -h show help (default: false)
@ -1831,7 +1851,7 @@ USAGE:
lotus-miner sealing workers [command options] [arguments...] lotus-miner sealing workers [command options] [arguments...]
OPTIONS: OPTIONS:
--color (default: depends on output being a TTY) --color use color in display output (default: depends on output being a TTY)
--help, -h show help (default: false) --help, -h show help (default: false)
``` ```

View File

@ -44,11 +44,7 @@ Testing an RC:
- Binaries - Binaries
- [ ] Ensure the RC release has downloadable binaries - [ ] Ensure the RC release has downloadable binaries
- Upgrade our testnet infra - Upgrade our testnet infra
- [ ] 1 bootstrap node
- [ ] 1 miner
- [ ] Scratch nodes
- [ ] Wait 24 hours, confirm nodes stay in sync - [ ] Wait 24 hours, confirm nodes stay in sync
- [ ] Remaining testnet infra
- Upgrade our mainnet infra - Upgrade our mainnet infra
- [ ] Subset of development full archival nodes - [ ] Subset of development full archival nodes
- [ ] Subset of bootstrappers (1 per region) - [ ] Subset of bootstrappers (1 per region)

View File

@ -11,7 +11,6 @@ import (
"os" "os"
"runtime" "runtime"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -24,6 +23,7 @@ import (
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32" "github.com/filecoin-project/lotus/extern/sector-storage/fr32"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
) )

View File

@ -1,13 +1,13 @@
package ffiwrapper package ffiwrapper
import ( import (
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"golang.org/x/xerrors" "golang.org/x/xerrors"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle" rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
) )

View File

@ -29,8 +29,6 @@ var log = logging.Logger("advmgr")
var ErrNoWorkers = errors.New("no suitable workers found") var ErrNoWorkers = errors.New("no suitable workers found")
type URLs []string
type Worker interface { type Worker interface {
storiface.WorkerCalls storiface.WorkerCalls

View File

@ -75,6 +75,10 @@ func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) e
return nil return nil
} }
func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error {
panic("SectorMgr: unsealing piece: implement me")
}
func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) log.Warn("Add piece: ", sectorID, size, sectorID.ProofType)
@ -496,10 +500,6 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
panic("not supported") panic("not supported")
} }
func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error {
return nil
}
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize() plen, err := svi.SealProof.ProofSize()
if err != nil { if err != nil {

View File

@ -7,7 +7,6 @@ import (
"syscall" "syscall"
"github.com/detailyang/go-fallocate" "github.com/detailyang/go-fallocate"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle" rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
@ -15,6 +14,8 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
logging "github.com/ipfs/go-log/v2"
) )
var log = logging.Logger("partialfile") var log = logging.Logger("partialfile")

View File

@ -7,12 +7,12 @@ import (
"os" "os"
"strconv" "strconv"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/gorilla/mux" "github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/sector-storage/tarutil" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
@ -53,11 +53,10 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
mux := mux.NewRouter() mux := mux.NewRouter()
mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET") mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
mux.ServeHTTP(w, r) mux.ServeHTTP(w, r)
} }

View File

@ -66,6 +66,8 @@ type SectorIndex interface { // part of storage-miner api
// atomically acquire locks on all sector file types. close ctx to unlock // atomically acquire locks on all sector file types. close ctx to unlock
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error)
StorageList(ctx context.Context) (map[ID][]Decl, error)
} }
type Decl struct { type Decl struct {

View File

@ -158,6 +158,8 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s
return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid)) return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid))
} }
type URLs []string
func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
l := &Local{ l := &Local{
localStorage: ls, localStorage: ls,

View File

@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT. // Code generated by MockGen. DO NOT EDIT.
// Source: index.go // Source: index.go
// Package mock_stores is a generated GoMock package. // Package mocks is a generated GoMock package.
package mocks package mocks
import ( import (
@ -125,6 +125,21 @@ func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomo
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1)
} }
// StorageList mocks base method.
func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StorageList", ctx)
ret0, _ := ret[0].(map[stores.ID][]stores.Decl)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StorageList indicates an expected call of StorageList.
func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx)
}
// StorageLock mocks base method. // StorageLock mocks base method.
func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()

View File

@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT. // Code generated by MockGen. DO NOT EDIT.
// Source: interface.go // Source: interface.go
// Package mock_stores is a generated GoMock package. // Package mocks is a generated GoMock package.
package mocks package mocks
import ( import (

View File

@ -297,6 +297,32 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
} }
} }
func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) {
url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded())
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return false, xerrors.Errorf("request: %w", err)
}
req.Header = r.auth.Clone()
fmt.Printf("req using header: %#v \n", r.auth)
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false, xerrors.Errorf("do request: %w", err)
}
defer resp.Body.Close() // nolint
switch resp.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusRequestedRangeNotSatisfiable:
return false, nil
default:
return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode)
}
}
func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
// Make sure we have the data local // Make sure we have the data local
_, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) _, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
@ -419,31 +445,6 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
return out, nil return out, nil
} }
func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) {
url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded())
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return false, xerrors.Errorf("request: %w", err)
}
req.Header = r.auth.Clone()
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false, xerrors.Errorf("do request: %w", err)
}
defer resp.Body.Close() // nolint
switch resp.StatusCode {
case http.StatusOK:
return true, nil
case http.StatusRequestedRangeNotSatisfiable:
return false, nil
default:
return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode)
}
}
func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) {
if len(r.limit) >= cap(r.limit) { if len(r.limit) >= cap(r.limit) {
log.Infof("Throttling remote read, %d already running", len(r.limit)) log.Infof("Throttling remote read, %d already running", len(r.limit))

View File

@ -8,7 +8,7 @@ import (
"sort" "sort"
abi "github.com/filecoin-project/go-state-types/abi" abi "github.com/filecoin-project/go-state-types/abi"
market "github.com/filecoin-project/specs-actors/actors/builtin/market" api "github.com/filecoin-project/lotus/api"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -46,7 +46,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error {
return err return err
} }
// t.DealInfo (sealing.DealInfo) (struct) // t.DealInfo (api.PieceDealInfo) (struct)
if len("DealInfo") > cbg.MaxLength { if len("DealInfo") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealInfo\" was too long") return xerrors.Errorf("Value in field \"DealInfo\" was too long")
} }
@ -107,7 +107,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
} }
} }
// t.DealInfo (sealing.DealInfo) (struct) // t.DealInfo (api.PieceDealInfo) (struct)
case "DealInfo": case "DealInfo":
{ {
@ -120,7 +120,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
if err := br.UnreadByte(); err != nil { if err := br.UnreadByte(); err != nil {
return err return err
} }
t.DealInfo = new(DealInfo) t.DealInfo = new(api.PieceDealInfo)
if err := t.DealInfo.UnmarshalCBOR(br); err != nil { if err := t.DealInfo.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err)
} }
@ -136,384 +136,6 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
return nil return nil
} }
func (t *DealInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{165}); err != nil {
return err
}
scratch := make([]byte, 9)
// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("PublishCid")); err != nil {
return err
}
if t.PublishCid == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
}
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
return err
}
if err := t.DealProposal.MarshalCBOR(w); err != nil {
return err
}
// t.DealSchedule (sealing.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
return err
}
if err := t.DealSchedule.MarshalCBOR(w); err != nil {
return err
}
// t.KeepUnsealed (bool) (bool)
if len("KeepUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
return err
}
if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
return err
}
return nil
}
func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
*t = DealInfo{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealInfo: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
}
t.PublishCid = &c
}
}
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.DealProposal = new(market.DealProposal)
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
}
}
}
// t.DealSchedule (sealing.DealSchedule) (struct)
case "DealSchedule":
{
if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
}
}
// t.KeepUnsealed (bool) (bool)
case "KeepUnsealed":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.KeepUnsealed = false
case 21:
t.KeepUnsealed = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{162}); err != nil {
return err
}
scratch := make([]byte, 9)
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
return err
}
if t.EndEpoch >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
return err
}
}
return nil
}
func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
*t = DealSchedule{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.EndEpoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}
func (t *SectorInfo) MarshalCBOR(w io.Writer) error { func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
if t == nil { if t == nil {
_, err := w.Write(cbg.CborNull) _, err := w.Write(cbg.CborNull)
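The hunks above drop the generated CBOR encoders for DealInfo and DealSchedule from the sealing package: both types move to the api package as PieceDealInfo and DealSchedule (their old definitions are removed from the sealing types file further down, and the api cbor-gen list picks them up). A minimal sketch of the relocated definitions, reconstructed from the removed sealing code; the exact file layout and the v2 market import alias are assumptions:

    // Sketch only: how the relocated definitions presumably look in the api
    // package, reconstructed from the sealing.DealInfo / sealing.DealSchedule
    // code removed further down in this diff.
    package api

    import (
        "github.com/ipfs/go-cid"

        "github.com/filecoin-project/go-state-types/abi"
        market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
    )

    // PieceDealInfo is a tuple of deal identity and its schedule.
    type PieceDealInfo struct {
        PublishCid   *cid.Cid
        DealID       abi.DealID
        DealProposal *market2.DealProposal
        DealSchedule DealSchedule
        KeepUnsealed bool
    }

    // DealSchedule communicates the time interval of a storage deal. The deal
    // must appear in a sealed (proven) sector no later than StartEpoch,
    // otherwise it is invalid.
    type DealSchedule struct {
        StartEpoch abi.ChainEpoch
        EndEpoch   abi.ChainEpoch
    }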
@ -7,10 +7,6 @@ import (
"sync" "sync"
"time" "time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -18,13 +14,16 @@ import (
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/config"
@ -46,6 +45,7 @@ type CommitBatcherApi interface {
StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
} }
type AggregateInput struct { type AggregateInput struct {
@ -225,7 +225,7 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes,
} }
if individual { if individual {
res, err = b.processIndividually() res, err = b.processIndividually(cfg)
} else { } else {
res, err = b.processBatch(cfg) res, err = b.processBatch(cfg)
} }
@ -341,6 +341,10 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa
aggFee := big.Div(big.Mul(policy.AggregateNetworkFee(nv, len(infos), bf), aggFeeNum), aggFeeDen) aggFee := big.Div(big.Mul(policy.AggregateNetworkFee(nv, len(infos), bf), aggFeeNum), aggFeeDen)
needFunds := big.Add(collateral, aggFee) needFunds := big.Add(collateral, aggFee)
needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds)
if err != nil {
return []sealiface.CommitBatchRes{res}, err
}
goodFunds := big.Add(maxFee, needFunds) goodFunds := big.Add(maxFee, needFunds)
@ -361,12 +365,26 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa
return []sealiface.CommitBatchRes{res}, nil return []sealiface.CommitBatchRes{res}, nil
} }
func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error) { func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) {
mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil)
if err != nil { if err != nil {
return nil, xerrors.Errorf("couldn't get miner info: %w", err) return nil, xerrors.Errorf("couldn't get miner info: %w", err)
} }
avail := types.TotalFilecoinInt
if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback {
avail, err = b.api.StateMinerAvailableBalance(b.mctx, b.maddr, nil)
if err != nil {
return nil, xerrors.Errorf("getting available miner balance: %w", err)
}
avail = big.Sub(avail, cfg.AvailableBalanceBuffer)
if avail.LessThan(big.Zero()) {
avail = big.Zero()
}
}
tok, _, err := b.api.ChainHead(b.mctx) tok, _, err := b.api.ChainHead(b.mctx)
if err != nil { if err != nil {
return nil, err return nil, err
@ -380,7 +398,7 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error
FailedSectors: map[abi.SectorNumber]string{}, FailedSectors: map[abi.SectorNumber]string{},
} }
mcid, err := b.processSingle(mi, sn, info, tok) mcid, err := b.processSingle(cfg, mi, &avail, sn, info, tok)
if err != nil { if err != nil {
log.Errorf("process single error: %+v", err) // todo: return to user log.Errorf("process single error: %+v", err) // todo: return to user
r.FailedSectors[sn] = err.Error() r.FailedSectors[sn] = err.Error()
@ -394,7 +412,7 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error
return res, nil return res, nil
} }
func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi miner.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) {
enc := new(bytes.Buffer) enc := new(bytes.Buffer)
params := &miner.ProveCommitSectorParams{ params := &miner.ProveCommitSectorParams{
SectorNumber: sn, SectorNumber: sn,
@ -410,6 +428,19 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i
return cid.Undef, err return cid.Undef, err
} }
if cfg.CollateralFromMinerBalance {
c := big.Sub(collateral, *avail)
*avail = big.Sub(*avail, collateral)
collateral = c
if collateral.LessThan(big.Zero()) {
collateral = big.Zero()
}
if (*avail).LessThan(big.Zero()) {
*avail = big.Zero()
}
}
goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee)) goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee))
from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
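With CollateralFromMinerBalance enabled, processIndividually now fetches the miner actor's available balance once (minus AvailableBalanceBuffer) and processSingle draws each sector's collateral from that running total, attaching only the shortfall to the ProveCommitSector message. A minimal standalone sketch of that per-sector accounting; the helper name is invented for illustration:

    package example // illustrative only

    import "github.com/filecoin-project/go-state-types/big"

    // deductFromAvailable restates the per-sector accounting above as a
    // standalone helper: attach only the part of the collateral that the
    // tracked available balance cannot cover, and shrink that balance for the
    // next sector in the loop.
    func deductFromAvailable(collateral, avail big.Int) (send, remaining big.Int) {
        send = big.Sub(collateral, avail)
        remaining = big.Sub(avail, collateral)
        if send.LessThan(big.Zero()) {
            send = big.Zero()
        }
        if remaining.LessThan(big.Zero()) {
            remaining = big.Zero()
        }
        return send, remaining
    }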
@ -12,8 +12,6 @@ import (
func main() { func main() {
err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing", err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing",
sealing.Piece{}, sealing.Piece{},
sealing.DealInfo{},
sealing.DealSchedule{},
sealing.SectorInfo{}, sealing.SectorInfo{},
sealing.Log{}, sealing.Log{},
) )
@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
@ -236,34 +237,34 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn
return nil return nil
} }
func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid)
if (padreader.PaddedSize(uint64(size))) != size { if (padreader.PaddedSize(uint64(size))) != size {
return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece")
} }
sp, err := m.currentSealProof(ctx) sp, err := m.currentSealProof(ctx)
if err != nil { if err != nil {
return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err)
} }
ssize, err := sp.SectorSize() ssize, err := sp.SectorSize()
if err != nil { if err != nil {
return 0, 0, err return api.SectorOffset{}, err
} }
if size > abi.PaddedPieceSize(ssize).Unpadded() { if size > abi.PaddedPieceSize(ssize).Unpadded() {
return 0, 0, xerrors.Errorf("piece cannot fit into a sector") return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector")
} }
if _, err := deal.DealProposal.Cid(); err != nil { if _, err := deal.DealProposal.Cid(); err != nil {
return 0, 0, xerrors.Errorf("getting proposal CID: %w", err) return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err)
} }
m.inputLk.Lock() m.inputLk.Lock()
if _, exist := m.pendingPieces[proposalCID(deal)]; exist { if _, exist := m.pendingPieces[proposalCID(deal)]; exist {
m.inputLk.Unlock() m.inputLk.Unlock()
return 0, 0, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) return api.SectorOffset{}, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal))
} }
resCh := make(chan struct { resCh := make(chan struct {
@ -295,7 +296,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
res := <-resCh res := <-resCh
return res.sn, res.offset.Padded(), res.err return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
} }
// called with m.inputLk // called with m.inputLk
@ -454,7 +455,7 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
return m.sectors.Send(uint64(sid), SectorStartPacking{}) return m.sectors.Send(uint64(sid), SectorStartPacking{})
} }
func proposalCID(deal DealInfo) cid.Cid { func proposalCID(deal api.PieceDealInfo) cid.Cid {
pc, err := deal.DealProposal.Cid() pc, err := deal.DealProposal.Cid()
if err != nil { if err != nil {
log.Errorf("DealProposal.Cid error: %+v", err) log.Errorf("DealProposal.Cid error: %+v", err)
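AddPieceToAnySector is renamed to SectorAddPieceToAny; it now takes the deal as api.PieceDealInfo and returns a single api.SectorOffset instead of a separate sector number and padded offset. A hedged sketch of a call site adapted to the new signature; the wrapper function and its placement are illustrative, only the method name, parameter types and return type come from this diff:

    package example // illustrative only

    import (
        "context"

        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/specs-storage/storage"
        "golang.org/x/xerrors"

        "github.com/filecoin-project/lotus/api"
        sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
    )

    // addPiece shows the shape of a call site after the rename: a single
    // api.SectorOffset comes back instead of a separate sector number and
    // padded offset.
    func addPiece(ctx context.Context, m *sealing.Sealing, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
        so, err := m.SectorAddPieceToAny(ctx, size, data, deal)
        if err != nil {
            return api.SectorOffset{}, xerrors.Errorf("adding piece to sector: %w", err)
        }
        return so, nil
    }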
@ -88,6 +88,21 @@ func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
} }
// StateMinerAvailableBalance mocks base method.
func (m *MockCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2)
ret0, _ := ret[0].(big.Int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance.
func (mr *MockCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2)
}
// StateMinerInfo mocks base method. // StateMinerInfo mocks base method.
func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -71,6 +71,21 @@ func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
} }
// StateMinerAvailableBalance mocks base method.
func (m *MockPreCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2)
ret0, _ := ret[0].(big.Int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance.
func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2)
}
// StateMinerInfo mocks base method. // StateMinerInfo mocks base method.
func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
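Both generated mocks gain StateMinerAvailableBalance so batcher tests can exercise the collateral-from-balance path. A sketch of stubbing it with gomock; the mocks import path and the NewMockPreCommitBatcherApi constructor follow standard mockgen output and are assumptions, as is the arbitrary balance:

    package sealing_test // illustrative placement

    import (
        "testing"

        "github.com/golang/mock/gomock"

        "github.com/filecoin-project/go-state-types/big"

        "github.com/filecoin-project/lotus/extern/storage-sealing/mocks"
    )

    // TestStubAvailableBalance sketches stubbing the new method before handing
    // the mock to a PreCommitBatcher (or CommitBatcher) under test.
    func TestStubAvailableBalance(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        pcapi := mocks.NewMockPreCommitBatcherApi(ctrl)
        pcapi.EXPECT().
            StateMinerAvailableBalance(gomock.Any(), gomock.Any(), gomock.Any()).
            Return(big.NewInt(10_000_000), nil). // arbitrary attoFIL balance
            AnyTimes()
    }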
@ -7,9 +7,6 @@ import (
"sync" "sync"
"time" "time"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -20,7 +17,9 @@ import (
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/config"
) )
@ -30,6 +29,7 @@ import (
type PreCommitBatcherApi interface { type PreCommitBatcherApi interface {
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
} }
@ -226,6 +226,11 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo
deposit = big.Add(deposit, p.deposit) deposit = big.Add(deposit, p.deposit)
} }
deposit, err := collateralSendAmount(b.mctx, b.api, b.maddr, cfg, deposit)
if err != nil {
return []sealiface.PreCommitBatchRes{res}, err
}
enc := new(bytes.Buffer) enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil { if err := params.MarshalCBOR(enc); err != nil {
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err)
@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
api "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -58,9 +59,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) {
Size: abi.PaddedPieceSize(1024), Size: abi.PaddedPieceSize(1024),
PieceCID: fakePieceCid(t), PieceCID: fakePieceCid(t),
}, },
DealInfo: &sealing.DealInfo{ DealInfo: &api.PieceDealInfo{
DealID: abi.DealID(42), DealID: abi.DealID(42),
DealSchedule: sealing.DealSchedule{ DealSchedule: api.DealSchedule{
StartEpoch: abi.ChainEpoch(70), StartEpoch: abi.ChainEpoch(70),
EndEpoch: abi.ChainEpoch(75), EndEpoch: abi.ChainEpoch(75),
}, },
@ -71,9 +72,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) {
Size: abi.PaddedPieceSize(1024), Size: abi.PaddedPieceSize(1024),
PieceCID: fakePieceCid(t), PieceCID: fakePieceCid(t),
}, },
DealInfo: &sealing.DealInfo{ DealInfo: &api.PieceDealInfo{
DealID: abi.DealID(43), DealID: abi.DealID(43),
DealSchedule: sealing.DealSchedule{ DealSchedule: api.DealSchedule{
StartEpoch: abi.ChainEpoch(80), StartEpoch: abi.ChainEpoch(80),
EndEpoch: abi.ChainEpoch(100), EndEpoch: abi.ChainEpoch(100),
}, },
@ -98,9 +99,9 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) {
Size: abi.PaddedPieceSize(1024), Size: abi.PaddedPieceSize(1024),
PieceCID: fakePieceCid(t), PieceCID: fakePieceCid(t),
}, },
DealInfo: &sealing.DealInfo{ DealInfo: &api.PieceDealInfo{
DealID: abi.DealID(44), DealID: abi.DealID(44),
DealSchedule: sealing.DealSchedule{ DealSchedule: api.DealSchedule{
StartEpoch: abi.ChainEpoch(1), StartEpoch: abi.ChainEpoch(1),
EndEpoch: abi.ChainEpoch(10), EndEpoch: abi.ChainEpoch(10),
}, },
@ -125,9 +126,9 @@ func TestMissingDealIsIgnored(t *testing.T) {
Size: abi.PaddedPieceSize(1024), Size: abi.PaddedPieceSize(1024),
PieceCID: fakePieceCid(t), PieceCID: fakePieceCid(t),
}, },
DealInfo: &sealing.DealInfo{ DealInfo: &api.PieceDealInfo{
DealID: abi.DealID(44), DealID: abi.DealID(44),
DealSchedule: sealing.DealSchedule{ DealSchedule: api.DealSchedule{
StartEpoch: abi.ChainEpoch(1), StartEpoch: abi.ChainEpoch(1),
EndEpoch: abi.ChainEpoch(10), EndEpoch: abi.ChainEpoch(10),
}, },
@ -24,6 +24,10 @@ type Config struct {
FinalizeEarly bool FinalizeEarly bool
CollateralFromMinerBalance bool
AvailableBalanceBuffer abi.TokenAmount
DisableCollateralFallback bool
BatchPreCommits bool BatchPreCommits bool
MaxPreCommitBatch int MaxPreCommitBatch int
PreCommitBatchWait time.Duration PreCommitBatchWait time.Duration
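Three knobs are added to sealiface.Config: CollateralFromMinerBalance, AvailableBalanceBuffer and DisableCollateralFallback. A sketch of setting them when building a Config by hand, for example in a test; types.FromFil and the 5 FIL buffer are illustrative choices, and every other field is left at its zero value for brevity:

    package example // illustrative only

    import (
        "github.com/filecoin-project/lotus/chain/types"
        "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
    )

    // newCollateralConfig shows only the three new knobs; all other Config
    // fields are omitted purely for brevity.
    func newCollateralConfig() sealiface.Config {
        return sealiface.Config{
            CollateralFromMinerBalance: true,             // draw collateral from the miner actor balance
            AvailableBalanceBuffer:     types.FromFil(5), // keep 5 FIL of the balance untouched
            DisableCollateralFallback:  false,            // still top up from the wallet if the balance runs short
        }
    }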
@ -59,6 +59,7 @@ type SealingAPI interface {
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error)
StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
@ -124,7 +125,7 @@ type openSector struct {
type pendingPiece struct { type pendingPiece struct {
size abi.UnpaddedPieceSize size abi.UnpaddedPieceSize
deal DealInfo deal api.PieceDealInfo
data storage.Data data storage.Data
@ -126,3 +126,22 @@ func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) er
return ctx.Send(SectorRemoved{}) return ctx.Send(SectorRemoved{})
} }
func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error {
// TODO: track sector health / expiration
log.Infof("Proving sector %d", sector.SectorNumber)
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting sealing config: %w", err)
}
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil {
log.Error(err)
}
// TODO: Watch termination
// TODO: Auto-extend if set
return nil
}
@ -357,11 +357,16 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
} }
} }
params, deposit, tok, err := m.preCommitParams(ctx, sector) params, pcd, tok, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil { if params == nil || err != nil {
return err return err
} }
deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd)
if err != nil {
return err
}
enc := new(bytes.Buffer) enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil { if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)}) return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)})
@ -389,7 +394,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
} }
return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params}) return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *params})
} }
func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error {
@ -628,6 +633,11 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
collateral = big.Zero() collateral = big.Zero()
} }
collateral, err = collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, collateral)
if err != nil {
return err
}
goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
@ -739,22 +749,3 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn
return ctx.Send(SectorFinalized{}) return ctx.Send(SectorFinalized{})
} }
func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error {
// TODO: track sector health / expiration
log.Infof("Proving sector %d", sector.SectorNumber)
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting sealing config: %w", err)
}
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil {
log.Error(err)
}
// TODO: Watch termination
// TODO: Auto-extend if set
return nil
}
@ -11,39 +11,22 @@ import (
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
) )
// Piece is a tuple of piece and deal info // Piece is a tuple of piece and deal info
type PieceWithDealInfo struct { type PieceWithDealInfo struct {
Piece abi.PieceInfo Piece abi.PieceInfo
DealInfo DealInfo DealInfo api.PieceDealInfo
} }
// Piece is a tuple of piece info and optional deal // Piece is a tuple of piece info and optional deal
type Piece struct { type Piece struct {
Piece abi.PieceInfo Piece abi.PieceInfo
DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) DealInfo *api.PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
}
// DealInfo is a tuple of deal identity and its schedule
type DealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}
// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
} }
type Log struct { type Log struct {
@ -10,6 +10,7 @@ import (
cborutil "github.com/filecoin-project/go-cbor-util" cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
api "github.com/filecoin-project/lotus/api"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing" tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
) )
@ -22,9 +23,9 @@ func TestSectorInfoSerialization(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
dealInfo := DealInfo{ dealInfo := api.PieceDealInfo{
DealID: d, DealID: d,
DealSchedule: DealSchedule{ DealSchedule: api.DealSchedule{
StartEpoch: 0, StartEpoch: 0,
EndEpoch: 100, EndEpoch: 100,
}, },
@ -1,9 +1,16 @@
package sealing package sealing
import ( import (
"context"
"math/bits" "math/bits"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
) )
func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) { func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) {
@ -55,3 +62,30 @@ func (m *Sealing) GetSectorInfo(sid abi.SectorNumber) (SectorInfo, error) {
err := m.sectors.Get(uint64(sid)).Get(&out) err := m.sectors.Get(uint64(sid)).Get(&out)
return out, err return out, err
} }
func collateralSendAmount(ctx context.Context, api interface {
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
}, maddr address.Address, cfg sealiface.Config, collateral abi.TokenAmount) (abi.TokenAmount, error) {
if cfg.CollateralFromMinerBalance {
if cfg.DisableCollateralFallback {
return big.Zero(), nil
}
avail, err := api.StateMinerAvailableBalance(ctx, maddr, nil)
if err != nil {
return big.Zero(), xerrors.Errorf("getting available miner balance: %w", err)
}
avail = big.Sub(avail, cfg.AvailableBalanceBuffer)
if avail.LessThan(big.Zero()) {
avail = big.Zero()
}
collateral = big.Sub(collateral, avail)
if collateral.LessThan(big.Zero()) {
collateral = big.Zero()
}
}
return collateral, nil
}
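Worked example of the helper above: with CollateralFromMinerBalance enabled, a required collateral of 10 FIL, an available balance of 8 FIL and an AvailableBalanceBuffer of 1 FIL, it returns 10 - (8 - 1) = 3 FIL to attach to the message; if DisableCollateralFallback is also set it returns zero and the whole amount is expected to come out of the miner actor's on-chain balance.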
@ -53,6 +53,8 @@ func main() {
api.SealedRefs{}, api.SealedRefs{},
api.SealTicket{}, api.SealTicket{},
api.SealSeed{}, api.SealSeed{},
api.PieceDealInfo{},
api.DealSchedule{},
) )
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
go.mod
@ -110,7 +110,7 @@ require (
github.com/libp2p/go-libp2p-mplex v0.4.1 github.com/libp2p/go-libp2p-mplex v0.4.1
github.com/libp2p/go-libp2p-noise v0.2.0 github.com/libp2p/go-libp2p-noise v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.2.7 github.com/libp2p/go-libp2p-peerstore v0.2.7
github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb github.com/libp2p/go-libp2p-pubsub v0.5.0
github.com/libp2p/go-libp2p-quic-transport v0.10.0 github.com/libp2p/go-libp2p-quic-transport v0.10.0
github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3
@ -144,7 +144,6 @@ require (
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.etcd.io/bbolt v1.3.4
go.opencensus.io v0.23.0 go.opencensus.io v0.23.0
go.uber.org/dig v1.10.0 // indirect go.uber.org/dig v1.10.0 // indirect
go.uber.org/fx v1.9.0 go.uber.org/fx v1.9.0
go.sum
@ -945,6 +945,7 @@ github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB
github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.3/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw=
github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
@ -1021,8 +1022,8 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= github.com/libp2p/go-libp2p-pubsub v0.5.0 h1:OzcIuCWyJpOrWH0PTOfvxTzqFur4tiXpY5jXC8OxjyE=
github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-pubsub v0.5.0/go.mod h1:MKnrsQkFgPcrQs1KVmOXy6Uz2RDQ1xO7dQo/P0Ba+ig=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0=
@ -1054,6 +1055,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYR
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.4.3/go.mod h1:mmxP1pGBSc1Arw4F5DIjcpjFAmsRzA1KADuMtMuCT4g=
github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E=
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@ -186,6 +186,7 @@ func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
ens.Miner(&newMiner, full, ens.Miner(&newMiner, full,
kit.OwnerAddr(full.DefaultKey), kit.OwnerAddr(full.DefaultKey),
kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs. kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs.
kit.WithAllSubsystems(),
).Start().InterconnectAll() ).Start().InterconnectAll()
ta, err := newMiner.ActorAddress(ctx) ta, err := newMiner.ActorAddress(ctx)
@ -58,7 +58,7 @@ func TestBatchDealInput(t *testing.T) {
)) ))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime) ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err) require.NoError(t, err)
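kit.NewDealHarness now takes the deal-making miner and the markets node as separate arguments; tests that run a monolithic miner, like this one, pass the same miner twice (kit.NewDealHarness(t, client, miner, miner)), while the split-process test added further down passes a dedicated markets node as the final argument.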
@ -61,7 +61,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
err = miner.SectorMarkForUpgrade(ctx, sl[0]) err = miner.SectorMarkForUpgrade(ctx, sl[0])
require.NoError(t, err) require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
Rseed: 6, Rseed: 6,
SuspendUntilCryptoeconStable: true, SuspendUntilCryptoeconStable: true,
@ -76,6 +76,7 @@ func TestDeadlineToggling(t *testing.T) {
minerE kit.TestMiner minerE kit.TestMiner
) )
opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))} opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))}
opts = append(opts, kit.WithAllSubsystems())
ens := kit.NewEnsemble(t, kit.MockProofs()). ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&client, opts...). FullNode(&client, opts...).
Miner(&minerA, &client, opts...). Miner(&minerA, &client, opts...).
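Miner subsystems are now opt-in in the itest kit, so ensembles that need a full miner pass kit.WithAllSubsystems() explicitly. A sketch mirroring the TestFirstDealEnablesMining setup further down in this diff; only the wrapper function name is invented, and the TestFullNode/TestMiner variable types follow the kit package:

    package itests_example // illustrative only

    import (
        "testing"
        "time"

        "github.com/filecoin-project/lotus/itests/kit"
    )

    // TestFullMinerEnsemble builds two miners with every subsystem enabled,
    // one of them without presealed sectors.
    func TestFullMinerEnsemble(t *testing.T) {
        var (
            client   kit.TestFullNode
            genMiner kit.TestMiner
            provider kit.TestMiner
        )

        ens := kit.NewEnsemble(t, kit.MockProofs())
        ens.FullNode(&client)
        ens.Miner(&genMiner, &client, kit.WithAllSubsystems())
        ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0))
        ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
    }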
@ -13,6 +13,8 @@ import (
datatransfer "github.com/filecoin-project/go-data-transfer" datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules"
@ -20,6 +22,53 @@ import (
"github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo"
) )
// TestDealWithMarketAndMinerNode runs a number of concurrent storage and retrieval deals against a miner
// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node
func TestDealWithMarketAndMinerNode(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me
client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC())
dh := kit.NewDealHarness(t, client, main, market)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: n,
FastRetrieval: fastRetrieval,
CarExport: carExport,
StartEpoch: startEpoch,
})
}
// TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175#
cycles := []int{1}
for _, n := range cycles {
n := n
ns := fmt.Sprintf("%d", n)
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
}
}
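TestDealWithMarketAndMinerNode above drives deals through a split deployment where the markets process is separate from the mining/sealing/proving process, wired up by kit.EnsembleWithMinerAndMarketNodes. A condensed single-deal sketch of the same flow; the function name and deal parameters are illustrative, and the RunningNodeType and pre-commit challenge delay setup from the real test is omitted for brevity:

    package itests_example // illustrative only

    import (
        "testing"

        "github.com/filecoin-project/go-state-types/abi"

        "github.com/filecoin-project/lotus/itests/kit"
    )

    // TestSingleDealAgainstSplitMiner condenses the runTest closure above to a
    // single fast-retrieval deal against a markets node that runs separately
    // from the sealing node.
    func TestSingleDealAgainstSplitMiner(t *testing.T) {
        client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC())

        dh := kit.NewDealHarness(t, client, main, market)
        dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
            N:             1,
            FastRetrieval: true,
            CarExport:     false,
            StartEpoch:    abi.ChainEpoch(2 << 12),
        })
    }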
func TestDealCyclesConcurrent(t *testing.T) { func TestDealCyclesConcurrent(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping test in short mode") t.Skip("skipping test in short mode")
@ -43,7 +92,7 @@ func TestDealCyclesConcurrent(t *testing.T) {
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blockTime) ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: n, N: n,
@ -91,7 +140,7 @@ func TestSimultanenousTransferLimit(t *testing.T) {
node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle))), node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle))),
)) ))
ens.InterconnectAll().BeginMining(blockTime) ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -28,7 +28,7 @@ func TestOfflineDealFlow(t *testing.T) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime) ens.InterconnectAll().BeginMining(blocktime)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
// Create a random file and import on the client. // Create a random file and import on the client.
res, inFile := client.CreateImportFile(ctx, 1, 0) res, inFile := client.CreateImportFile(ctx, 1, 0)

ens := kit.NewEnsemble(t, kit.MockProofs()) ens := kit.NewEnsemble(t, kit.MockProofs())
ens.FullNode(&client) ens.FullNode(&client)
ens.Miner(&genMiner, &client) ens.Miner(&genMiner, &client, kit.WithAllSubsystems())
ens.Miner(&provider, &client, kit.PresealSectors(0)) ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0))
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond) ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
ctx := context.Background() ctx := context.Background()
dh := kit.NewDealHarness(t, &client, &provider) dh := kit.NewDealHarness(t, &client, &provider, &provider)
ref, _ := client.CreateImportFile(ctx, 5, 0) ref, _ := client.CreateImportFile(ctx, 5, 0)

func TestQuotePriceForUnsealedRetrieval(t *testing.T) { func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
var ( var (
ctx = context.Background() ctx = context.Background()
blocktime = time.Second blocktime = 50 * time.Millisecond
) )
kit.QuietMiningLogs() kit.QuietMiningLogs()
@ -35,7 +35,7 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
err = miner.MarketSetRetrievalAsk(ctx, ask) err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err) require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
@ -123,7 +123,7 @@ func TestZeroPricePerByteRetrieval(t *testing.T) {
err = miner.MarketSetRetrievalAsk(ctx, ask) err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err) require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner) dh := kit.NewDealHarness(t, client, miner, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: 1, N: 1,
StartEpoch: startEpoch, StartEpoch: startEpoch,
Some files were not shown because too many files have changed in this diff.