Merge branch 'master' into rate-limit-gateway
This commit is contained in: commit e645e8aa7c

.github/workflows/testground-on-push.yml (vendored) | 2
@@ -21,7 +21,7 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: testground run
-        uses: coryschwartz/testground-github-action@v1.1
+        uses: testground/testground-github-action@v1
        with:
          backend_addr: ${{ matrix.backend_addr }}
          backend_proto: ${{ matrix.backend_proto }}
Makefile | 4
@@ -330,7 +330,7 @@ docsgen-md-storage: docsgen-md-bin
 docsgen-md-worker: docsgen-md-bin
 	./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
 
-docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker
+docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
 
 docsgen-openrpc-full: docsgen-openrpc-bin
 	./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
@@ -338,6 +338,8 @@ docsgen-openrpc-storage: docsgen-openrpc-bin
 	./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
 docsgen-openrpc-worker: docsgen-openrpc-bin
 	./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
+docsgen-openrpc-gateway: docsgen-openrpc-bin
+	./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" -gzip > build/openrpc/gateway.json.gz
 
 .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
 
@@ -66,4 +66,5 @@ type Gateway interface {
 	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
 	WalletBalance(context.Context, address.Address) (types.BigInt, error)
 	Version(context.Context) (APIVersion, error)
+	Discover(context.Context) (apitypes.OpenRPCDocument, error)
 }
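Note: with Discover on the Gateway interface, clients can fetch the gateway's OpenRPC schema over JSON-RPC. A minimal, hedged sketch of calling it through lotus's generated client wrapper — the endpoint address is an illustrative assumption, not part of this diff:

	// Hedged example: assumes a gateway serving /rpc/v1 at this address.
	ctx := context.Background()
	gw, closer, err := client.NewGatewayRPCV1(ctx, "ws://127.0.0.1:2346/rpc/v1", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer closer()

	doc, err := gw.Discover(ctx) // apitypes.OpenRPCDocument is a map[string]interface{}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc["openrpc"]) // OpenRPC spec version declared by the schema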
@@ -227,6 +227,9 @@ type StorageMiner interface {
 	// DagstoreGC runs garbage collection on the DAG store.
 	DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
 
+	// DagstoreRegisterShard registers a shard manually with dagstore with given pieceCID
+	DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin
+
 	// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
 	// so they can download its index
 	IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin
api/cbor_gen.go | 294
@@ -26,25 +26,26 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{163}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{163}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Channel (address.Address) (struct)
 	if len("Channel") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"Channel\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Channel"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Channel"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Channel")); err != nil {
 		return err
 	}
 
-	if err := t.Channel.MarshalCBOR(w); err != nil {
+	if err := t.Channel.MarshalCBOR(cw); err != nil {
 		return err
 	}
 
@@ -53,14 +54,14 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteCidBuf(scratch, w, t.WaitSentinel); err != nil {
+	if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
 		return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
 	}
 
@@ -69,7 +70,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"Vouchers\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouchers"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Vouchers")); err != nil {
@@ -80,27 +81,32 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.Vouchers was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Vouchers))); err != nil {
 		return err
 	}
 	for _, v := range t.Vouchers {
-		if err := v.MarshalCBOR(w); err != nil {
+		if err := v.MarshalCBOR(cw); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
+func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = PaymentInfo{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -115,7 +121,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -129,7 +135,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				if err := t.Channel.UnmarshalCBOR(br); err != nil {
+				if err := t.Channel.UnmarshalCBOR(cr); err != nil {
 					return xerrors.Errorf("unmarshaling t.Channel: %w", err)
 				}
 
@@ -139,7 +145,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				c, err := cbg.ReadCid(br)
+				c, err := cbg.ReadCid(cr)
 				if err != nil {
 					return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
 				}
@@ -150,7 +156,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
 		// t.Vouchers ([]*paych.SignedVoucher) (slice)
 		case "Vouchers":
 
-			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			maj, extra, err = cr.ReadHeader()
 			if err != nil {
 				return err
 			}
@@ -170,7 +176,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
 		for i := 0; i < int(extra); i++ {
 
 			var v paych.SignedVoucher
-			if err := v.UnmarshalCBOR(br); err != nil {
+			if err := v.UnmarshalCBOR(cr); err != nil {
 				return err
 			}
 
@@ -190,25 +196,26 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{163}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{163}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.SectorID (abi.SectorNumber) (uint64)
 	if len("SectorID") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"SectorID\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("SectorID")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
 		return err
 	}
 
@@ -217,14 +224,14 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"Offset\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Offset"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Offset")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil {
 		return err
 	}
 
@@ -233,30 +240,35 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"Size\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Size"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Size")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
+func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = SealedRef{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -271,7 +283,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -285,7 +297,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err = cr.ReadHeader()
 				if err != nil {
 					return err
 				}
@@ -300,7 +312,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err = cr.ReadHeader()
 				if err != nil {
 					return err
 				}
@@ -315,7 +327,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err = cr.ReadHeader()
 				if err != nil {
 					return err
 				}
@@ -339,18 +351,19 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{161}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{161}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Refs ([]api.SealedRef) (slice)
 	if len("Refs") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"Refs\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Refs"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Refs"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Refs")); err != nil {
@@ -361,27 +374,32 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.Refs was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Refs))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Refs))); err != nil {
 		return err
 	}
 	for _, v := range t.Refs {
-		if err := v.MarshalCBOR(w); err != nil {
+		if err := v.MarshalCBOR(cw); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
+func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = SealedRefs{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -396,7 +414,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -408,7 +426,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
 		// t.Refs ([]api.SealedRef) (slice)
 		case "Refs":
 
-			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			maj, extra, err = cr.ReadHeader()
 			if err != nil {
 				return err
 			}
@@ -428,7 +446,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
 		for i := 0; i < int(extra); i++ {
 
 			var v SealedRef
-			if err := v.UnmarshalCBOR(br); err != nil {
+			if err := v.UnmarshalCBOR(cr); err != nil {
 				return err
 			}
 
@@ -448,18 +466,19 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{162}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{162}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Value (abi.SealRandomness) (slice)
 	if len("Value") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"Value\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Value")); err != nil {
@@ -470,11 +489,11 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Byte array in field t.Value was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Value))); err != nil {
 		return err
 	}
 
-	if _, err := w.Write(t.Value[:]); err != nil {
+	if _, err := cw.Write(t.Value[:]); err != nil {
 		return err
 	}
 
@@ -483,7 +502,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"Epoch\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Epoch")); err != nil {
@@ -491,27 +510,32 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 	}
 
 	if t.Epoch >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
			return err
		}
	}
	return nil
 }
 
-func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
+func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = SealTicket{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -526,7 +550,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -538,7 +562,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
 		// t.Value (abi.SealRandomness) (slice)
 		case "Value":
 
-			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			maj, extra, err = cr.ReadHeader()
 			if err != nil {
 				return err
 			}
@@ -554,13 +578,13 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
 				t.Value = make([]uint8, extra)
 			}
 
-			if _, err := io.ReadFull(br, t.Value[:]); err != nil {
+			if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
 				return err
 			}
 		// t.Epoch (abi.ChainEpoch) (int64)
 		case "Epoch":
 			{
-				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err := cr.ReadHeader()
 				var extraI int64
 				if err != nil {
 					return err
@@ -597,18 +621,19 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{162}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{162}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Value (abi.InteractiveSealRandomness) (slice)
 	if len("Value") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"Value\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Value")); err != nil {
@@ -619,11 +644,11 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Byte array in field t.Value was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Value))); err != nil {
 		return err
 	}
 
-	if _, err := w.Write(t.Value[:]); err != nil {
+	if _, err := cw.Write(t.Value[:]); err != nil {
 		return err
 	}
 
@@ -632,7 +657,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"Epoch\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("Epoch")); err != nil {
@@ -640,27 +665,32 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 	}
 
 	if t.Epoch >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
			return err
		}
	}
	return nil
 }
 
-func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
+func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = SealSeed{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -675,7 +705,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -687,7 +717,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
 		// t.Value (abi.InteractiveSealRandomness) (slice)
 		case "Value":
 
-			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			maj, extra, err = cr.ReadHeader()
 			if err != nil {
 				return err
 			}
@@ -703,13 +733,13 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
 				t.Value = make([]uint8, extra)
 			}
 
-			if _, err := io.ReadFull(br, t.Value[:]); err != nil {
+			if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
 				return err
 			}
 		// t.Epoch (abi.ChainEpoch) (int64)
 		case "Epoch":
 			{
-				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err := cr.ReadHeader()
 				var extraI int64
 				if err != nil {
 					return err
@@ -746,18 +776,19 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{165}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{165}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.PublishCid (cid.Cid) (struct)
 	if len("PublishCid") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"PublishCid\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("PublishCid")); err != nil {
@@ -765,11 +796,11 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 	}
 
 	if t.PublishCid == nil {
-		if _, err := w.Write(cbg.CborNull); err != nil {
+		if _, err := cw.Write(cbg.CborNull); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
+		if err := cbg.WriteCid(cw, *t.PublishCid); err != nil {
			return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
		}
	}
@@ -779,14 +810,14 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"DealID\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("DealID")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
 		return err
 	}
 
@@ -795,14 +826,14 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"DealProposal\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("DealProposal")); err != nil {
 		return err
 	}
 
-	if err := t.DealProposal.MarshalCBOR(w); err != nil {
+	if err := t.DealProposal.MarshalCBOR(cw); err != nil {
 		return err
 	}
 
@@ -811,14 +842,14 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
 		return err
 	}
 
-	if err := t.DealSchedule.MarshalCBOR(w); err != nil {
+	if err := t.DealSchedule.MarshalCBOR(cw); err != nil {
 		return err
 	}
 
@@ -827,7 +858,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
@@ -840,16 +871,21 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 	return nil
 }
 
-func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
+func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = PieceDealInfo{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -864,7 +900,7 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -878,16 +914,16 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				b, err := br.ReadByte()
+				b, err := cr.ReadByte()
 				if err != nil {
 					return err
 				}
 				if b != cbg.CborNull[0] {
-					if err := br.UnreadByte(); err != nil {
+					if err := cr.UnreadByte(); err != nil {
						return err
					}
 
-					c, err := cbg.ReadCid(br)
+					c, err := cbg.ReadCid(cr)
					if err != nil {
						return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
					}
@@ -901,7 +937,7 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err = cr.ReadHeader()
 				if err != nil {
 					return err
 				}
@@ -916,16 +952,16 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				b, err := br.ReadByte()
+				b, err := cr.ReadByte()
 				if err != nil {
 					return err
 				}
 				if b != cbg.CborNull[0] {
-					if err := br.UnreadByte(); err != nil {
+					if err := cr.UnreadByte(); err != nil {
						return err
					}
					t.DealProposal = new(market.DealProposal)
-					if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
+					if err := t.DealProposal.UnmarshalCBOR(cr); err != nil {
						return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
					}
				}
@@ -936,7 +972,7 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 
 			{
 
-				if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
+				if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil {
					return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
				}
 
@@ -944,7 +980,7 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
 		// t.KeepUnsealed (bool) (bool)
 		case "KeepUnsealed":
 
-			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			maj, extra, err = cr.ReadHeader()
 			if err != nil {
 				return err
 			}
@@ -973,18 +1009,19 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write([]byte{162}); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{162}); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.StartEpoch (abi.ChainEpoch) (int64)
 	if len("StartEpoch") > cbg.MaxLength {
 		return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
@@ -992,11 +1029,11 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 	}
 
 	if t.StartEpoch >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
			return err
		}
	}
@@ -1006,7 +1043,7 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
@@ -1014,27 +1051,32 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 	}
 
 	if t.EndEpoch >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
			return err
		}
	}
	return nil
 }
 
-func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
+func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = DealSchedule{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajMap {
 		return fmt.Errorf("cbor input should be of type map")
 	}
@@ -1049,7 +1091,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
 	for i := uint64(0); i < n; i++ {
 
 		{
-			sval, err := cbg.ReadStringBuf(br, scratch)
+			sval, err := cbg.ReadString(cr)
 			if err != nil {
 				return err
 			}
@@ -1061,7 +1103,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
 		// t.StartEpoch (abi.ChainEpoch) (int64)
 		case "StartEpoch":
 			{
-				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err := cr.ReadHeader()
 				var extraI int64
 				if err != nil {
 					return err
@@ -1087,7 +1129,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
 		// t.EndEpoch (abi.ChainEpoch) (int64)
 		case "EndEpoch":
 			{
-				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+				maj, extra, err := cr.ReadHeader()
 				var extraI int64
 				if err != nil {
 					return err
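The api/cbor_gen.go changes above are regenerated code: cbor-gen replaced its scratch-buffer helper functions with stateful CborWriter/CborReader wrappers, and unmarshalers now use a named error return so a deferred func can normalize io.EOF to io.ErrUnexpectedEOF on truncated input. A minimal before/after sketch of the pattern — Thing and Num are illustrative names, not from this diff:

	// Before: free functions plus an explicit scratch buffer per call site.
	func (t *Thing) MarshalCBOR(w io.Writer) error {
		scratch := make([]byte, 9)
		return cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, t.Num)
	}

	// After: the writer wrapper owns buffering, so call sites shrink.
	func (t *Thing) MarshalCBOR(w io.Writer) error {
		cw := cbg.NewCborWriter(w)
		return cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Num)
	}

	// After, reading side: short reads surface as io.ErrUnexpectedEOF.
	// (Major-type check omitted for brevity.)
	func (t *Thing) UnmarshalCBOR(r io.Reader) (err error) {
		cr := cbg.NewCborReader(r)
		defer func() {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
		}()
		_, t.Num, err = cr.ReadHeader()
		return err
	}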
@@ -350,6 +350,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
 		i = &api.WorkerStruct{}
 		t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
 		permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
+	case "Gateway":
+		i = &api.GatewayStruct{}
+		t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
+		permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
 	default:
 		panic("unknown type")
 	}
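The new "Gateway" case lets the docs generator resolve the Gateway API the same way as FullNode, StorageMiner, and Worker: GetAPIType returns the proxy struct to reflect over, the reflected interface type, and the permission tables. A hedged sketch of a call site — the docgen package path and printout are illustrative assumptions:

	// Illustrative use: enumerate Gateway methods for doc generation.
	i, t, permStruct := docgen.GetAPIType("Gateway", "api")
	fmt.Printf("Gateway exposes %d methods\n", t.NumMethod())
	_, _ = i, permStruct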
@@ -512,6 +512,8 @@ type GatewayStruct struct {
 
 	ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
 
+	Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) ``
+
 	GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
 
 	MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
@@ -663,6 +665,8 @@ type StorageMinerStruct struct {
 
 	DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"`
 
+	DagstoreRegisterShard func(p0 context.Context, p1 string) error `perm:"admin"`
+
 	DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
 
 	DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
@@ -3297,6 +3301,17 @@ func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
 	return *new([]byte), ErrNotSupported
 }
 
+func (s *GatewayStruct) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) {
+	if s.Internal.Discover == nil {
+		return *new(apitypes.OpenRPCDocument), ErrNotSupported
+	}
+	return s.Internal.Discover(p0)
+}
+
+func (s *GatewayStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) {
+	return *new(apitypes.OpenRPCDocument), ErrNotSupported
+}
+
 func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
 	if s.Internal.GasEstimateMessageGas == nil {
 		return nil, ErrNotSupported
@@ -3990,6 +4005,17 @@ func (s *StorageMinerStub) DagstoreRecoverShard(p0 context.Context, p1 string) error {
 	return ErrNotSupported
 }
 
+func (s *StorageMinerStruct) DagstoreRegisterShard(p0 context.Context, p1 string) error {
+	if s.Internal.DagstoreRegisterShard == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.DagstoreRegisterShard(p0, p1)
+}
+
+func (s *StorageMinerStub) DagstoreRegisterShard(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
 	if s.Internal.DealsConsiderOfflineRetrievalDeals == nil {
 		return false, ErrNotSupported
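For context on the generated Struct/Stub pair above: GatewayStruct dispatches through an Internal function pointer and returns ErrNotSupported when it is unset, while GatewayStub always declines. A hedged sketch of wiring a Discover implementation into the proxy, reusing the embedded document loader added below — names outside this diff are assumptions:

	var gw api.GatewayStruct
	gw.Internal.Discover = func(ctx context.Context) (apitypes.OpenRPCDocument, error) {
		// Serve the gateway schema embedded into the binary at build time.
		return build.OpenRPCDiscoverJSON_Gateway(), nil
	}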
Binary file not shown.
@@ -52,3 +52,11 @@ func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
 	}
 	return mustReadGzippedOpenRPCDocument(data)
 }
+
+func OpenRPCDiscoverJSON_Gateway() apitypes.OpenRPCDocument {
+	data, err := openrpcfs.ReadFile("openrpc/gateway.json.gz")
+	if err != nil {
+		panic(err)
+	}
+	return mustReadGzippedOpenRPCDocument(data)
+}
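The loader panics if the embedded gzip document is missing, mirroring the Full/Miner/Worker variants. A quick hedged check of the document it returns (mirrors the version test further down):

	doc := build.OpenRPCDiscoverJSON_Gateway()
	if v, ok := doc["openrpc"]; ok {
		fmt.Println("gateway schema OpenRPC version:", v)
	}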
Binary file not shown.
BIN build/openrpc/gateway.json.gz (new file): Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -16,6 +16,7 @@ func TestOpenRPCDiscoverJSON_Version(t *testing.T) {
 		OpenRPCDiscoverJSON_Full,
 		OpenRPCDiscoverJSON_Miner,
 		OpenRPCDiscoverJSON_Worker,
+		OpenRPCDiscoverJSON_Gateway,
 	} {
 		doc := docFn()
 		if got, ok := doc["openrpc"]; !ok || got != openRPCDocVersion {
@@ -37,7 +37,7 @@ func BuildTypeString() string {
 }
 
 // BuildVersion is the local build version
-const BuildVersion = "1.15.3-dev"
+const BuildVersion = "1.15.4-dev"
 
 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock) error {
 	}
 
 	nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
-	pl := vm.PricelistByEpochAndNetworkVersion(b.Header.Height, nv)
+	pl := vm.PricelistByEpoch(b.Header.Height)
 	var sumGasLimit int64
 	checkMsg := func(msg types.ChainMsg) error {
 		m := msg.VMMessage()
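The merged consensus code selects the gas pricelist by epoch alone; note that nv is still computed for use elsewhere in the function. A hedged sketch of how the pricelist is typically consumed just after this point — the OnChainMessage usage mirrors the checkMsg validation path, but the exact call site is an assumption:

	pl := vm.PricelistByEpoch(b.Header.Height)
	// Illustrative: minimum gas charged for a message's serialized length.
	minGas := pl.OnChainMessage(m.ChainLength())
	_ = minGas.Total()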
@@ -26,51 +26,57 @@ func (t *Request) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write(lengthBufRequest); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufRequest); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Head ([]cid.Cid) (slice)
 	if len(t.Head) > cbg.MaxLength {
 		return xerrors.Errorf("Slice value in field t.Head was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Head))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Head))); err != nil {
 		return err
 	}
 	for _, v := range t.Head {
-		if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
+		if err := cbg.WriteCid(w, v); err != nil {
			return xerrors.Errorf("failed writing cid field t.Head: %w", err)
		}
	}
 
 	// t.Length (uint64) (uint64)
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Length)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil {
 		return err
 	}
 
 	// t.Options (uint64) (uint64)
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Options)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Options)); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (t *Request) UnmarshalCBOR(r io.Reader) error {
+func (t *Request) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = Request{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajArray {
 		return fmt.Errorf("cbor input should be of type array")
 	}
@@ -81,7 +87,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) error {
 
 	// t.Head ([]cid.Cid) (slice)
 
-	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err = cr.ReadHeader()
 	if err != nil {
 		return err
 	}
@@ -100,7 +106,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) error {
 
 	for i := 0; i < int(extra); i++ {
 
-		c, err := cbg.ReadCid(br)
+		c, err := cbg.ReadCid(cr)
 		if err != nil {
 			return xerrors.Errorf("reading cid field t.Head failed: %w", err)
 		}
@@ -111,7 +117,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) error {
 
 	{
 
-		maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err = cr.ReadHeader()
 		if err != nil {
 			return err
 		}
@@ -125,7 +131,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) error {
 
 	{
 
-		maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err = cr.ReadHeader()
 		if err != nil {
 			return err
 		}
@@ -145,15 +151,16 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write(lengthBufResponse); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufResponse); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Status (exchange.status) (uint64)
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil {
 		return err
 	}
 
@@ -162,7 +169,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Value in field t.ErrorMessage was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil {
 		return err
 	}
 	if _, err := io.WriteString(w, string(t.ErrorMessage)); err != nil {
@@ -174,27 +181,32 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.Chain was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Chain))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Chain))); err != nil {
 		return err
 	}
 	for _, v := range t.Chain {
-		if err := v.MarshalCBOR(w); err != nil {
+		if err := v.MarshalCBOR(cw); err != nil {
			return err
		}
	}
	return nil
 }
 
-func (t *Response) UnmarshalCBOR(r io.Reader) error {
+func (t *Response) UnmarshalCBOR(r io.Reader) (err error) {
 	*t = Response{}
 
-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)
 
-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
 	if err != nil {
 		return err
 	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
 
 	if maj != cbg.MajArray {
 		return fmt.Errorf("cbor input should be of type array")
 	}
@@ -207,7 +219,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
 
 	{
 
-		maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err = cr.ReadHeader()
 		if err != nil {
 			return err
 		}
@@ -220,7 +232,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
 	// t.ErrorMessage (string) (string)
 
 	{
-		sval, err := cbg.ReadStringBuf(br, scratch)
+		sval, err := cbg.ReadString(cr)
 		if err != nil {
 			return err
 		}
@@ -229,7 +241,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
 	}
 	// t.Chain ([]*exchange.BSTipSet) (slice)
 
-	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err = cr.ReadHeader()
 	if err != nil {
 		return err
 	}
@@ -249,7 +261,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
 	for i := 0; i < int(extra); i++ {
 
 		var v BSTipSet
-		if err := v.UnmarshalCBOR(br); err != nil {
+		if err := v.UnmarshalCBOR(cr); err != nil {
 			return err
 		}
 
@@ -266,22 +278,23 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 		_, err := w.Write(cbg.CborNull)
 		return err
 	}
-	if _, err := w.Write(lengthBufCompactedMessages); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufCompactedMessages); err != nil {
 		return err
 	}
 
-	scratch := make([]byte, 9)
-
 	// t.Bls ([]*types.Message) (slice)
 	if len(t.Bls) > cbg.MaxLength {
 		return xerrors.Errorf("Slice value in field t.Bls was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Bls))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Bls))); err != nil {
 		return err
 	}
 	for _, v := range t.Bls {
-		if err := v.MarshalCBOR(w); err != nil {
+		if err := v.MarshalCBOR(cw); err != nil {
			return err
		}
	}
@@ -291,7 +304,7 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.BlsIncludes was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsIncludes))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BlsIncludes))); err != nil {
 		return err
 	}
 	for _, v := range t.BlsIncludes {
@@ -299,11 +312,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 			return xerrors.Errorf("Slice value in field v was too long")
 		}
 
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil {
			return err
		}
		for _, v := range v {
-			if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
+			if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
				return err
			}
		}
@@ -314,11 +327,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.Secpk was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Secpk))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Secpk))); err != nil {
 		return err
 	}
 	for _, v := range t.Secpk {
-		if err := v.MarshalCBOR(w); err != nil {
+		if err := v.MarshalCBOR(cw); err != nil {
			return err
		}
	}
@@ -328,7 +341,7 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 		return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long")
 	}
 
-	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkIncludes))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.SecpkIncludes))); err != nil {
 		return err
 	}
 	for _, v := range t.SecpkIncludes {
@@ -336,11 +349,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
 			return xerrors.Errorf("Slice value in field v was too long")
 		}
 
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil {
			return err
		}
		for _, v := range v {
-			if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil {
+			if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
				return err
			}
		}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = CompactedMessages{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
@ -368,7 +386,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.Bls ([]*types.Message) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -388,7 +406,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v types.Message
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
if err := v.UnmarshalCBOR(cr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -397,7 +415,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.BlsIncludes ([][]uint64) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -420,7 +438,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
var extra uint64
|
||||
var err error
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -439,7 +457,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
for j := 0; j < int(extra); j++ {
|
||||
|
||||
maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, val, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read uint64 for t.BlsIncludes[i] slice: %w", err)
|
||||
}
|
||||
@ -456,7 +474,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.Secpk ([]*types.SignedMessage) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -476,7 +494,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v types.SignedMessage
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
if err := v.UnmarshalCBOR(cr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -485,7 +503,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.SecpkIncludes ([][]uint64) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -508,7 +526,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
var extra uint64
|
||||
var err error
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -527,7 +545,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
for j := 0; j < int(extra); j++ {
|
||||
|
||||
maj, val, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, val, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read uint64 for t.SecpkIncludes[i] slice: %w", err)
|
||||
}
|
||||
@ -552,43 +570,49 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(lengthBufBSTipSet); err != nil {
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufBSTipSet); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Blocks ([]*types.BlockHeader) (slice)
|
||||
if len(t.Blocks) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Blocks was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Blocks))); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Blocks))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Blocks {
|
||||
if err := v.MarshalCBOR(w); err != nil {
|
||||
if err := v.MarshalCBOR(cw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.Messages (exchange.CompactedMessages) (struct)
|
||||
if err := t.Messages.MarshalCBOR(w); err != nil {
|
||||
if err := t.Messages.MarshalCBOR(cw); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
|
||||
func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = BSTipSet{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
@ -599,7 +623,7 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.Blocks ([]*types.BlockHeader) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -619,7 +643,7 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v types.BlockHeader
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
if err := v.UnmarshalCBOR(cr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -630,16 +654,16 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
{
|
||||
|
||||
b, err := br.ReadByte()
|
||||
b, err := cr.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := br.UnreadByte(); err != nil {
|
||||
if err := cr.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
t.Messages = new(CompactedMessages)
|
||||
if err := t.Messages.UnmarshalCBOR(br); err != nil {
|
||||
if err := t.Messages.UnmarshalCBOR(cr); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.Messages pointer: %w", err)
|
||||
}
|
||||
}
|
||||
|
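The same mechanical migration repeats across every generated marshaler in this diff: the per-call scratch buffers and cbg.GetPeeker are replaced by a CborWriter/CborReader that wraps the stream once. A minimal runnable sketch of the new API surface (toy values, not the generated code itself):

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	var buf bytes.Buffer

	// Writing: NewCborWriter replaces the scratch-buffer helpers
	// (WriteMajorTypeHeaderBuf and friends).
	cw := cbg.NewCborWriter(&buf)
	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, 42); err != nil {
		panic(err)
	}

	// Reading: NewCborReader replaces cbg.GetPeeker + CborReadHeaderBuf.
	cr := cbg.NewCborReader(&buf)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		panic(err)
	}
	fmt.Println(maj == cbg.MajUnsignedInt, extra) // true 42
}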
@ -25,30 +25,31 @@ func (t *FundedAddressState) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write(lengthBufFundedAddressState); err != nil {

cw := cbg.NewCborWriter(w)

if _, err := cw.Write(lengthBufFundedAddressState); err != nil {
return err
}

scratch := make([]byte, 9)

// t.Addr (address.Address) (struct)
if err := t.Addr.MarshalCBOR(w); err != nil {
if err := t.Addr.MarshalCBOR(cw); err != nil {
return err
}

// t.AmtReserved (big.Int) (struct)
if err := t.AmtReserved.MarshalCBOR(w); err != nil {
if err := t.AmtReserved.MarshalCBOR(cw); err != nil {
return err
}

// t.MsgCid (cid.Cid) (struct)

if t.MsgCid == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
if _, err := cw.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.MsgCid); err != nil {
if err := cbg.WriteCid(cw, *t.MsgCid); err != nil {
return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err)
}
}
@ -56,16 +57,21 @@ func (t *FundedAddressState) MarshalCBOR(w io.Writer) error {
return nil
}

func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {
func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) (err error) {
*t = FundedAddressState{}

br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
cr := cbg.NewCborReader(r)

maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()

if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
@ -78,7 +84,7 @@ func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {

{

if err := t.Addr.UnmarshalCBOR(br); err != nil {
if err := t.Addr.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Addr: %w", err)
}

@ -87,7 +93,7 @@ func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {

{

if err := t.AmtReserved.UnmarshalCBOR(br); err != nil {
if err := t.AmtReserved.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err)
}

@ -96,16 +102,16 @@ func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {

{

b, err := br.ReadByte()
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
if err := cr.UnreadByte(); err != nil {
return err
}

c, err := cbg.ReadCid(br)
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err)
}
@ -281,11 +281,12 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
// gas checks

// 4. Min Gas
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

check = api.MessageCheckStatus{
Cid: m.Cid(),
CheckStatus: api.CheckStatus{Code: api.CheckStatusMessageMinGas,
CheckStatus: api.CheckStatus{
Code: api.CheckStatusMessageMinGas,
Hint: map[string]interface{}{
"minGas": minGas,
},
@ -629,11 +629,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height() + 1
nv, err := mp.getNtwkVersion(epoch)
if err != nil {
return false, xerrors.Errorf("getting network version: %w", err)
}
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)
@ -781,7 +781,6 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// cannot exceed the block limit; drop all messages that exceed the limit
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
// the balance

a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err)
@ -794,12 +793,6 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
skip := 0
i := 0
rewards := make([]*big.Int, 0, len(msgs))

nv, err := mp.getNtwkVersion(ts.Height())
if err != nil {
log.Errorf("getting network version: %v", err)
return nil
}
for i = 0; i < len(msgs); i++ {
m := msgs[i]

@ -815,7 +808,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
}
curNonce++

minGas := vm.PricelistByEpochAndNetworkVersion(ts.Height(), nv).OnChainMessage(m.ChainLength()).Total()
minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
if m.Message.GasLimit < minGas {
break
}
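Every message-pool call site above now reduces to the same gate: look up the epoch-keyed pricelist, compute the message's minimum on-chain gas, and drop anything that cannot cover it. A toy, runnable illustration of that gate (the types and the cost formula here are invented for the example; the real code uses vm.PricelistByEpoch as shown in the hunks above):

package main

import "fmt"

type msg struct {
	gasLimit int64
	chainLen int
}

// stand-in for PricelistByEpoch(epoch).OnChainMessage(chainLen).Total();
// the constants loosely echo the pricelist's per-message base and per-byte
// storage charges, but are not the exact on-chain formula.
func onChainMessageMinGas(chainLen int) int64 {
	return 38863 + 36*int64(chainLen)
}

func main() {
	msgs := []msg{
		{gasLimit: 500000, chainLen: 200}, // covers its inclusion cost
		{gasLimit: 1000, chainLen: 200},   // cannot cover it
	}
	for _, m := range msgs {
		if m.gasLimit < onChainMessageMinGas(m.chainLen) {
			fmt.Println("drop: gas limit below min gas")
			continue
		}
		fmt.Println("keep")
	}
}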
@ -8,6 +8,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

@ -142,7 +143,18 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe

for _, c := range out {
if seen.Visit(c) {
if c.Prefix().Codec != cid.DagCBOR {
prefix := c.Prefix()

// Don't include identity CIDs.
if prefix.MhType == mh.IDENTITY {
continue
}

// We only include raw and dagcbor, for now.
// Raw for "code" CIDs.
switch prefix.Codec {
case cid.Raw, cid.DagCBOR:
default:
continue
}

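The new filter is a pure predicate on the CID prefix, which makes it easy to check in isolation. A self-contained sketch of the same rule (the decoded CID below is the well-known empty identity CID, used here only to exercise the identity branch):

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// include mirrors the snapshot walk's per-CID filter: identity-hashed
// blocks are skipped (their data is inline in the CID itself), and only
// raw ("code" CIDs) and dag-cbor blocks are exported.
func include(c cid.Cid) bool {
	prefix := c.Prefix()
	if prefix.MhType == mh.IDENTITY {
		return false
	}
	switch prefix.Codec {
	case cid.Raw, cid.DagCBOR:
		return true
	default:
		return false
	}
}

func main() {
	c, err := cid.Decode("bafkqaaa") // empty identity CID
	if err != nil {
		panic(err)
	}
	fmt.Println(include(c)) // false: identity CIDs are never written out
}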
File diff suppressed because it is too large
@ -26,19 +26,20 @@ func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write(lengthBufFvmExecutionTrace); err != nil {

cw := cbg.NewCborWriter(w)

if _, err := cw.Write(lengthBufFvmExecutionTrace); err != nil {
return err
}

scratch := make([]byte, 9)

// t.Msg (types.Message) (struct)
if err := t.Msg.MarshalCBOR(w); err != nil {
if err := t.Msg.MarshalCBOR(cw); err != nil {
return err
}

// t.MsgRct (types.MessageReceipt) (struct)
if err := t.MsgRct.MarshalCBOR(w); err != nil {
if err := t.MsgRct.MarshalCBOR(cw); err != nil {
return err
}

@ -47,7 +48,7 @@ func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error {
return xerrors.Errorf("Value in field t.Error was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Error))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Error))); err != nil {
return err
}
if _, err := io.WriteString(w, string(t.Error)); err != nil {
@ -59,27 +60,32 @@ func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error {
return xerrors.Errorf("Slice value in field t.Subcalls was too long")
}

if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Subcalls))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil {
return err
}
for _, v := range t.Subcalls {
if err := v.MarshalCBOR(w); err != nil {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
return nil
}

func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {
func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = FvmExecutionTrace{}

br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
cr := cbg.NewCborReader(r)

maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()

if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
@ -92,16 +98,16 @@ func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {

{

b, err := br.ReadByte()
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Msg = new(types.Message)
if err := t.Msg.UnmarshalCBOR(br); err != nil {
if err := t.Msg.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Msg pointer: %w", err)
}
}
@ -111,16 +117,16 @@ func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {

{

b, err := br.ReadByte()
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
if err := cr.UnreadByte(); err != nil {
return err
}
t.MsgRct = new(types.MessageReceipt)
if err := t.MsgRct.UnmarshalCBOR(br); err != nil {
if err := t.MsgRct.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.MsgRct pointer: %w", err)
}
}
@ -129,7 +135,7 @@ func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {
// t.Error (string) (string)

{
sval, err := cbg.ReadStringBuf(br, scratch)
sval, err := cbg.ReadString(cr)
if err != nil {
return err
}
@ -138,7 +144,7 @@ func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {
}
// t.Subcalls ([]vm.FvmExecutionTrace) (slice)

maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
@ -158,7 +164,7 @@ func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {
for i := 0; i < int(extra); i++ {

var v FvmExecutionTrace
if err := v.UnmarshalCBOR(br); err != nil {
if err := v.UnmarshalCBOR(cr); err != nil {
return err
}

@ -43,7 +43,6 @@ type FvmExtern struct {
Rand
blockstore.Blockstore
epoch abi.ChainEpoch
nv network.Version
lbState LookbackStateGetter
base cid.Cid
}
@ -215,7 +214,7 @@ func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Add
}

cstWithoutGas := cbor.NewCborStore(x.Blockstore)
cbb := &gasChargingBlocks{gasAdder, PricelistByEpochAndNetworkVersion(x.epoch, x.nv), x.Blockstore}
cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore}
cstWithGas := cbor.NewCborStore(cbb)

lbState, err := x.lbState(ctx, height)
@ -275,7 +274,7 @@ func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {

fvmOpts := ffi.FVMOpts{
FVMVersion: 0,
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch, nv: opts.NetworkVersion},
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch},
Epoch: opts.Epoch,
BaseFee: opts.BaseFee,
BaseCircSupply: circToReport,
@ -285,6 +284,7 @@ func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
}

fvm, err := ffi.CreateFVM(&fvmOpts)

if err != nil {
return nil, err
}
254
chain/vm/gas.go
@ -3,8 +3,6 @@ package vm
import (
"fmt"

"github.com/filecoin-project/go-state-types/network"

vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"

@ -84,153 +82,145 @@ type Pricelist interface {
OnVerifyConsensusFault() GasCharge
}

var priceListGenesis = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,
// Prices are the price lists per starting epoch. Public for testing purposes
// (concretely to allow the test vector runner to rebase prices).
var Prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,

onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,

onChainReturnValuePerByte: 1,
onChainReturnValuePerByte: 1,

sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,
sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,

ipldGetBase: 75242,
ipldPutBase: 84070,
ipldPutPerByte: 1,
ipldGetBase: 75242,
ipldPutBase: 84070,
ipldPutPerByte: 1,

createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage
createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage

verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},

hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, the VerifySeal syscall is not used
verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
scale: 9226981,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 748593537,
scale: 85639,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 748593537,
scale: 85639,
},
},
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,

hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, the VerifySeal syscall is not used
verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
scale: 9226981,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,

onChainReturnValuePerByte: 1,

sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,

ipldGetBase: 114617,
ipldPutBase: 353640,
ipldPutPerByte: 1,

createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage

verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 748593537,
scale: 85639,

hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, the VerifySeal syscall is not used

verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 748593537,
scale: 85639,
verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
{4, 102581240},
{7, 110803030},
{13, 120803700},
{26, 134642130},
{52, 157357890},
{103, 203017690},
{205, 304253590},
{410, 509880640},
},
},

verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 117680921,
scale: 43780,
},
},
verifyPostDiscount: false,
verifyConsensusFault: 495422,

verifyReplicaUpdate: 36316136,
},
verifyPostDiscount: true,
verifyConsensusFault: 495422,
}

var priceListCalico = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,

onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,

onChainReturnValuePerByte: 1,

sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,

ipldGetBase: 114617,
ipldPutBase: 353640,
ipldPutPerByte: 1,

createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage

verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},

hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, the VerifySeal syscall is not used

verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
},
verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
{4, 102581240},
{7, 110803030},
{13, 120803700},
{26, 134642130},
{52, 157357890},
{103, 203017690},
{205, 304253590},
{410, 509880640},
},
},

verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 117680921,
scale: 43780,
},
},
verifyPostDiscount: false,
verifyConsensusFault: 495422,

verifyReplicaUpdate: 36316136,
}

// Prices are the price lists per starting epoch.
// For network v8 and onwards, this is disregarded; the pricelist is selected by network version.
var pricesByEpoch = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &priceListGenesis,
abi.ChainEpoch(build.UpgradeCalicoHeight): &priceListCalico,
}

// PricelistByEpochAndNetworkVersion finds the latest prices for the given epoch
func PricelistByEpochAndNetworkVersion(epoch abi.ChainEpoch, nv network.Version) Pricelist {
if nv >= network.Version8 {
return &priceListCalico
}

// PricelistByEpoch finds the latest prices for the given epoch
func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
// since we are storing the prices as a map of epoch to price
// we need to get the price with the highest epoch that is lower or equal to the `epoch` arg
bestEpoch := abi.ChainEpoch(0)
bestPrice := pricesByEpoch[bestEpoch]
for e, pl := range pricesByEpoch {
bestPrice := Prices[bestEpoch]
for e, pl := range Prices {
// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
if e > bestEpoch && e <= epoch {
bestEpoch = e
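Both sides of this hunk implement the same selection rule: among price lists keyed by their starting epoch, pick the one with the highest key that is still at or below the target epoch. A standalone sketch of that rule with toy types (the real code maps abi.ChainEpoch to Pricelist; the Calico height used below is illustrative):

package main

import "fmt"

// pricelistFor picks the entry with the highest starting epoch that is
// <= the target epoch, exactly as PricelistByEpoch does above.
func pricelistFor(prices map[int64]string, epoch int64) string {
	bestEpoch := int64(0)
	best := prices[bestEpoch]
	for e, pl := range prices {
		if e > bestEpoch && e <= epoch {
			bestEpoch = e
			best = pl
		}
	}
	return best
}

func main() {
	prices := map[int64]string{0: "genesis", 265200: "calico"}
	fmt.Println(pricelistFor(prices, 100))    // genesis
	fmt.Println(pricelistFor(prices, 300000)) // calico
}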
@ -51,7 +51,7 @@ var EmptyObjectCid cid.Cid

// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
if err := rt.chargeGasSafe(PricelistByEpochAndNetworkVersion(rt.height, rt.NetworkVersion()).OnCreateActor()); err != nil {
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
return nil, address.Undef, err
}

@ -135,7 +135,7 @@ func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent
gasAvailable: msg.GasLimit,
depth: 0,
numActorsCreated: 0,
pricelist: PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion),
pricelist: PricelistByEpoch(vm.blockHeight),
allowInternal: true,
callerValidated: false,
executionTrace: types.ExecutionTrace{Msg: msg},
@ -431,7 +431,7 @@ func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*App
return nil, err
}

pl := PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion)
pl := PricelistByEpoch(vm.blockHeight)

msgGas := pl.OnChainMessage(cmsg.ChainLength())
msgGasCost := msgGas.Total()
@ -109,6 +109,7 @@ func main() {
Commands: []*cli.Command{
proveCmd,
sealBenchCmd,
simpleCmd,
importBenchCmd,
},
}
990
cmd/lotus-bench/simple.go
Normal file
@ -0,0 +1,990 @@
package main

import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strconv"
"time"

"github.com/docker/go-units"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
prf "github.com/filecoin-project/specs-actors/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
)

var simpleCmd = &cli.Command{
Name: "simple",
Usage: "Run basic sector operations",
Description: `Example sealing steps:

> Create unsealed sector file

$ ./lotus-bench simple addpiece --sector-size 2K /dev/zero /tmp/unsealed
AddPiece 25.23225ms (79.26 KiB/s)
baga6ea4seaqpy7usqklokfx2vxuynmupslkeutzexe2uqurdg5vhtebhxqmpqmy 2048

> Run PreCommit1

$ ./lotus-bench simple precommit1 --sector-size 2k /tmp/unsealed /tmp/sealed /tmp/cache baga6ea4seaqpy7usqklokfx2vxuynmupslkeutzexe2uqurdg5vhtebhxqmpqmy 2048
PreCommit1 30.151666ms (66.33 KiB/s)
eyJfbG90dXNfU2VhbFJhbmRvbW5lc3MiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB[...]==

> Run PreCommit2

$ ./lotus-bench simple precommit2 --sector-size 2k /tmp/sealed /tmp/cache eyJfbG90dXNfU2VhbFJhbmRvbW5lc3MiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB[...]==
PreCommit2 75.320167ms (26.55 KiB/s)
d:baga6ea4seaqpy7usqklokfx2vxuynmupslkeutzexe2uqurdg5vhtebhxqmpqmy r:bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c

> Run Commit1

$ ./lotus-bench simple commit1 --sector-size 2k /tmp/sl /tmp/cac baga6ea4seaqpy7usqklokfx2vxuynmupslkeutzexe2uqurdg5vhtebhxqmpqmy bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c /tmp/c1.json
Commit1 20.691875ms (96.66 KiB/s)

> Run Commit2

$ ./lotus-bench simple commit2 /tmp/c1.json
[...]
Commit2: 13.829147792s (148 B/s)
proof: 8b624a6a4b272a6196517f858d07205c586cfae77fc026e8e9340acefbb8fc1d5af25b33724756c0a4481a800e14ff1ea914c3ce20bf6e2932296ad8ffa32867989ceae62e50af1479ca56a1ea5228cc8acf5ca54bc0b8e452bf74194b758b2c12ece76599a8b93f6b3dd9f0b1bb2e023bf311e9a404c7d453aeddf284e46025b63b631610de6ff6621bc6f630a14dd3ad59edbe6e940fdebbca3d97bea2708fd21764ea929f4699ebc93d818037a74be3363bdb2e8cc29b3e386c6376ff98fa

----
Example PoSt steps:

> Try window-post

$ ./lotus-bench simple window-post --sector-size 2k /tmp/sealed /tmp/cache bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c 1
Vanilla 14.192625ms (140.9 KiB/s)
Proof 387.819333ms (5.156 KiB/s)
mI6TdveK9wMqHwVsRlVa90q44yGEIsNqLpTQLB...

> Try winning-post

$ ./lotus-bench simple winning-post --sector-size 2k /tmp/sealed /tmp/cache bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c 1
Vanilla 19.266625ms (103.8 KiB/s)
Proof 1.234634708s (1.619 KiB/s)
o4VBUf2wBHuvmm58XY8wgCC/1xBqfujlgmNs...

----
Example SnapDeals steps:

> Create unsealed update file

$ ./lotus-bench simple addpiece --sector-size 2K /dev/random /tmp/new-unsealed
AddPiece 23.845958ms (83.87 KiB/s)
baga6ea4seaqkt24j5gbf2ye2wual5gn7a5yl2tqb52v2sk4nvur4bdy7lg76cdy 2048

> Create updated sealed file

$ ./lotus-bench simple replicaupdate --sector-size 2K /tmp/sealed /tmp/cache /tmp/new-unsealed /tmp/update /tmp/update-cache baga6ea4seaqkt24j5gbf2ye2wual5gn7a5yl2tqb52v2sk4nvur4bdy7lg76cdy 2048
ReplicaUpdate 63.0815ms (31.7 KiB/s)
d:baga6ea4seaqkt24j5gbf2ye2wual5gn7a5yl2tqb52v2sk4nvur4bdy7lg76cdy r:bagboea4b5abcaydcwlbtdx5dph2a3efpqt42emxpn3be76iu4e4lx3ltrpmpi7af

> Run ProveReplicaUpdate1

$ ./lotus-bench simple provereplicaupdate1 --sector-size 2K /tmp/sl /tmp/cac /tmp/update /tmp/update-cache bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c bagboea4b5abcaydcwlbtdx5dph2a3efpqt42emxpn3be76iu4e4lx3ltrpmpi7af baga6ea4seaqkt24j5gbf2ye2wual5gn7a5yl2tqb52v2sk4nvur4bdy7lg76cdy /tmp/pr1.json
ProveReplicaUpdate1 18.373375ms (108.9 KiB/s)

> Run ProveReplicaUpdate2

$ ./lotus-bench simple provereplicaupdate2 --sector-size 2K bagboea4b5abcbrshxgmmpaucffwp2elaofbcrvb7hmcu3653o4lsw2arlor4hn3c bagboea4b5abcaydcwlbtdx5dph2a3efpqt42emxpn3be76iu4e4lx3ltrpmpi7af baga6ea4seaqkt24j5gbf2ye2wual5gn7a5yl2tqb52v2sk4nvur4bdy7lg76cdy /tmp/pr1.json
ProveReplicaUpdate2 7.339033459s (279 B/s)
p: pvC0JBrEyUqtIIUvB2UUx/2a24c3Cvnu6AZ0D3IMBYAu...
`,
Subcommands: []*cli.Command{
simpleAddPiece,
simplePreCommit1,
simplePreCommit2,
simpleCommit1,
simpleCommit2,
simpleWindowPost,
simpleWinningPost,
simpleReplicaUpdate,
simpleProveReplicaUpdate1,
simpleProveReplicaUpdate2,
},
}

type benchSectorProvider map[storiface.SectorFileType]string

func (b benchSectorProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
out := storiface.SectorPaths{
ID: id.ID,
Unsealed: b[storiface.FTUnsealed],
Sealed: b[storiface.FTSealed],
Cache: b[storiface.FTCache],
Update: b[storiface.FTUpdate],
UpdateCache: b[storiface.FTUpdateCache],
}
return out, func() {}, nil
}

var _ ffiwrapper.SectorProvider = &benchSectorProvider{}

var simpleAddPiece = &cli.Command{
Name: "addpiece",
ArgsUsage: "[data] [unsealed]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTUnsealed: cctx.Args().Get(1),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

data, err := os.Open(cctx.Args().First())
if err != nil {
return xerrors.Errorf("open data file: %w", err)
}

start := time.Now()

pi, err := sealer.AddPiece(ctx, sr, []abi.UnpaddedPieceSize{}, abi.PaddedPieceSize(sectorSize).Unpadded(), data)
if err != nil {
return xerrors.Errorf("add piece: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("AddPiece %s (%s)\n", took, bps(abi.SectorSize(pi.Size), 1, took))
fmt.Printf("%s %d\n", pi.PieceCID, pi.Size)

return nil
},
}

var simplePreCommit1 = &cli.Command{
Name: "precommit1",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[unsealed] [sealed] [cache] [[piece cid] [piece size]]...",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTUnsealed: cctx.Args().Get(0),
storiface.FTSealed: cctx.Args().Get(1),
storiface.FTCache: cctx.Args().Get(2),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

var ticket [32]byte // all zero

pieces, err := ParsePieceInfos(cctx, 3)
if err != nil {
return err
}

start := time.Now()

p1o, err := sealer.SealPreCommit1(ctx, sr, ticket[:], pieces)
if err != nil {
return xerrors.Errorf("precommit1: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("PreCommit1 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Println(base64.StdEncoding.EncodeToString(p1o))
return nil
},
}

var simplePreCommit2 = &cli.Command{
Name: "precommit2",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [pc1 out]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

p1o, err := base64.StdEncoding.DecodeString(cctx.Args().Get(2))
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

start := time.Now()

p2o, err := sealer.SealPreCommit2(ctx, sr, p1o)
if err != nil {
return xerrors.Errorf("precommit2: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("PreCommit2 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Printf("d:%s r:%s\n", p2o.Unsealed, p2o.Sealed)
return nil
},
}

var simpleCommit1 = &cli.Command{
Name: "commit1",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [comm D] [comm R] [c1out.json]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

start := time.Now()

var ticket, seed [32]byte // all zero

commd, err := cid.Parse(cctx.Args().Get(2))
if err != nil {
return xerrors.Errorf("parse commd: %w", err)
}

commr, err := cid.Parse(cctx.Args().Get(3))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

c1o, err := sealer.SealCommit1(ctx, sr, ticket[:], seed[:], []abi.PieceInfo{
{
Size: abi.PaddedPieceSize(sectorSize),
PieceCID: commd,
},
}, storage.SectorCids{
Unsealed: commd,
Sealed: commr,
})
if err != nil {
return xerrors.Errorf("commit1: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("Commit1 %s (%s)\n", took, bps(sectorSize, 1, took))

c2in := Commit2In{
SectorNum: int64(1),
Phase1Out: c1o,
SectorSize: uint64(sectorSize),
}

b, err := json.Marshal(&c2in)
if err != nil {
return err
}

if err := ioutil.WriteFile(cctx.Args().Get(4), b, 0664); err != nil {
log.Warnf("%+v", err)
}

return nil
},
}

var simpleCommit2 = &cli.Command{
Name: "commit2",
ArgsUsage: "[c1out.json]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-gpu",
Usage: "disable gpu usage for the benchmark run",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
Action: func(c *cli.Context) error {
if c.Bool("no-gpu") {
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
return xerrors.Errorf("setting no-gpu flag: %w", err)
}
}

if !c.Args().Present() {
return xerrors.Errorf("Usage: lotus-bench simple commit2 [input.json]")
}

inb, err := ioutil.ReadFile(c.Args().First())
if err != nil {
return xerrors.Errorf("reading input file: %w", err)
}

var c2in Commit2In
if err := json.Unmarshal(inb, &c2in); err != nil {
return xerrors.Errorf("unmarshalling input file: %w", err)
}

if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil {
return xerrors.Errorf("getting params: %w", err)
}

maddr, err := address.NewFromString(c.String("miner-addr"))
if err != nil {
return err
}
mid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}

sb, err := ffiwrapper.New(nil)
if err != nil {
return err
}

ref := storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(c2in.SectorNum),
},
ProofType: spt(abi.SectorSize(c2in.SectorSize)),
}

start := time.Now()

proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
if err != nil {
return err
}

sealCommit2 := time.Now()
dur := sealCommit2.Sub(start)

fmt.Printf("Commit2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), 1, dur))
fmt.Printf("proof: %x\n", proof)
return nil
},
}

var simpleWindowPost = &cli.Command{
Name: "window-post",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [comm R] [sector num]",
Action: func(cctx *cli.Context) error {
maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

var rand [32]byte // all zero

commr, err := cid.Parse(cctx.Args().Get(2))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

wpt, err := spt(sectorSize).RegisteredWindowPoStProof()
if err != nil {
return err
}

snum, err := strconv.ParseUint(cctx.Args().Get(3), 10, 64)
if err != nil {
return xerrors.Errorf("parsing sector num: %w", err)
}
sn := abi.SectorNumber(snum)

ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpt, mid, rand[:], []abi.SectorNumber{sn})
if err != nil {
return xerrors.Errorf("generating challenges: %w", err)
}

start := time.Now()

vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
SectorInfo: prf.SectorInfo{
SealProof: spt(sectorSize),
SectorNumber: sn,
SealedCID: commr,
},
CacheDirPath: cctx.Args().Get(1),
PoStProofType: wpt,
SealedSectorPath: cctx.Args().Get(0),
}, ch.Challenges[sn])
if err != nil {
return err
}

challenge := time.Now()

proof, err := ffi.GenerateSinglePartitionWindowPoStWithVanilla(wpt, mid, rand[:], [][]byte{vp}, 0)
if err != nil {
return xerrors.Errorf("generate post: %w", err)
}

end := time.Now()

fmt.Printf("Vanilla %s (%s)\n", challenge.Sub(start), bps(sectorSize, 1, challenge.Sub(start)))
fmt.Printf("Proof %s (%s)\n", end.Sub(challenge), bps(sectorSize, 1, end.Sub(challenge)))
fmt.Println(base64.StdEncoding.EncodeToString(proof.ProofBytes))
return nil
},
}

var simpleWinningPost = &cli.Command{
Name: "winning-post",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [comm R] [sector num]",
Action: func(cctx *cli.Context) error {
maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

var rand [32]byte // all zero

commr, err := cid.Parse(cctx.Args().Get(2))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

wpt, err := spt(sectorSize).RegisteredWinningPoStProof()
if err != nil {
return err
}

snum, err := strconv.ParseUint(cctx.Args().Get(3), 10, 64)
if err != nil {
return xerrors.Errorf("parsing sector num: %w", err)
}
sn := abi.SectorNumber(snum)

ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpt, mid, rand[:], []abi.SectorNumber{sn})
if err != nil {
return xerrors.Errorf("generating challenges: %w", err)
}

start := time.Now()

vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
SectorInfo: prf.SectorInfo{
SealProof: spt(sectorSize),
SectorNumber: sn,
SealedCID: commr,
},
CacheDirPath: cctx.Args().Get(1),
PoStProofType: wpt,
SealedSectorPath: cctx.Args().Get(0),
}, ch.Challenges[sn])
if err != nil {
return err
}

challenge := time.Now()

proof, err := ffi.GenerateWinningPoStWithVanilla(wpt, mid, rand[:], [][]byte{vp})
if err != nil {
return xerrors.Errorf("generate post: %w", err)
}

end := time.Now()

fmt.Printf("Vanilla %s (%s)\n", challenge.Sub(start), bps(sectorSize, 1, challenge.Sub(start)))
fmt.Printf("Proof %s (%s)\n", end.Sub(challenge), bps(sectorSize, 1, end.Sub(challenge)))
fmt.Println(base64.StdEncoding.EncodeToString(proof[0].ProofBytes))
return nil
},
}

var simpleReplicaUpdate = &cli.Command{
Name: "replicaupdate",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [unsealed] [update] [updatecache] [[piece cid] [piece size]]...",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
storiface.FTUnsealed: cctx.Args().Get(2),
storiface.FTUpdate: cctx.Args().Get(3),
storiface.FTUpdateCache: cctx.Args().Get(4),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

pieces, err := ParsePieceInfos(cctx, 5)
if err != nil {
return err
}
sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

start := time.Now()

ruo, err := sealer.ReplicaUpdate(ctx, sr, pieces)
if err != nil {
return xerrors.Errorf("replica update: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("ReplicaUpdate %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Printf("d:%s r:%s\n", ruo.NewUnsealed, ruo.NewSealed)
return nil
},
}

var simpleProveReplicaUpdate1 = &cli.Command{
Name: "provereplicaupdate1",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sealed] [cache] [update] [updatecache] [sectorKey] [newSealed] [newUnsealed] [vproofs.json]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
storiface.FTUpdate: cctx.Args().Get(2),
storiface.FTUpdateCache: cctx.Args().Get(3),
}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

start := time.Now()

oldcommr, err := cid.Parse(cctx.Args().Get(4))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

commr, err := cid.Parse(cctx.Args().Get(5))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

commd, err := cid.Parse(cctx.Args().Get(6))
if err != nil {
return xerrors.Errorf("parse commd: %w", err)
}

rvp, err := sealer.ProveReplicaUpdate1(ctx, sr, oldcommr, commr, commd)
if err != nil {
return xerrors.Errorf("replica update: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("ProveReplicaUpdate1 %s (%s)\n", took, bps(sectorSize, 1, took))

vpjb, err := json.Marshal(&rvp)
if err != nil {
return xerrors.Errorf("json marshal vanilla proofs: %w", err)
}

if err := ioutil.WriteFile(cctx.Args().Get(7), vpjb, 0666); err != nil {
return xerrors.Errorf("writing vanilla proofs file: %w", err)
}

return nil
},
}

var simpleProveReplicaUpdate2 = &cli.Command{
Name: "provereplicaupdate2",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "512MiB",
Usage: "size of the sectors in bytes, e.g. 32GiB",
},
&cli.StringFlag{
Name: "miner-addr",
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
},
ArgsUsage: "[sectorKey] [newSealed] [newUnsealed] [vproofs.json]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context

maddr, err := address.NewFromString(cctx.String("miner-addr"))
if err != nil {
return err
}
amid, err := address.IDFromAddress(maddr)
if err != nil {
return err
}
mid := abi.ActorID(amid)

sectorSizeInt, err := units.RAMInBytes(cctx.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)

pp := benchSectorProvider{}
sealer, err := ffiwrapper.New(pp)
if err != nil {
return err
}

sr := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize),
}

start := time.Now()

oldcommr, err := cid.Parse(cctx.Args().Get(0))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

commr, err := cid.Parse(cctx.Args().Get(1))
if err != nil {
return xerrors.Errorf("parse commr: %w", err)
}

commd, err := cid.Parse(cctx.Args().Get(2))
if err != nil {
return xerrors.Errorf("parse commd: %w", err)
}

vpb, err := ioutil.ReadFile(cctx.Args().Get(3))
if err != nil {
return xerrors.Errorf("reading vanilla proof file: %w", err)
}

var vp storage.ReplicaVanillaProofs
if err := json.Unmarshal(vpb, &vp); err != nil {
return xerrors.Errorf("unmarshalling vanilla proofs: %w", err)
}

p, err := sealer.ProveReplicaUpdate2(ctx, sr, oldcommr, commr, commd, vp)
if err != nil {
return xerrors.Errorf("prove replica update2: %w", err)
}

took := time.Now().Sub(start)

fmt.Printf("ProveReplicaUpdate2 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Println("p:", base64.StdEncoding.EncodeToString(p))

return nil
},
}

func ParsePieceInfos(cctx *cli.Context, firstArg int) ([]abi.PieceInfo, error) {
args := cctx.Args().Len() - firstArg
if args%2 != 0 {
return nil, xerrors.Errorf("piece info arguments need to be supplied in pairs")
}
if args < 2 {
return nil, xerrors.Errorf("need at least one piece info argument")
}

out := make([]abi.PieceInfo, args/2)

for i := 0; i < args/2; i++ {
c, err := cid.Parse(cctx.Args().Get(firstArg + (i * 2)))
if err != nil {
return nil, xerrors.Errorf("parse piece cid: %w", err)
}

psize, err := strconv.ParseUint(cctx.Args().Get(firstArg+(i*2)+1), 10, 64)
if err != nil {
return nil, xerrors.Errorf("parse piece size: %w", err)
}

out[i] = abi.PieceInfo{
Size: abi.PaddedPieceSize(psize),
PieceCID: c,
}
}

return out, nil
}
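The throughput figures these commands print come from a bps helper that lives elsewhere in cmd/lotus-bench and is not part of this diff; a standalone sketch of what it computes (assumed shape, simplified formatting). Note that it reproduces the documented AddPiece example: 2048 bytes over ~25ms is about 79 KiB/s.

package main

import (
	"fmt"
	"time"
)

// bps renders bytes-per-second for n sectors of the given size over d.
// This is an illustrative stand-in for the real helper in lotus-bench.
func bps(sectorSizeBytes uint64, n int, d time.Duration) string {
	bytesPerSec := float64(sectorSizeBytes) * float64(n) / d.Seconds()
	return fmt.Sprintf("%.2f KiB/s", bytesPerSec/1024)
}

func main() {
	fmt.Println(bps(2048, 1, 25*time.Millisecond)) // 80.00 KiB/s
}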
@ -198,7 +198,7 @@ var runCmd = &cli.Command{
}

gwapi := gateway.NewNode(api, lookbackCap, waitLookback, rateLimit, rateLimitTimeout)
h, err := gateway.Handler(gwapi, perConnRateLimit, connPerMinute, serverOptions...)
h, err := gateway.Handler(gwapi, api, perConnRateLimit, connPerMinute, serverOptions...)
if err != nil {
return xerrors.Errorf("failed to set up gateway HTTP handler: %w", err)
}
@ -19,6 +19,7 @@ var dagstoreCmd = &cli.Command{
Usage: "Manage the dagstore on the markets subsystem",
Subcommands: []*cli.Command{
dagstoreListShardsCmd,
dagstoreRegisterShardCmd,
dagstoreInitializeShardCmd,
dagstoreRecoverShardCmd,
dagstoreInitializeAllCmd,
@ -59,6 +60,45 @@ var dagstoreListShardsCmd = &cli.Command{
},
}

var dagstoreRegisterShardCmd = &cli.Command{
Name: "register-shard",
ArgsUsage: "[key]",
Usage: "Register a shard",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
},
Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}

if cctx.NArg() != 1 {
return fmt.Errorf("must provide a single shard key")
}

marketsAPI, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
return err
}
defer closer()

ctx := lcli.ReqContext(cctx)

shardKey := cctx.Args().First()
err = marketsAPI.DagstoreRegisterShard(ctx, shardKey)
if err != nil {
return err
}

fmt.Println("Registered shard " + shardKey)
return nil
},
}

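A usage sketch for the new subcommand, in the same style as the lotus-bench examples above (the shard key here is hypothetical):

$ lotus-miner dagstore register-shard baga6ea4seaq...examplekey
Registered shard baga6ea4seaq...examplekey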
var dagstoreInitializeShardCmd = &cli.Command{
Name: "initialize-shard",
ArgsUsage: "[key]",
54
cmd/lotus-miner/precommits-info.go
Normal file
@ -0,0 +1,54 @@
package main

import (
	"fmt"
	"sort"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/specs-actors/v7/actors/util/adt"
	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/urfave/cli/v2"
)

var sectorPreCommitsCmd = &cli.Command{
	Name:  "precommits",
	Usage: "Print on-chain precommit info",
	Action: func(cctx *cli.Context) error {
		ctx := lcli.ReqContext(cctx)
		mapi, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		maddr, err := getActorAddress(ctx, cctx)
		if err != nil {
			return err
		}
		mact, err := mapi.StateGetActor(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return err
		}
		store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(mapi)))
		mst, err := miner.Load(store, mact)
		if err != nil {
			return err
		}
		preCommitSector := make([]miner.SectorPreCommitOnChainInfo, 0)
		err = mst.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error {
			preCommitSector = append(preCommitSector, info)
			return nil
		})
		if err != nil {
			return err
		}
		less := func(i, j int) bool {
			return preCommitSector[i].Info.SectorNumber < preCommitSector[j].Info.SectorNumber
		}
		sort.Slice(preCommitSector, less)
		for _, info := range preCommitSector {
			fmt.Printf("%s: %s\n", info.Info.SectorNumber, info.PreCommitEpoch)
		}

		return nil
	},
}
@ -4,20 +4,27 @@ import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/fatih/color"
	"github.com/google/uuid"
	"github.com/mitchellh/go-homedir"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-padreader"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	"github.com/filecoin-project/lotus/lib/httpreader"

	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
@ -31,6 +38,7 @@ var sealingCmd = &cli.Command{
		workersCmd(true),
		sealingSchedDiagCmd,
		sealingAbortCmd,
		sealingDataCidCmd,
	},
}

@ -97,13 +105,20 @@ func workersCmd(sealing bool) *cli.Command {
		return st[i].id.String() < st[j].id.String()
	})

	/*
		Example output:

		Worker c4d65451-07f8-4230-98ad-4f33dea2a8cc, host myhostname
		TASK: PC1(1/4) AP(15/15) GET(3)
		CPU: [|||||||| ] 16/128 core(s) in use
		RAM: [|||||||| ] 12% 125.8 GiB/1008 GiB
		VMEM: [|||||||| ] 12% 125.8 GiB/1008 GiB
		GPU: [ ] 0% 0.00/1 gpu(s) in use
		GPU: NVIDIA GeForce RTX 3090, not used
	*/

	for _, stat := range st {
		gpuUse := "not "
		gpuCol := color.FgBlue
		if stat.GpuUsed > 0 {
			gpuCol = color.FgGreen
			gpuUse = ""
		}
		// Worker uuid + name

		var disabled string
		if !stat.Enabled {
@ -112,9 +127,53 @@ func workersCmd(sealing bool) *cli.Command {

		fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled)

		// Task counts
		tc := make([][]string, 0, len(stat.TaskCounts))

		for st, c := range stat.TaskCounts {
			if c == 0 {
				continue
			}

			stt, err := sealtasks.SttFromString(st)
			if err != nil {
				return err
			}

			str := fmt.Sprint(c)
			if max := stat.Info.Resources.ResourceSpec(stt.RegisteredSealProof, stt.TaskType).MaxConcurrent; max > 0 {
				switch {
				case c < max:
					str = color.GreenString(str)
				case c >= max:
					str = color.YellowString(str)
				}
				str = fmt.Sprintf("%s/%d", str, max)
			} else {
				str = color.CyanString(str)
			}
			str = fmt.Sprintf("%s(%s)", color.BlueString(stt.Short()), str)

			tc = append(tc, []string{string(stt.TaskType), str})
		}
		sort.Slice(tc, func(i, j int) bool {
			return sealtasks.TaskType(tc[i][0]).Less(sealtasks.TaskType(tc[j][0]))
		})
		var taskStr string
		for _, t := range tc {
			taskStr += t[1] + " "
		}
		if taskStr != "" {
			fmt.Printf("\tTASK: %s\n", taskStr)
		}

		// CPU use

		fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n",
			barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs)

		// RAM use

		ramTotal := stat.Info.Resources.MemPhysical
		ramTasks := stat.MemUsedMin
		ramUsed := stat.Info.Resources.MemUsed
@ -129,6 +188,8 @@ func workersCmd(sealing bool) *cli.Command {
			types.SizeStr(types.NewInt(ramTasks+ramUsed)),
			types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical)))

		// VMEM use (ram+swap)

		vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
		vmemTasks := stat.MemUsedMax
		vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed
@ -143,12 +204,21 @@ func workersCmd(sealing bool) *cli.Command {
			types.SizeStr(types.NewInt(vmemTasks+vmemReserved)),
			types.SizeStr(types.NewInt(vmemTotal)))

		// GPU use

		if len(stat.Info.Resources.GPUs) > 0 {
			gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed)
			fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar),
				stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)),
				stat.GpuUsed, len(stat.Info.Resources.GPUs))
		}

		gpuUse := "not "
		gpuCol := color.FgBlue
		if stat.GpuUsed > 0 {
			gpuCol = color.FgGreen
			gpuUse = ""
		}
		for _, gpu := range stat.Info.Resources.GPUs {
			fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse))
		}
@ -349,3 +419,94 @@ var sealingAbortCmd = &cli.Command{
		return nodeApi.SealingAbort(ctx, job.ID)
	},
}

var sealingDataCidCmd = &cli.Command{
	Name:      "data-cid",
	Usage:     "Compute data CID using workers",
	ArgsUsage: "[file/url] <padded piece size>",
	Flags: []cli.Flag{
		&cli.Uint64Flag{
			Name:  "file-size",
			Usage: "real file size",
		},
	},
	Action: func(cctx *cli.Context) error {
		if cctx.Args().Len() < 1 || cctx.Args().Len() > 2 {
			return xerrors.Errorf("expected 1 or 2 arguments")
		}

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		ctx := lcli.ReqContext(cctx)

		var r io.Reader
		sz := cctx.Uint64("file-size")

		if strings.HasPrefix(cctx.Args().First(), "http://") || strings.HasPrefix(cctx.Args().First(), "https://") {
			r = &httpreader.HttpReader{
				URL: cctx.Args().First(),
			}

			if !cctx.IsSet("file-size") {
				resp, err := http.Head(cctx.Args().First())
				if err != nil {
					return xerrors.Errorf("http head: %w", err)
				}

				if resp.ContentLength < 0 {
					return xerrors.Errorf("head response didn't contain content length; specify --file-size")
				}
				sz = uint64(resp.ContentLength)
			}
		} else {
			p, err := homedir.Expand(cctx.Args().First())
			if err != nil {
				return xerrors.Errorf("expanding path: %w", err)
			}

			f, err := os.OpenFile(p, os.O_RDONLY, 0)
			if err != nil {
				return xerrors.Errorf("opening source file: %w", err)
			}

			if !cctx.IsSet("file-size") {
				st, err := f.Stat()
				if err != nil {
					return xerrors.Errorf("stat: %w", err)
				}
				sz = uint64(st.Size())
			}

			r = f
		}

		var psize abi.PaddedPieceSize
		if cctx.Args().Len() == 2 {
			rps, err := humanize.ParseBytes(cctx.Args().Get(1))
			if err != nil {
				return xerrors.Errorf("parsing piece size: %w", err)
			}
			psize = abi.PaddedPieceSize(rps)
			if err := psize.Validate(); err != nil {
				return xerrors.Errorf("checking piece size: %w", err)
			}
			if sz > uint64(psize.Unpadded()) {
				return xerrors.Errorf("file larger than the piece")
			}
		} else {
			psize = padreader.PaddedSize(sz).Padded()
		}

		pc, err := nodeApi.ComputeDataCid(ctx, psize.Unpadded(), r)
		if err != nil {
			return xerrors.Errorf("computing data CID: %w", err)
		}

		fmt.Println(pc.PieceCID, " ", pc.Size)
		return nil
	},
}
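The padded/unpadded arithmetic in the fallback branch above is worth spelling out; a standalone sketch of the size relationship (assuming go-padreader and go-state-types, both already imported by this file):

```go
// Sketch: a raw payload size rounds up to the next valid unpadded piece
// size, and Fr32 padding then adds one byte per 127 (127 -> 128, etc.).
package main

import (
	"fmt"

	"github.com/filecoin-project/go-padreader"
)

func main() {
	sz := uint64(100)                    // real file size in bytes
	unpadded := padreader.PaddedSize(sz) // 127 (minimum unpadded piece size)
	padded := unpadded.Padded()          // 128
	fmt.Println(unpadded, padded)
}
```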

@ -45,6 +45,7 @@ var sectorsCmd = &cli.Command{
		sectorsRefsCmd,
		sectorsUpdateCmd,
		sectorsPledgeCmd,
		sectorPreCommitsCmd,
		sectorsCheckExpireCmd,
		sectorsExpiredCmd,
		sectorsRenewCmd,
@ -58,6 +59,7 @@ var sectorsCmd = &cli.Command{
		sectorsCapacityCollateralCmd,
		sectorsBatching,
		sectorsRefreshPieceMatchingCmd,
		sectorsCompactPartitionsCmd,
	},
}

@ -2088,3 +2090,89 @@ func yesno(b bool) string {
	}
	return color.RedString("NO")
}

var sectorsCompactPartitionsCmd = &cli.Command{
	Name:  "compact-partitions",
	Usage: "removes dead sectors from partitions and reduces the number of partitions used if possible",
	Flags: []cli.Flag{
		&cli.Uint64Flag{
			Name:     "deadline",
			Usage:    "the deadline to compact the partitions in",
			Required: true,
		},
		&cli.Int64SliceFlag{
			Name:     "partitions",
			Usage:    "list of partitions to compact sectors in",
			Required: true,
		},
		&cli.BoolFlag{
			Name:  "really-do-it",
			Usage: "Actually send transaction performing the action",
			Value: false,
		},
	},
	Action: func(cctx *cli.Context) error {
		if !cctx.Bool("really-do-it") {
			fmt.Println("Pass --really-do-it to actually execute this action")
			return nil
		}

		api, acloser, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer acloser()

		ctx := lcli.ReqContext(cctx)

		maddr, err := getActorAddress(ctx, cctx)
		if err != nil {
			return err
		}

		minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return err
		}

		deadline := cctx.Uint64("deadline")

		parts := cctx.Int64Slice("partitions")
		if len(parts) <= 0 {
			return fmt.Errorf("must include at least one partition to compact")
		}

		partitions := bitfield.BitField{}
		for _, partition := range parts {
			partitions.Set(uint64(partition))
		}

		params := miner5.CompactPartitionsParams{
			Deadline:   deadline,
			Partitions: partitions,
		}

		sp, err := actors.SerializeParams(&params)
		if err != nil {
			return xerrors.Errorf("serializing params: %w", err)
		}

		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
			From:   minfo.Worker,
			To:     maddr,
			Method: miner.Methods.CompactPartitions,
			Value:  big.Zero(),
			Params: sp,
		}, nil)
		if err != nil {
			return xerrors.Errorf("mpool push: %w", err)
		}

		fmt.Println("Message CID:", smsg.Cid())

		return nil
	},
}

@ -28,6 +28,7 @@ var ledgerCmd = &cli.Command{
		ledgerKeyInfoCmd,
		ledgerSignTestCmd,
		ledgerShowCmd,
		ledgerNewAddressesCmd,
	},
}

@ -291,3 +292,68 @@ var ledgerShowCmd = &cli.Command{
		return nil
	},
}

var ledgerNewAddressesCmd = &cli.Command{
	Name:  "new",
	Flags: []cli.Flag{},
	Action: func(cctx *cli.Context) error {
		ctx := lcli.ReqContext(cctx)

		if cctx.NArg() != 1 {
			return fmt.Errorf("must pass account index")
		}

		index, err := strconv.ParseUint(cctx.Args().First(), 10, 32)
		if err != nil {
			return err
		}

		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		fl, err := ledgerfil.FindLedgerFilecoinApp()
		if err != nil {
			return err
		}
		defer fl.Close() // nolint

		if err := ctx.Err(); err != nil {
			return err
		}

		p := []uint32{hdHard | 44, hdHard | 461, hdHard, 0, uint32(index)}
		pubk, err := fl.GetPublicKeySECP256K1(p)
		if err != nil {
			return err
		}

		addr, err := address.NewSecp256k1Address(pubk)
		if err != nil {
			return err
		}

		var pd ledgerwallet.LedgerKeyInfo
		pd.Address = addr
		pd.Path = p

		b, err := json.Marshal(pd)
		if err != nil {
			return err
		}

		var ki types.KeyInfo
		ki.Type = types.KTSecp256k1Ledger
		ki.PrivateKey = b

		_, err = api.WalletImport(ctx, &ki)
		if err != nil {
			return err
		}

		fmt.Printf("%s %s\n", addr, printHDPath(p))
		return nil
	},
}
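The hardened components in `p` follow the BIP-44 convention (461 is the SLIP-44 coin type registered for Filecoin); a small sketch of how that slice maps to the usual path notation, assuming `hdHard = 0x80000000` as in the surrounding wallet code:

```go
// Sketch: render []uint32{hdHard | 44, hdHard | 461, hdHard, 0, index}
// as m/44'/461'/0'/0/<index>.
package main

import (
	"fmt"
	"strings"
)

const hdHard = 0x80000000

func pathString(p []uint32) string {
	parts := make([]string, 0, len(p))
	for _, c := range p {
		if c >= hdHard {
			parts = append(parts, fmt.Sprintf("%d'", c-hdHard))
		} else {
			parts = append(parts, fmt.Sprintf("%d", c))
		}
	}
	return "m/" + strings.Join(parts, "/")
}

func main() {
	fmt.Println(pathString([]uint32{hdHard | 44, hdHard | 461, hdHard, 0, 5})) // m/44'/461'/0'/0/5
}
```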

@ -23,18 +23,19 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{161}); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write([]byte{161}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Sub ([]cid.Cid) (slice)
	if len("Sub") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"Sub\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sub"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sub"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Sub")); err != nil {
@ -45,27 +46,32 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Slice value in field t.Sub was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sub))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Sub))); err != nil {
		return err
	}
	for _, v := range t.Sub {
		if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
		if err := cbg.WriteCid(w, v); err != nil {
			return xerrors.Errorf("failed writing cid field t.Sub: %w", err)
		}
	}
	return nil
}

func (t *CarbNode) UnmarshalCBOR(r io.Reader) error {
func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) {
	*t = CarbNode{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}
@ -80,7 +86,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) error {
	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			sval, err := cbg.ReadString(cr)
			if err != nil {
				return err
			}
@ -92,7 +98,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) error {
		// t.Sub ([]cid.Cid) (slice)
		case "Sub":

			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
			maj, extra, err = cr.ReadHeader()
			if err != nil {
				return err
			}
@ -111,7 +117,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) error {

			for i := 0; i < int(extra); i++ {

				c, err := cbg.ReadCid(br)
				c, err := cbg.ReadCid(cr)
				if err != nil {
					return xerrors.Errorf("reading cid field t.Sub failed: %w", err)
				}
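The same mechanical rewrite repeats across every generated (Un)MarshalCBOR in this commit: the scratch-buffer helpers give way to a single reader/writer wrapper. A minimal round-trip sketch of the new cbor-gen calling convention (using only calls that appear in the hunks above):

```go
package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	var buf bytes.Buffer

	// Old style: cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, 42)
	cw := cbg.NewCborWriter(&buf)
	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, 42); err != nil {
		panic(err)
	}

	// Old style: cbg.CborReadHeaderBuf(br, scratch)
	cr := cbg.NewCborReader(&buf)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		panic(err)
	}
	fmt.Println(maj == cbg.MajUnsignedInt, extra) // true 42
}
```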

@ -193,7 +193,7 @@ var verifRegVerifyClientCmd = &cli.Command{
		},
	},
	Action: func(cctx *cli.Context) error {
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus filplus`")
		froms := cctx.String("from")
		if froms == "" {
			return fmt.Errorf("must specify from address with --from")
@ -262,7 +262,7 @@ var verifRegListVerifiersCmd = &cli.Command{
	Usage:  "list all verifiers",
	Hidden: true,
	Action: func(cctx *cli.Context) error {
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus filplus`")
		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
@ -294,7 +294,7 @@ var verifRegListClientsCmd = &cli.Command{
	Usage:  "list all verified clients",
	Hidden: true,
	Action: func(cctx *cli.Context) error {
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus filplus`")
		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
@ -326,7 +326,7 @@ var verifRegCheckClientCmd = &cli.Command{
	Usage:  "check verified client remaining bytes",
	Hidden: true,
	Action: func(cctx *cli.Context) error {
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus filplus`")
		if !cctx.Args().Present() {
			return fmt.Errorf("must specify client address to check")
		}
@ -362,7 +362,7 @@ var verifRegCheckVerifierCmd = &cli.Command{
	Usage:  "check verifiers remaining bytes",
	Hidden: true,
	Action: func(cctx *cli.Context) error {
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
		fmt.Println("DEPRECATED: This behavior is being moved to `lotus filplus`")
		if !cctx.Args().Present() {
			return fmt.Errorf("must specify verifier address to check")
		}

@ -182,12 +182,16 @@ var runCmd = &cli.Command{
			Usage: "enable window post",
			Value: false,
		},

		&cli.BoolFlag{
			Name:  "winningpost",
			Usage: "enable winning post",
			Value: false,
		},
		&cli.BoolFlag{
			Name:  "no-default",
			Usage: "disable all default compute tasks, use the worker for storage/fetching only",
			Value: false,
		},
		&cli.IntFlag{
			Name:  "parallel-fetch-limit",
			Usage: "maximum fetch operations to run in parallel",
@ -289,27 +293,27 @@ var runCmd = &cli.Command{
			return err
		}

		if cctx.Bool("commit") || cctx.Bool("prove-replica-update2") {
			if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
				return xerrors.Errorf("get params: %w", err)
			}
		}

		var taskTypes []sealtasks.TaskType
		var workerType string
		var needParams bool

		if cctx.Bool("windowpost") {
			needParams = true
			workerType = sealtasks.WorkerWindowPoSt
			taskTypes = append(taskTypes, sealtasks.TTGenerateWindowPoSt)
		}
		if cctx.Bool("winningpost") {
			needParams = true
			workerType = sealtasks.WorkerWinningPoSt
			taskTypes = append(taskTypes, sealtasks.TTGenerateWinningPoSt)
		}

		if workerType == "" {
			workerType = sealtasks.WorkerSealing
			taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)

			if !cctx.Bool("no-default") {
				workerType = sealtasks.WorkerSealing
			}
		}

		if (workerType == sealtasks.WorkerSealing || cctx.IsSet("addpiece")) && cctx.Bool("addpiece") {
@ -325,18 +329,24 @@ var runCmd = &cli.Command{
			taskTypes = append(taskTypes, sealtasks.TTPreCommit2)
		}
		if (workerType == sealtasks.WorkerSealing || cctx.IsSet("commit")) && cctx.Bool("commit") {
			needParams = true
			taskTypes = append(taskTypes, sealtasks.TTCommit2)
		}
		if (workerType == sealtasks.WorkerSealing || cctx.IsSet("replica-update")) && cctx.Bool("replica-update") {
			taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
		}
		if (workerType == sealtasks.WorkerSealing || cctx.IsSet("prove-replica-update2")) && cctx.Bool("prove-replica-update2") {
			needParams = true
			taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
		}
		if (workerType == sealtasks.WorkerSealing || cctx.IsSet("regen-sector-key")) && cctx.Bool("regen-sector-key") {
			taskTypes = append(taskTypes, sealtasks.TTRegenSectorKey)
		}

		if cctx.Bool("no-default") && workerType == "" {
			workerType = sealtasks.WorkerSealing
		}

		if len(taskTypes) == 0 {
			return xerrors.Errorf("no task types specified")
		}
@ -346,6 +356,12 @@ var runCmd = &cli.Command{
			}
		}

		if needParams {
			if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
				return xerrors.Errorf("get params: %w", err)
			}
		}

		// Open repo

		repoPath := cctx.String(FlagWorkerRepo)

@ -34,6 +34,7 @@ func WorkerHandler(authv func(ctx context.Context, token string) ([]auth.Permission
	}

	rpcServer.Register("Filecoin", wapi)
	rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover")

	mux.Handle("/rpc/v0", rpcServer)
	mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)

@ -28,18 +28,19 @@ func (t *State) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufState); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufState); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Value (string) (string)
	if len(t.Value) > cbg.MaxLength {
		return xerrors.Errorf("Value in field t.Value was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Value)); err != nil {
@ -51,27 +52,32 @@ func (t *State) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Slice value in field t.Unmarshallable was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Unmarshallable))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Unmarshallable))); err != nil {
		return err
	}
	for _, v := range t.Unmarshallable {
		if err := v.MarshalCBOR(w); err != nil {
		if err := v.MarshalCBOR(cw); err != nil {
			return err
		}
	}
	return nil
}

func (t *State) UnmarshalCBOR(r io.Reader) error {
func (t *State) UnmarshalCBOR(r io.Reader) (err error) {
	*t = State{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -83,7 +89,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error {
	// t.Value (string) (string)

	{
		sval, err := cbg.ReadStringBuf(br, scratch)
		sval, err := cbg.ReadString(cr)
		if err != nil {
			return err
		}
@ -92,7 +98,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error {
	}
	// t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -112,7 +118,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error {
	for i := 0; i < int(extra); i++ {

		var v UnmarshallableCBOR
		if err := v.UnmarshalCBOR(br); err != nil {
		if err := v.UnmarshalCBOR(cr); err != nil {
			return err
		}

@ -129,19 +135,20 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufCallerValidationArgs); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufCallerValidationArgs); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Branch (chaos.CallerValidationBranch) (int64)
	if t.Branch >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
			return err
		}
	}
@ -151,11 +158,11 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Slice value in field t.Addrs was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Addrs))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Addrs))); err != nil {
		return err
	}
	for _, v := range t.Addrs {
		if err := v.MarshalCBOR(w); err != nil {
		if err := v.MarshalCBOR(cw); err != nil {
			return err
		}
	}
@ -165,27 +172,32 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Slice value in field t.Types was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Types))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Types))); err != nil {
		return err
	}
	for _, v := range t.Types {
		if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
		if err := cbg.WriteCid(w, v); err != nil {
			return xerrors.Errorf("failed writing cid field t.Types: %w", err)
		}
	}
	return nil
}

func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {
func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) {
	*t = CallerValidationArgs{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -196,7 +208,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {

	// t.Branch (chaos.CallerValidationBranch) (int64)
	{
		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@ -221,7 +233,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {
	}
	// t.Addrs ([]address.Address) (slice)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -241,7 +253,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {
	for i := 0; i < int(extra); i++ {

		var v address.Address
		if err := v.UnmarshalCBOR(br); err != nil {
		if err := v.UnmarshalCBOR(cr); err != nil {
			return err
		}

@ -250,7 +262,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {

	// t.Types ([]cid.Cid) (slice)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -269,7 +281,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {

	for i := 0; i < int(extra); i++ {

		c, err := cbg.ReadCid(br)
		c, err := cbg.ReadCid(cr)
		if err != nil {
			return xerrors.Errorf("reading cid field t.Types failed: %w", err)
		}
@ -286,12 +298,13 @@ func (t *CreateActorArgs) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufCreateActorArgs); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufCreateActorArgs); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.UndefActorCID (bool) (bool)
	if err := cbg.WriteBool(w, t.UndefActorCID); err != nil {
		return err
@ -299,7 +312,7 @@ func (t *CreateActorArgs) MarshalCBOR(w io.Writer) error {

	// t.ActorCID (cid.Cid) (struct)

	if err := cbg.WriteCidBuf(scratch, w, t.ActorCID); err != nil {
	if err := cbg.WriteCid(cw, t.ActorCID); err != nil {
		return xerrors.Errorf("failed to write cid field t.ActorCID: %w", err)
	}

@ -309,22 +322,27 @@ func (t *CreateActorArgs) MarshalCBOR(w io.Writer) error {
	}

	// t.Address (address.Address) (struct)
	if err := t.Address.MarshalCBOR(w); err != nil {
	if err := t.Address.MarshalCBOR(cw); err != nil {
		return err
	}
	return nil
}

func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {
func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) (err error) {
	*t = CreateActorArgs{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -335,7 +353,7 @@ func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {

	// t.UndefActorCID (bool) (bool)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -354,7 +372,7 @@ func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {

	{

		c, err := cbg.ReadCid(br)
		c, err := cbg.ReadCid(cr)
		if err != nil {
			return xerrors.Errorf("failed to read cid field t.ActorCID: %w", err)
		}
@ -364,7 +382,7 @@ func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {
	}
	// t.UndefAddress (bool) (bool)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -383,7 +401,7 @@ func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.Address.UnmarshalCBOR(br); err != nil {
		if err := t.Address.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.Address: %w", err)
		}

@ -398,12 +416,15 @@ func (t *ResolveAddressResponse) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufResolveAddressResponse); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufResolveAddressResponse); err != nil {
		return err
	}

	// t.Address (address.Address) (struct)
	if err := t.Address.MarshalCBOR(w); err != nil {
	if err := t.Address.MarshalCBOR(cw); err != nil {
		return err
	}

@ -414,16 +435,21 @@ func (t *ResolveAddressResponse) MarshalCBOR(w io.Writer) error {
	return nil
}

func (t *ResolveAddressResponse) UnmarshalCBOR(r io.Reader) error {
func (t *ResolveAddressResponse) UnmarshalCBOR(r io.Reader) (err error) {
	*t = ResolveAddressResponse{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -436,14 +462,14 @@ func (t *ResolveAddressResponse) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.Address.UnmarshalCBOR(br); err != nil {
		if err := t.Address.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.Address: %w", err)
		}

	}
	// t.Success (bool) (bool)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -468,25 +494,26 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufSendArgs); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufSendArgs); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.To (address.Address) (struct)
	if err := t.To.MarshalCBOR(w); err != nil {
	if err := t.To.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.Value (big.Int) (struct)
	if err := t.Value.MarshalCBOR(w); err != nil {
	if err := t.Value.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.Method (abi.MethodNum) (uint64)

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
		return err
	}

@ -495,26 +522,31 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Byte array in field t.Params was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil {
		return err
	}

	if _, err := w.Write(t.Params[:]); err != nil {
	if _, err := cw.Write(t.Params[:]); err != nil {
		return err
	}
	return nil
}

func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {
func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) {
	*t = SendArgs{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -527,7 +559,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.To.UnmarshalCBOR(br); err != nil {
		if err := t.To.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.To: %w", err)
		}

@ -536,7 +568,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.Value.UnmarshalCBOR(br); err != nil {
		if err := t.Value.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.Value: %w", err)
		}

@ -545,7 +577,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {

	{

		maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err = cr.ReadHeader()
		if err != nil {
			return err
		}
@ -557,7 +589,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {
	}
	// t.Params ([]uint8) (slice)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -573,7 +605,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {
		t.Params = make([]uint8, extra)
	}

	if _, err := io.ReadFull(br, t.Params[:]); err != nil {
	if _, err := io.ReadFull(cr, t.Params[:]); err != nil {
		return err
	}
	return nil
@ -586,48 +618,54 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufSendReturn); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufSendReturn); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Return (builtin.CBORBytes) (slice)
	if len(t.Return) > cbg.ByteArrayMaxLen {
		return xerrors.Errorf("Byte array in field t.Return was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Return))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil {
		return err
	}

	if _, err := w.Write(t.Return[:]); err != nil {
	if _, err := cw.Write(t.Return[:]); err != nil {
		return err
	}

	// t.Code (exitcode.ExitCode) (int64)
	if t.Code >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
			return err
		}
	}
	return nil
}

func (t *SendReturn) UnmarshalCBOR(r io.Reader) error {
func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) {
	*t = SendReturn{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -638,7 +676,7 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) error {

	// t.Return (builtin.CBORBytes) (slice)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -654,12 +692,12 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) error {
		t.Return = make([]uint8, extra)
	}

	if _, err := io.ReadFull(br, t.Return[:]); err != nil {
	if _, err := io.ReadFull(cr, t.Return[:]); err != nil {
		return err
	}
	// t.Code (exitcode.ExitCode) (int64)
	{
		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@ -692,18 +730,19 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufMutateStateArgs); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufMutateStateArgs); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Value (string) (string)
	if len(t.Value) > cbg.MaxLength {
		return xerrors.Errorf("Value in field t.Value was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Value)); err != nil {
@ -712,27 +751,32 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error {

	// t.Branch (chaos.MutateStateBranch) (int64)
	if t.Branch >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
			return err
		}
	}
	return nil
}

func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) error {
func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) {
	*t = MutateStateArgs{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -744,7 +788,7 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) error {
	// t.Value (string) (string)

	{
		sval, err := cbg.ReadStringBuf(br, scratch)
		sval, err := cbg.ReadString(cr)
		if err != nil {
			return err
		}
@ -753,7 +797,7 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) error {
	}
	// t.Branch (chaos.MutateStateBranch) (int64)
	{
		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@ -786,19 +830,20 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufAbortWithArgs); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufAbortWithArgs); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Code (exitcode.ExitCode) (int64)
	if t.Code >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
			return err
		}
	}
@ -808,7 +853,7 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.Message was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Message)); err != nil {
@ -822,16 +867,21 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error {
	return nil
}

func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {
func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) {
	*t = AbortWithArgs{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -842,7 +892,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {

	// t.Code (exitcode.ExitCode) (int64)
	{
		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@ -868,7 +918,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {
	// t.Message (string) (string)

	{
		sval, err := cbg.ReadStringBuf(br, scratch)
		sval, err := cbg.ReadString(cr)
		if err != nil {
			return err
		}
@ -877,7 +927,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {
	}
	// t.Uncontrolled (bool) (bool)

	maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err = cr.ReadHeader()
	if err != nil {
		return err
	}
@ -902,60 +952,66 @@ func (t *InspectRuntimeReturn) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write(lengthBufInspectRuntimeReturn); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufInspectRuntimeReturn); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Caller (address.Address) (struct)
	if err := t.Caller.MarshalCBOR(w); err != nil {
	if err := t.Caller.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.Receiver (address.Address) (struct)
	if err := t.Receiver.MarshalCBOR(w); err != nil {
	if err := t.Receiver.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.ValueReceived (big.Int) (struct)
	if err := t.ValueReceived.MarshalCBOR(w); err != nil {
	if err := t.ValueReceived.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.CurrEpoch (abi.ChainEpoch) (int64)
	if t.CurrEpoch >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrEpoch)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrEpoch)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CurrEpoch-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.CurrEpoch-1)); err != nil {
			return err
		}
	}

	// t.CurrentBalance (big.Int) (struct)
	if err := t.CurrentBalance.MarshalCBOR(w); err != nil {
	if err := t.CurrentBalance.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.State (chaos.State) (struct)
	if err := t.State.MarshalCBOR(w); err != nil {
	if err := t.State.MarshalCBOR(cw); err != nil {
		return err
	}
	return nil
}

func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {
func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) (err error) {
	*t = InspectRuntimeReturn{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@ -968,7 +1024,7 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.Caller.UnmarshalCBOR(br); err != nil {
		if err := t.Caller.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.Caller: %w", err)
		}

@ -977,7 +1033,7 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.Receiver.UnmarshalCBOR(br); err != nil {
		if err := t.Receiver.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.Receiver: %w", err)
		}

@ -986,14 +1042,14 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.ValueReceived.UnmarshalCBOR(br); err != nil {
		if err := t.ValueReceived.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.ValueReceived: %w", err)
		}

	}
	// t.CurrEpoch (abi.ChainEpoch) (int64)
	{
		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@ -1020,7 +1076,7 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.CurrentBalance.UnmarshalCBOR(br); err != nil {
		if err := t.CurrentBalance.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.CurrentBalance: %w", err)
		}

@ -1029,7 +1085,7 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {

	{

		if err := t.State.UnmarshalCBOR(br); err != nil {
		if err := t.State.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.State: %w", err)
		}

@ -7,6 +7,7 @@ import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"os/exec"
	"strconv"
@ -28,6 +29,7 @@ import (
	"github.com/filecoin-project/test-vectors/schema"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)
@ -51,6 +53,52 @@ var TipsetVectorOpts struct {
	OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult)
}

type GasPricingRestoreFn func()

// adjustGasPricing adjusts the global gas price mapping to make sure that the
// gas pricelist for the vector's network version is used at the vector's epoch.
// Because it manipulates a global, it returns a function that reverts the
// change. The caller MUST invoke this function or the test vector runner will
// become invalid.
func adjustGasPricing(vectorEpoch abi.ChainEpoch, vectorNv network.Version) GasPricingRestoreFn {
	// Stash the current pricing mapping.
	// Ok to take a reference instead of a copy, because we override the map
	// with a new one below.
	var old = vm.Prices

	// Resolve the epoch at which the vector network version kicks in.
	var epoch abi.ChainEpoch = math.MaxInt64
	if vectorNv == network.Version0 {
		// genesis is not an upgrade.
		epoch = 0
	} else {
		for _, u := range filcns.DefaultUpgradeSchedule() {
			if u.Network == vectorNv {
				epoch = u.Height
				break
			}
		}
	}

	if epoch == math.MaxInt64 {
		panic(fmt.Sprintf("could not resolve network version %d to height", vectorNv))
	}

	// Find the right pricelist for this network version.
	pricelist := vm.PricelistByEpoch(epoch)

	// Override the pricing mapping by setting the relevant pricelist for the
	// network version at the epoch where the vector runs.
	vm.Prices = map[abi.ChainEpoch]vm.Pricelist{
		vectorEpoch: pricelist,
	}

	// Return a function to restore the original mapping.
	return func() {
		vm.Prices = old
	}
}
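adjustGasPricing is an instance of the save/override/restore pattern for monkey-patching a package global; the returned closure is the only way to undo the override, which is why the doc comment insists the caller must invoke it. A generic sketch of the pattern (hypothetical names, not part of this diff):

```go
package main

import "fmt"

var global = "original"

// override swaps in a new value and returns a closure restoring the old one.
func override(v string) (restore func()) {
	old := global
	global = v
	return func() { global = old }
}

func main() {
	restore := override("patched")
	defer restore()     // the caller MUST invoke the restore function
	fmt.Println(global) // patched
}
```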

// ExecuteMessageVector executes a message-class test vector.
func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
	var (
@ -69,6 +117,10 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
	// Create a new Driver.
	driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true})

	// Monkey patch the gas pricing.
	revertFn := adjustGasPricing(baseEpoch, nv)
	defer revertFn()

	// Apply every message.
	for i, m := range vector.ApplyMessages {
		msg, err := types.DecodeMessage(m.Bytes)

File diff suppressed because it is too large
@ -7,7 +7,7 @@ USAGE:
   lotus-miner [global options] command [command options] [arguments...]

VERSION:
   1.15.3-dev
   1.15.4-dev

COMMANDS:
   init      Initialize a lotus miner repo
@ -1035,6 +1035,7 @@ USAGE:

COMMANDS:
   list-shards       List all shards known to the dagstore, with their current status
   register-shard    Register a shard
   initialize-shard  Initialize the specified shard
   recover-shard     Attempt to recover a shard in errored state
   initialize-all    Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default
@ -1061,6 +1062,20 @@ OPTIONS:

```

### lotus-miner dagstore register-shard
```
NAME:
   lotus-miner dagstore register-shard - Register a shard

USAGE:
   lotus-miner dagstore register-shard [command options] [key]

OPTIONS:
   --color     use color in display output (default: depends on output being a TTY)
   --help, -h  show help (default: false)

```

### lotus-miner dagstore initialize-shard
```
NAME:
@ -1652,6 +1667,7 @@ COMMANDS:
   refs          List References to sectors
   update-state  ADVANCED: manually update the state of a sector, this may aid in error recovery
   pledge        store random data in a sector
   precommits    Print on-chain precommit info
   check-expire  Inspect expiring sectors
   expired       Get or cleanup expired sectors
   renew         Renew expiring sectors while not exceeding each sector's max life
@ -1665,6 +1681,7 @@ COMMANDS:
   get-cc-collateral     Get the collateral required to pledge a committed capacity sector
   batching              manage batch sector operations
   match-pending-pieces  force a refreshed match of pending pieces to open sectors without manually waiting for more deals
   compact-partitions    removes dead sectors from partitions and reduces the number of partitions used if possible
   help, h               Shows a list of commands or help for one command

OPTIONS:
@ -1750,6 +1767,19 @@ OPTIONS:

```

### lotus-miner sectors precommits
```
NAME:
   lotus-miner sectors precommits - Print on-chain precommit info

USAGE:
   lotus-miner sectors precommits [command options] [arguments...]

OPTIONS:
   --help, -h  show help (default: false)

```

### lotus-miner sectors check-expire
```
NAME:
@ -2006,6 +2036,22 @@ OPTIONS:

```

### lotus-miner sectors compact-partitions
```
NAME:
   lotus-miner sectors compact-partitions - removes dead sectors from partitions and reduces the number of partitions used if possible

USAGE:
   lotus-miner sectors compact-partitions [command options] [arguments...]

OPTIONS:
   --deadline value    the deadline to compact the partitions in (default: 0)
   --partitions value  list of partitions to compact sectors in
   --really-do-it      Actually send transaction performing the action (default: false)
   --help, -h          show help (default: false)

```

## lotus-miner proving
```
NAME:
@ -2285,6 +2331,7 @@ COMMANDS:
   workers     list workers
   sched-diag  Dump internal scheduler state
   abort       Abort a running job
   data-cid    Compute data CID using workers
   help, h     Shows a list of commands or help for one command

OPTIONS:
@ -2347,3 +2394,17 @@ OPTIONS:
   --help, -h  show help (default: false)

```

### lotus-miner sealing data-cid
```
NAME:
   lotus-miner sealing data-cid - Compute data CID using workers

USAGE:
   lotus-miner sealing data-cid [command options] [file/url] <padded piece size>

OPTIONS:
   --file-size value  real file size (default: 0)
   --help, -h         show help (default: false)

```
@ -7,7 +7,7 @@ USAGE:
   lotus-worker [global options] command [command options] [arguments...]

VERSION:
   1.15.3-dev
   1.15.4-dev

COMMANDS:
   run   Start lotus worker
@ -49,6 +49,7 @@ OPTIONS:
   --regen-sector-key            enable regen sector key (default: true)
   --windowpost                  enable window post (default: false)
   --winningpost                 enable winning post (default: false)
   --no-default                  disable all default compute tasks, use the worker for storage/fetching only (default: false)
   --parallel-fetch-limit value  maximum fetch operations to run in parallel (default: 5)
   --post-parallel-reads value   maximum number of parallel challenge reads (0 = no limit) (default: 128)
   --post-read-timeout value     time limit for reading PoSt challenges (0 = no limit) (default: 0s)
@ -7,7 +7,7 @@ USAGE:
   lotus [global options] command [command options] [arguments...]

VERSION:
   1.15.3-dev
   1.15.4-dev

COMMANDS:
   daemon  Start a lotus daemon process
@ -444,12 +444,14 @@
  # env var: LOTUS_SEALING_AGGREGATECOMMITS
  #AggregateCommits = true

  # maximum batched commit size - batches will be sent immediately above this size
  # minimum batched commit size - batches above this size will eventually be sent on a timeout
  #
  # type: int
  # env var: LOTUS_SEALING_MINCOMMITBATCH
  #MinCommitBatch = 4

  # maximum batched commit size - batches will be sent immediately above this size
  #
  # type: int
  # env var: LOTUS_SEALING_MAXCOMMITBATCH
  #MaxCommitBatch = 819
@ -532,6 +534,30 @@
  # env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
  #AllowRegenSectorKey = true

  # Assigner specifies the worker assigner to use when scheduling tasks.
  # "utilization" (default) - assign tasks to workers with lowest utilization.
  # "spread" - assign tasks to as many distinct workers as possible.
  #
  # type: string
  # env var: LOTUS_STORAGE_ASSIGNER
  #Assigner = "utilization"

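For orientation, the Assigner string is resolved once at scheduler construction. The switch below mirrors newScheduler in the extern/sector-storage/sched.go diff further down; assignerFromConfig is an illustrative wrapper, not a function in this change:

```
// assignerFromConfig maps the config value onto an Assigner implementation;
// an empty value falls back to the "utilization" default, and unknown names
// fail fast, matching the error text in the sched.go diff below.
func assignerFromConfig(name string) (Assigner, error) {
	switch name {
	case "", "utilization":
		return NewLowestUtilizationAssigner(), nil
	case "spread":
		return NewSpreadAssigner(), nil
	default:
		return nil, xerrors.Errorf("unknown assigner '%s'", name)
	}
}
```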
  # DisallowRemoteFinalize when set to true will force all Finalize tasks to
  # run on workers with local access to both long-term storage and the sealing
  # path containing the sector.
  # --
  # WARNING: Only set this if all workers have access to long-term storage
  # paths. If this flag is enabled, and there are workers without long-term
  # storage access, sectors will not be moved from them, and Finalize tasks
  # will appear to be stuck.
  # --
  # If you see stuck Finalize tasks after enabling this setting, check
  # 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
  #
  # type: bool
  # env var: LOTUS_STORAGE_DISALLOWREMOTEFINALIZE
  #DisallowRemoteFinalize = false

  # ResourceFiltering instructs the system which resource filtering strategy
  # to use when evaluating tasks against this worker. An empty value defaults
  # to "hardware".
144
extern/sector-storage/cbor_gen.go
vendored
@ -24,25 +24,26 @@ func (t *Call) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{164}); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write([]byte{164}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.ID (storiface.CallID) (struct)
	if len("ID") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"ID\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("ID")); err != nil {
		return err
	}

	if err := t.ID.MarshalCBOR(w); err != nil {
	if err := t.ID.MarshalCBOR(cw); err != nil {
		return err
	}

@ -51,7 +52,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"RetType\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("RetType"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RetType"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("RetType")); err != nil {
@ -62,7 +63,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.RetType was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.RetType))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.RetType))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.RetType)); err != nil {
@ -74,14 +75,14 @@ func (t *Call) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"State\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("State")); err != nil {
		return err
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil {
		return err
	}

@ -90,29 +91,34 @@ func (t *Call) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"Result\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Result"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Result"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Result")); err != nil {
		return err
	}

	if err := t.Result.MarshalCBOR(w); err != nil {
	if err := t.Result.MarshalCBOR(cw); err != nil {
		return err
	}
	return nil
}

func (t *Call) UnmarshalCBOR(r io.Reader) error {
func (t *Call) UnmarshalCBOR(r io.Reader) (err error) {
	*t = Call{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}
@ -127,7 +133,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {
	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			sval, err := cbg.ReadString(cr)
			if err != nil {
				return err
			}
@ -141,7 +147,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {

			{

				if err := t.ID.UnmarshalCBOR(br); err != nil {
				if err := t.ID.UnmarshalCBOR(cr); err != nil {
					return xerrors.Errorf("unmarshaling t.ID: %w", err)
				}

@ -150,7 +156,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {
		case "RetType":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
@ -162,7 +168,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {

			{

				maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
				maj, extra, err = cr.ReadHeader()
				if err != nil {
					return err
				}
@ -177,16 +183,16 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error {

			{

				b, err := br.ReadByte()
				b, err := cr.ReadByte()
				if err != nil {
					return err
				}
				if b != cbg.CborNull[0] {
					if err := br.UnreadByte(); err != nil {
					if err := cr.UnreadByte(); err != nil {
						return err
					}
					t.Result = new(ManyBytes)
					if err := t.Result.UnmarshalCBOR(br); err != nil {
					if err := t.Result.UnmarshalCBOR(cr); err != nil {
						return xerrors.Errorf("unmarshaling t.Result pointer: %w", err)
					}
				}
@ -206,25 +212,26 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{166}); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write([]byte{166}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.ID (sectorstorage.WorkID) (struct)
	if len("ID") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"ID\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("ID")); err != nil {
		return err
	}

	if err := t.ID.MarshalCBOR(w); err != nil {
	if err := t.ID.MarshalCBOR(cw); err != nil {
		return err
	}

@ -233,7 +240,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"Status\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Status")); err != nil {
@ -244,7 +251,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.Status was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Status))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Status))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Status)); err != nil {
@ -256,14 +263,14 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"WorkerCall\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerCall"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkerCall"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("WorkerCall")); err != nil {
		return err
	}

	if err := t.WorkerCall.MarshalCBOR(w); err != nil {
	if err := t.WorkerCall.MarshalCBOR(cw); err != nil {
		return err
	}

@ -272,7 +279,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"WorkError\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkError"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkError"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("WorkError")); err != nil {
@ -283,7 +290,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.WorkError was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkError))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.WorkError))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.WorkError)); err != nil {
@ -295,7 +302,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"WorkerHostname\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerHostname"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkerHostname"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("WorkerHostname")); err != nil {
@ -306,7 +313,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.WorkerHostname was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkerHostname))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.WorkerHostname))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.WorkerHostname)); err != nil {
@ -318,7 +325,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"StartTime\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartTime"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartTime"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("StartTime")); err != nil {
@ -326,27 +333,32 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error {
	}

	if t.StartTime >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartTime)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartTime)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartTime-1)); err != nil {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartTime-1)); err != nil {
			return err
		}
	}
	return nil
}

func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) {
	*t = WorkState{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}
@ -361,7 +373,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			sval, err := cbg.ReadString(cr)
			if err != nil {
				return err
			}
@ -375,7 +387,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {

			{

				if err := t.ID.UnmarshalCBOR(br); err != nil {
				if err := t.ID.UnmarshalCBOR(cr); err != nil {
					return xerrors.Errorf("unmarshaling t.ID: %w", err)
				}

@ -384,7 +396,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
		case "Status":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
@ -396,7 +408,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {

			{

				if err := t.WorkerCall.UnmarshalCBOR(br); err != nil {
				if err := t.WorkerCall.UnmarshalCBOR(cr); err != nil {
					return xerrors.Errorf("unmarshaling t.WorkerCall: %w", err)
				}

@ -405,7 +417,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
		case "WorkError":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
@ -416,7 +428,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
		case "WorkerHostname":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
@ -426,7 +438,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
		// t.StartTime (int64) (int64)
		case "StartTime":
			{
				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
				maj, extra, err := cr.ReadHeader()
				var extraI int64
				if err != nil {
					return err
@ -463,18 +475,19 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{162}); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write([]byte{162}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Method (sealtasks.TaskType) (string)
	if len("Method") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"Method\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Method"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Method"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Method")); err != nil {
@ -485,7 +498,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.Method was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Method))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Method))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Method)); err != nil {
@ -497,7 +510,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"Params\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Params"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Params"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Params")); err != nil {
@ -508,7 +521,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field t.Params was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Params))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Params))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Params)); err != nil {
@ -517,16 +530,21 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error {
	return nil
}

func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) {
	*t = WorkID{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}
@ -541,7 +559,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			sval, err := cbg.ReadString(cr)
			if err != nil {
				return err
			}
@ -554,7 +572,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
		case "Method":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
@ -565,7 +583,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
		case "Params":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				sval, err := cbg.ReadString(cr)
				if err != nil {
					return err
				}
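The change in this generated file is mechanical: writers gain a `*cbg.CborWriter` (`cw := cbg.NewCborWriter(w)`) and every `cbg.WriteMajorTypeHeaderBuf(scratch, w, ...)` becomes `cw.WriteMajorTypeHeader(...)`, while readers swap `cbg.GetPeeker` plus a scratch slice for a `*cbg.CborReader`. A minimal hand-written marshaller in the new style, for illustration only; the `Example` type is hypothetical, and the snippet assumes the cbor-gen import (`cbg "github.com/whyrusleeping/cbor-gen"`) and `io`:

```
// Example is a hypothetical type used only to show the new writer API.
type Example struct{ Name string }

func (t *Example) MarshalCBOR(w io.Writer) error {
	cw := cbg.NewCborWriter(w) // wraps w; replaces the scratch buffer

	// 0xA1: a CBOR map with one entry
	if _, err := cw.Write([]byte{161}); err != nil {
		return err
	}
	// key
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Name"))); err != nil {
		return err
	}
	if _, err := io.WriteString(cw, "Name"); err != nil {
		return err
	}
	// value
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
		return err
	}
	_, err := io.WriteString(cw, t.Name)
	return err
}
```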
96
extern/sector-storage/ffiwrapper/sealer_cgo.go
vendored
@ -31,6 +31,8 @@ import (
	"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
	"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	nr "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
	"github.com/filecoin-project/lotus/lib/nullreader"
)

var _ Storage = &Sealer{}
@ -52,6 +54,11 @@ func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error
}

func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
	pieceData = io.LimitReader(io.MultiReader(
		pieceData,
		nullreader.Reader{},
	), int64(pieceSize))

	// TODO: allow tuning those:
	chunk := abi.PaddedPieceSize(4 << 20)
	parallel := runtime.NumCPU()
@ -72,6 +79,7 @@ func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize,
	for {
		var read int
		for rbuf := buf; len(rbuf) > 0; {

			n, err := pieceData.Read(rbuf)
			if err != nil && err != io.EOF {
				return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err)
@ -369,8 +377,8 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err
	return pieceCID, werr()
}

func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string) (bool, error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string, randomness abi.SealRandomness) (bool, error) {
	replicaPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage)
	if xerrors.Is(err, storiface.ErrSectorNotFound) {
		return false, nil
	} else if err != nil {
@ -378,12 +386,47 @@ func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.Se
	}
	defer done()

	sealedPaths, done2, err := sb.AcquireSectorKeyOrRegenerate(ctx, sector, randomness)
	if err != nil {
		return false, xerrors.Errorf("acquiring sealed sector: %w", err)
	}
	defer done2()

	// Sector data stored in replica update
	updateProof, err := sector.ProofType.RegisteredUpdateProof()
	if err != nil {
		return false, err
	}
	return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, paths.Update, paths.Sealed, paths.Cache, commD)
	return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, replicaPath.Update, sealedPaths.Sealed, sealedPaths.Cache, commD)
}

func (sb *Sealer) AcquireSectorKeyOrRegenerate(ctx context.Context, sector storage.SectorRef, randomness abi.SealRandomness) (storiface.SectorPaths, func(), error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
	if err == nil {
		return paths, done, err
	} else if !xerrors.Is(err, storiface.ErrSectorNotFound) {
		return paths, done, xerrors.Errorf("reading sector key: %w", err)
	}

	// Sector key can't be found, so let's regenerate it
	sectorSize, err := sector.ProofType.SectorSize()
	if err != nil {
		return paths, done, xerrors.Errorf("retrieving sector size: %w", err)
	}
	paddedSize := abi.PaddedPieceSize(sectorSize)

	_, err = sb.AddPiece(ctx, sector, nil, paddedSize.Unpadded(), nr.NewNullReader(paddedSize.Unpadded()))
	if err != nil {
		return paths, done, xerrors.Errorf("recomputing empty data: %w", err)
	}

	err = sb.RegenerateSectorKey(ctx, sector, randomness, []abi.PieceInfo{{PieceCID: zerocomm.ZeroPieceCommitment(paddedSize.Unpadded()), Size: paddedSize}})
	if err != nil {
		return paths, done, xerrors.Errorf("during pc1: %w", err)
	}

	// Sector key should exist now, let's grab the paths
	return sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
}

func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
@ -437,7 +480,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off
	}

	// If piece data stored in updated replica decode whole sector
	decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed)
	decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed, randomness)
	if err != nil {
		return xerrors.Errorf("decoding sector from replica: %w", err)
	}
@ -618,6 +661,51 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storag
	return true, nil
}

func (sb *Sealer) RegenerateSectorKey(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) error {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
	if err != nil {
		return xerrors.Errorf("acquiring sector paths: %w", err)
	}
	defer done()

	e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
	if err != nil {
		return xerrors.Errorf("ensuring sealed file exists: %w", err)
	}
	if err := e.Close(); err != nil {
		return err
	}

	var sum abi.UnpaddedPieceSize
	for _, piece := range pieces {
		sum += piece.Size.Unpadded()
	}
	ssize, err := sector.ProofType.SectorSize()
	if err != nil {
		return err
	}
	ussize := abi.PaddedPieceSize(ssize).Unpadded()
	if sum != ussize {
		return xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
	}

	// TODO: context cancellation respect
	_, err = ffi.SealPreCommitPhase1(
		sector.ProofType,
		paths.Cache,
		paths.Unsealed,
		paths.Sealed,
		sector.ID.Number,
		sector.ID.Miner,
		ticket,
		pieces,
	)
	if err != nil {
		return xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
	}
	return nil
}

func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
	if err != nil {
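The reader-padding trick at the top of DataCid is worth a note: a short input is logically zero-padded to the full piece size by chaining a null reader behind it and capping the total with io.LimitReader. A standalone sketch of the same idiom using only the standard library; the `zeroReader` type stands in for lotus's nullreader package:

```
package main

import (
	"bytes"
	"fmt"
	"io"
)

// zeroReader mimics nullreader.Reader: an endless stream of zero bytes.
type zeroReader struct{}

func (zeroReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	// 5 bytes of real data, padded out to a 16-byte "piece".
	data := bytes.NewReader([]byte("hello"))
	const pieceSize = 16

	padded := io.LimitReader(io.MultiReader(data, zeroReader{}), pieceSize)

	out, _ := io.ReadAll(padded)
	fmt.Printf("%d bytes: %q\n", len(out), out) // 16 bytes: "hello" + 11 zeros
}
```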
49
extern/sector-storage/manager.go
vendored
@ -62,7 +62,7 @@ type Manager struct {
	remoteHnd *stores.FetchHandler
	index     stores.SectorIndex

	sched *scheduler
	sched            *Scheduler
	windowPoStSched  *poStScheduler
	winningPoStSched *poStScheduler

@ -71,7 +71,8 @@ type Manager struct {
	workLk sync.Mutex
	work   *statestore.StateStore

	parallelCheckLimit int
	parallelCheckLimit     int
	disallowRemoteFinalize bool

	callToWork map[storiface.CallID]WorkID
	// used when we get an early return and there's no callToWork mapping
@ -122,6 +123,10 @@ type Config struct {

	// PoSt config
	ParallelCheckLimit int

	DisallowRemoteFinalize bool

	Assigner string
}

type StorageAuth http.Header
@ -135,6 +140,11 @@ func New(ctx context.Context, lstor *stores.Local, stor stores.Store, ls stores.
		return nil, xerrors.Errorf("creating prover instance: %w", err)
	}

	sh, err := newScheduler(sc.Assigner)
	if err != nil {
		return nil, err
	}

	m := &Manager{
		ls:      ls,
		storage: stor,
@ -142,13 +152,14 @@ func New(ctx context.Context, lstor *stores.Local, stor stores.Store, ls stores.
		remoteHnd: &stores.FetchHandler{Local: lstor, PfHandler: &stores.DefaultPartialFileHandler{}},
		index:     si,

		sched: newScheduler(),
		sched:            sh,
		windowPoStSched:  newPoStScheduler(sealtasks.TTGenerateWindowPoSt),
		winningPoStSched: newPoStScheduler(sealtasks.TTGenerateWinningPoSt),

		localProver: prover,

		parallelCheckLimit: sc.ParallelCheckLimit,
		parallelCheckLimit:     sc.ParallelCheckLimit,
		disallowRemoteFinalize: sc.DisallowRemoteFinalize,

		work:       mss,
		callToWork: map[storiface.CallID]WorkID{},
@ -273,7 +284,7 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storage.SectorR
	defer cancel()

	log.Debugf("acquire unseal sector lock for sector %d", sector.ID)
	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTUnsealed); err != nil {
		return xerrors.Errorf("acquiring unseal sector lock: %w", err)
	}

@ -281,8 +292,11 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storage.SectorR
	// put it in the sealing scratch space.
	sealFetch := func(ctx context.Context, worker Worker) error {
		log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
		if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil {
			return xerrors.Errorf("copy sealed/cache sector data: %w", err)
		_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
		_, err2 := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy))

		if err != nil && err2 != nil {
			return xerrors.Errorf("cannot unseal piece. error fetching sealed data: %w. error fetching replica data: %w", err, err2)
		}

		return nil
@ -582,6 +596,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
		return xerrors.Errorf("acquiring sector lock: %w", err)
	}

	// first check if the unsealed file exists anywhere; If it doesn't ignore it
	unsealed := storiface.FTUnsealed
	{
		unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
@ -594,6 +609,8 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
		}
	}

	// Make sure that the sealed file is still in sealing storage; In case it already
	// isn't, we want to do finalize in long-term storage
	pathType := storiface.PathStorage
	{
		sealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTSealed, 0, false)
@ -609,6 +626,8 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
		}
	}

	// do the cache trimming wherever the likely still very large cache lives.
	// we really don't want to move it.
	selector := newExistingSelector(m.index, sector.ID, storiface.FTCache, false)

	err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
@ -621,7 +640,10 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
		return err
	}

	fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathStorage)
	// get a selector for moving stuff into long-term storage
	fetchSel := newMoveSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, storiface.PathStorage, !m.disallowRemoteFinalize)

	// only move the unsealed file if it still exists and needs moving
	moveUnsealed := unsealed
	{
		if len(keepUnsealed) == 0 {
@ -629,6 +651,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
		}
	}

	// move stuff to long-term storage
	err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel,
		m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|moveUnsealed, storiface.PathStorage, storiface.AcquireMove),
		func(ctx context.Context, w Worker) error {
@ -650,6 +673,7 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect
		return xerrors.Errorf("acquiring sector lock: %w", err)
	}

	// first check if the unsealed file exists anywhere; If it doesn't ignore it
	moveUnsealed := storiface.FTUnsealed
	{
		unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
@ -662,6 +686,8 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect
		}
	}

	// Make sure that the update file is still in sealing storage; In case it already
	// isn't, we want to do finalize in long-term storage
	pathType := storiface.PathStorage
	{
		sealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUpdate, 0, false)
@ -677,7 +703,9 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect
		}
	}

	selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTUpdateCache, false)
	// do the cache trimming wherever the likely still large cache lives.
	// we really don't want to move it.
	selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdateCache, false)

	err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalizeReplicaUpdate, selector,
		m.schedFetch(sector, storiface.FTCache|storiface.FTUpdateCache|moveUnsealed, pathType, storiface.AcquireMove),
@ -690,7 +718,8 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect
	}

	move := func(types storiface.SectorFileType) error {
		fetchSel := newAllocSelector(m.index, types, storiface.PathStorage)
		// get a selector for moving stuff into long-term storage
		fetchSel := newMoveSelector(m.index, sector.ID, types, storiface.PathStorage, !m.disallowRemoteFinalize)
		{
			if len(keepUnsealed) == 0 {
				moveUnsealed = storiface.FTNone
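One subtlety in the SectorsUnsealPiece change above: unsealing can now be served either from the sealed sector files or from a snap-deals replica update, so both fetches are attempted and the operation only fails when both sources fail. A minimal standalone sketch of that error-combining shape; `fetchEither` is an illustrative name, and it uses %v rather than the diff's double %w wrapping for portability:

```
// fetchEither tries the primary source, then the fallback, and only
// reports an error when both are unavailable; this mirrors the sealFetch
// closure in the manager diff above.
func fetchEither(primary, fallback func() error) error {
	err := primary()
	err2 := fallback()
	if err != nil && err2 != nil {
		return fmt.Errorf("cannot unseal piece. error fetching sealed data: %v. error fetching replica data: %v", err, err2)
	}
	return nil
}
```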
11
extern/sector-storage/manager_test.go
vendored
@ -109,6 +109,9 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man

	stor := stores.NewRemote(lstor, si, nil, 6000, &stores.DefaultPartialFileHandler{})

	sh, err := newScheduler("")
	require.NoError(t, err)

	m := &Manager{
		ls:      st,
		storage: stor,
@ -116,7 +119,7 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man
		remoteHnd: &stores.FetchHandler{Local: lstor},
		index:     si,

		sched: newScheduler(),
		sched:            sh,
		windowPoStSched:  newPoStScheduler(sealtasks.TTGenerateWindowPoSt),
		winningPoStSched: newPoStScheduler(sealtasks.TTGenerateWinningPoSt),

@ -315,6 +318,12 @@ func TestSnapDeals(t *testing.T) {
	require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed))
	fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK))

	fmt.Printf("Remove data\n")
	require.NoError(t, m.FinalizeSector(ctx, sid, nil))
	fmt.Printf("Release Sector Key\n")
	require.NoError(t, m.ReleaseSectorKey(ctx, sid))
	fmt.Printf("Unseal Replica\n")
	require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))
}

func TestRedoPC1(t *testing.T) {
24
extern/sector-storage/request_queue.go
vendored
@ -2,34 +2,34 @@ package sectorstorage

import "sort"

type requestQueue []*workerRequest
type RequestQueue []*WorkerRequest

func (q requestQueue) Len() int { return len(q) }
func (q RequestQueue) Len() int { return len(q) }

func (q requestQueue) Less(i, j int) bool {
	oneMuchLess, muchLess := q[i].taskType.MuchLess(q[j].taskType)
func (q RequestQueue) Less(i, j int) bool {
	oneMuchLess, muchLess := q[i].TaskType.MuchLess(q[j].TaskType)
	if oneMuchLess {
		return muchLess
	}

	if q[i].priority != q[j].priority {
		return q[i].priority > q[j].priority
	if q[i].Priority != q[j].Priority {
		return q[i].Priority > q[j].Priority
	}

	if q[i].taskType != q[j].taskType {
		return q[i].taskType.Less(q[j].taskType)
	if q[i].TaskType != q[j].TaskType {
		return q[i].TaskType.Less(q[j].TaskType)
	}

	return q[i].sector.ID.Number < q[j].sector.ID.Number // optimize minerActor.NewSectors bitfield
	return q[i].Sector.ID.Number < q[j].Sector.ID.Number // optimize minerActor.NewSectors bitfield
}

func (q requestQueue) Swap(i, j int) {
func (q RequestQueue) Swap(i, j int) {
	q[i], q[j] = q[j], q[i]
	q[i].index = i
	q[j].index = j
}

func (q *requestQueue) Push(x *workerRequest) {
func (q *RequestQueue) Push(x *WorkerRequest) {
	n := len(*q)
	item := x
	item.index = n
@ -37,7 +37,7 @@ func (q *requestQueue) Push(x *workerRequest) {
	sort.Sort(q)
}

func (q *requestQueue) Remove(i int) *workerRequest {
func (q *RequestQueue) Remove(i int) *WorkerRequest {
	old := *q
	n := len(old)
	item := old[i]
30
extern/sector-storage/request_queue_test.go
vendored
@ -8,13 +8,13 @@ import (
)

func TestRequestQueue(t *testing.T) {
	rq := &requestQueue{}
	rq := &RequestQueue{}

	rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
	rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
	rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit2})
	rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
	rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
	rq.Push(&WorkerRequest{TaskType: sealtasks.TTAddPiece})
	rq.Push(&WorkerRequest{TaskType: sealtasks.TTPreCommit1})
	rq.Push(&WorkerRequest{TaskType: sealtasks.TTPreCommit2})
	rq.Push(&WorkerRequest{TaskType: sealtasks.TTPreCommit1})
	rq.Push(&WorkerRequest{TaskType: sealtasks.TTAddPiece})

	dump := func(s string) {
		fmt.Println("---")
@ -22,7 +22,7 @@ func TestRequestQueue(t *testing.T) {

		for sqi := 0; sqi < rq.Len(); sqi++ {
			task := (*rq)[sqi]
			fmt.Println(sqi, task.taskType)
			fmt.Println(sqi, task.TaskType)
		}
	}

@ -32,31 +32,31 @@ func TestRequestQueue(t *testing.T) {

	dump("pop 1")

	if pt.taskType != sealtasks.TTPreCommit2 {
		t.Error("expected precommit2, got", pt.taskType)
	if pt.TaskType != sealtasks.TTPreCommit2 {
		t.Error("expected precommit2, got", pt.TaskType)
	}

	pt = rq.Remove(0)

	dump("pop 2")

	if pt.taskType != sealtasks.TTPreCommit1 {
		t.Error("expected precommit1, got", pt.taskType)
	if pt.TaskType != sealtasks.TTPreCommit1 {
		t.Error("expected precommit1, got", pt.TaskType)
	}

	pt = rq.Remove(1)

	dump("pop 3")

	if pt.taskType != sealtasks.TTAddPiece {
		t.Error("expected addpiece, got", pt.taskType)
	if pt.TaskType != sealtasks.TTAddPiece {
		t.Error("expected addpiece, got", pt.TaskType)
	}

	pt = rq.Remove(0)

	dump("pop 4")

	if pt.taskType != sealtasks.TTPreCommit1 {
		t.Error("expected precommit1, got", pt.taskType)
	if pt.TaskType != sealtasks.TTPreCommit1 {
		t.Error("expected precommit1, got", pt.TaskType)
	}
}
415
extern/sector-storage/sched.go
vendored
@ -2,9 +2,6 @@ package sectorstorage

import (
	"context"
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"

@ -47,23 +44,28 @@ const mib = 1 << 20

type WorkerAction func(ctx context.Context, w Worker) error

type WorkerSelector interface {
	Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task
	// Ok is true if worker is acceptable for performing a task.
	// If any worker is preferred for a task, other workers won't be considered for that task.
	Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *WorkerHandle) (ok, preferred bool, err error)

	Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) // true if a is preferred over b
	Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error) // true if a is preferred over b
}

type scheduler struct {
	workersLk sync.RWMutex
	workers   map[storiface.WorkerID]*workerHandle
type Scheduler struct {
	assigner Assigner

	schedule       chan *workerRequest
	windowRequests chan *schedWindowRequest
	workersLk sync.RWMutex

	Workers map[storiface.WorkerID]*WorkerHandle

	schedule       chan *WorkerRequest
	windowRequests chan *SchedWindowRequest
	workerChange   chan struct{} // worker added / changed/freed resources
	workerDisable  chan workerDisableReq

	// owned by the sh.runSched goroutine
	schedQueue  *requestQueue
	openWindows []*schedWindowRequest
	SchedQueue  *RequestQueue
	OpenWindows []*SchedWindowRequest

	workTracker *workTracker

@ -74,24 +76,24 @@ type scheduler struct {
	testSync chan struct{} // used for testing
}

type workerHandle struct {
type WorkerHandle struct {
	workerRpc Worker

	tasksCache  map[sealtasks.TaskType]struct{}
	tasksUpdate time.Time
	tasksLk     sync.Mutex

	info storiface.WorkerInfo
	Info storiface.WorkerInfo

	preparing *activeResources // use with workerHandle.lk
	active    *activeResources // use with workerHandle.lk
	preparing *ActiveResources // use with WorkerHandle.lk
	active    *ActiveResources // use with WorkerHandle.lk

	lk sync.Mutex // can be taken inside sched.workersLk.RLock

	wndLk         sync.Mutex // can be taken inside sched.workersLk.RLock
	activeWindows []*schedWindow
	activeWindows []*SchedWindow

	enabled bool
	Enabled bool

	// for sync manager goroutine closing
	cleanupStarted bool
@ -99,38 +101,28 @@ type workerHandle struct {
	closingMgr chan struct{}
}

type schedWindowRequest struct {
	worker storiface.WorkerID
type SchedWindowRequest struct {
	Worker storiface.WorkerID

	done chan *schedWindow
	Done chan *SchedWindow
}

type schedWindow struct {
	allocated activeResources
	todo      []*workerRequest
type SchedWindow struct {
	Allocated ActiveResources
	Todo      []*WorkerRequest
}

type workerDisableReq struct {
	activeWindows []*schedWindow
	activeWindows []*SchedWindow
	wid           storiface.WorkerID
	done          func()
}

type activeResources struct {
	memUsedMin uint64
	memUsedMax uint64
	gpuUsed    float64
	cpuUse     uint64

	cond    *sync.Cond
	waiting int
}

type workerRequest struct {
	sector   storage.SectorRef
	taskType sealtasks.TaskType
	priority int // larger values more important
	sel      WorkerSelector
type WorkerRequest struct {
	Sector   storage.SectorRef
	TaskType sealtasks.TaskType
	Priority int // larger values more important
	Sel      WorkerSelector

	prepare WorkerAction
	work    WorkerAction
@ -139,25 +131,37 @@ type workerRequest struct {

	index int // The index of the item in the heap.

	indexHeap int
	IndexHeap int
	ret       chan<- workerResponse
	ctx       context.Context
	Ctx       context.Context
}

type workerResponse struct {
	err error
}

func newScheduler() *scheduler {
	return &scheduler{
		workers: map[storiface.WorkerID]*workerHandle{},
func newScheduler(assigner string) (*Scheduler, error) {
	var a Assigner
	switch assigner {
	case "", "utilization":
		a = NewLowestUtilizationAssigner()
	case "spread":
		a = NewSpreadAssigner()
	default:
		return nil, xerrors.Errorf("unknown assigner '%s'", assigner)
	}

		schedule:       make(chan *workerRequest),
		windowRequests: make(chan *schedWindowRequest, 20),
	return &Scheduler{
		assigner: a,

		Workers: map[storiface.WorkerID]*WorkerHandle{},

		schedule:       make(chan *WorkerRequest),
		windowRequests: make(chan *SchedWindowRequest, 20),
		workerChange:   make(chan struct{}, 20),
		workerDisable:  make(chan workerDisableReq),

		schedQueue: &requestQueue{},
		SchedQueue: &RequestQueue{},

		workTracker: &workTracker{
			done: map[storiface.CallID]struct{}{},
@ -169,18 +173,18 @@ func newScheduler() *scheduler {

		closing: make(chan struct{}),
		closed:  make(chan struct{}),
	}
	}, nil
}

func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
func (sh *Scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
	ret := make(chan workerResponse)

	select {
	case sh.schedule <- &workerRequest{
		sector:   sector,
		taskType: taskType,
		priority: getPriority(ctx),
		sel:      sel,
	case sh.schedule <- &WorkerRequest{
		Sector:   sector,
		TaskType: taskType,
		Priority: getPriority(ctx),
		Sel:      sel,

		prepare: prepare,
		work:    work,
@ -188,7 +192,7 @@ func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, tas
		start: time.Now(),

		ret: ret,
		ctx: ctx,
		Ctx: ctx,
	}:
	case <-sh.closing:
		return xerrors.New("closing")
@ -206,14 +210,21 @@ func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, tas
	}
}

func (r *workerRequest) respond(err error) {
func (r *WorkerRequest) respond(err error) {
	select {
	case r.ret <- workerResponse{err: err}:
	case <-r.ctx.Done():
	case <-r.Ctx.Done():
		log.Warnf("request got cancelled before we could respond")
	}
}

func (r *WorkerRequest) SealTask() sealtasks.SealTaskType {
	return sealtasks.SealTaskType{
		TaskType:            r.TaskType,
		RegisteredSealProof: r.Sector.ProofType,
	}
}

type SchedDiagRequestInfo struct {
	Sector   abi.SectorID
	TaskType sealtasks.TaskType
@ -225,7 +236,7 @@ type SchedDiagInfo struct {
	OpenWindows []string
}

func (sh *scheduler) runSched() {
func (sh *Scheduler) runSched() {
	defer close(sh.closed)

	iw := time.After(InitWait)
@ -242,14 +253,14 @@ func (sh *scheduler) runSched() {
			toDisable = append(toDisable, dreq)
			doSched = true
		case req := <-sh.schedule:
			sh.schedQueue.Push(req)
			sh.SchedQueue.Push(req)
			doSched = true

			if sh.testSync != nil {
				sh.testSync <- struct{}{}
			}
		case req := <-sh.windowRequests:
			sh.openWindows = append(sh.openWindows, req)
			sh.OpenWindows = append(sh.OpenWindows, req)
			doSched = true
		case ireq := <-sh.info:
			ireq(sh.diag())
@ -273,12 +284,12 @@ func (sh *scheduler) runSched() {
				case dreq := <-sh.workerDisable:
					toDisable = append(toDisable, dreq)
				case req := <-sh.schedule:
					sh.schedQueue.Push(req)
					sh.SchedQueue.Push(req)
					if sh.testSync != nil {
						sh.testSync <- struct{}{}
					}
				case req := <-sh.windowRequests:
					sh.openWindows = append(sh.openWindows, req)
					sh.OpenWindows = append(sh.OpenWindows, req)
				default:
					break loop
				}
@ -286,21 +297,21 @@ func (sh *scheduler) runSched() {

			for _, req := range toDisable {
				for _, window := range req.activeWindows {
					for _, request := range window.todo {
						sh.schedQueue.Push(request)
					for _, request := range window.Todo {
						sh.SchedQueue.Push(request)
					}
				}

				openWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
				for _, window := range sh.openWindows {
					if window.worker != req.wid {
				openWindows := make([]*SchedWindowRequest, 0, len(sh.OpenWindows))
				for _, window := range sh.OpenWindows {
					if window.Worker != req.wid {
						openWindows = append(openWindows, window)
					}
				}
				sh.openWindows = openWindows
				sh.OpenWindows = openWindows

				sh.workersLk.Lock()
				sh.workers[req.wid].enabled = false
				sh.Workers[req.wid].Enabled = false
				sh.workersLk.Unlock()

				req.done()
@ -312,281 +323,51 @@ func (sh *scheduler) runSched() {
		}
	}
}

func (sh *scheduler) diag() SchedDiagInfo {
func (sh *Scheduler) diag() SchedDiagInfo {
	var out SchedDiagInfo

	for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
		task := (*sh.schedQueue)[sqi]
	for sqi := 0; sqi < sh.SchedQueue.Len(); sqi++ {
		task := (*sh.SchedQueue)[sqi]

		out.Requests = append(out.Requests, SchedDiagRequestInfo{
			Sector:   task.sector.ID,
			TaskType: task.taskType,
			Priority: task.priority,
			Sector:   task.Sector.ID,
			TaskType: task.TaskType,
			Priority: task.Priority,
		})
	}

	sh.workersLk.RLock()
	defer sh.workersLk.RUnlock()

	for _, window := range sh.openWindows {
		out.OpenWindows = append(out.OpenWindows, uuid.UUID(window.worker).String())
	for _, window := range sh.OpenWindows {
		out.OpenWindows = append(out.OpenWindows, uuid.UUID(window.Worker).String())
	}

	return out
}

func (sh *scheduler) trySched() {
	/*
		This assigns tasks to workers based on:
		- Task priority (achieved by handling sh.schedQueue in order, since it's already sorted by priority)
		- Worker resource availability
		- Task-specified worker preference (acceptableWindows array below sorted by this preference)
		- Window request age

		1. For each task in the schedQueue find windows which can handle them
		1.1. Create list of windows capable of handling a task
		1.2. Sort windows according to task selector preferences
		2. Going through schedQueue again, assign task to first acceptable window
		   with resources available
		3. Submit windows with scheduled tasks to workers

	*/
type Assigner interface {
	TrySched(sh *Scheduler)
}

func (sh *Scheduler) trySched() {
	sh.workersLk.RLock()
	defer sh.workersLk.RUnlock()

	windowsLen := len(sh.openWindows)
	queueLen := sh.schedQueue.Len()

	log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen)

	if windowsLen == 0 || queueLen == 0 {
		// nothing to schedule on
		return
	}

	windows := make([]schedWindow, windowsLen)
	acceptableWindows := make([][]int, queueLen) // QueueIndex -> []OpenWindowIndex

	// Step 1
	throttle := make(chan struct{}, windowsLen)

	var wg sync.WaitGroup
	wg.Add(queueLen)
	for i := 0; i < queueLen; i++ {
		throttle <- struct{}{}

		go func(sqi int) {
			defer wg.Done()
			defer func() {
				<-throttle
			}()

			task := (*sh.schedQueue)[sqi]

			task.indexHeap = sqi
			for wnd, windowRequest := range sh.openWindows {
				worker, ok := sh.workers[windowRequest.worker]
				if !ok {
					log.Errorf("worker referenced by windowRequest not found (worker: %s)", windowRequest.worker)
					// TODO: How to move forward here?
					continue
				}

				if !worker.enabled {
					log.Debugw("skipping disabled worker", "worker", windowRequest.worker)
					continue
				}

				needRes := worker.info.Resources.ResourceSpec(task.sector.ProofType, task.taskType)

				// TODO: allow bigger windows
				if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) {
					continue
				}

				rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
				ok, err := task.sel.Ok(rpcCtx, task.taskType, task.sector.ProofType, worker)
				cancel()
				if err != nil {
					log.Errorf("trySched(1) req.sel.Ok error: %+v", err)
					continue
				}

				if !ok {
					continue
				}

				acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd)
			}

			if len(acceptableWindows[sqi]) == 0 {
				return
			}

			// Pick best worker (shuffle in case some workers are equally as good)
			rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) {
				acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i] // nolint:scopelint
			})
			sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool {
				wii := sh.openWindows[acceptableWindows[sqi][i]].worker // nolint:scopelint
				wji := sh.openWindows[acceptableWindows[sqi][j]].worker // nolint:scopelint

				if wii == wji {
					// for the same worker prefer older windows
					return acceptableWindows[sqi][i] < acceptableWindows[sqi][j] // nolint:scopelint
				}

				wi := sh.workers[wii]
				wj := sh.workers[wji]

				rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
				defer cancel()

				r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj)
				if err != nil {
					log.Errorf("selecting best worker: %s", err)
				}
				return r
			})
		}(i)
	}

	wg.Wait()
||||
|
||||
log.Debugf("SCHED windows: %+v", windows)
|
||||
log.Debugf("SCHED Acceptable win: %+v", acceptableWindows)
|
||||
|
||||
// Step 2
|
||||
scheduled := 0
|
||||
rmQueue := make([]int, 0, queueLen)
|
||||
workerUtil := map[storiface.WorkerID]float64{}
|
||||
|
||||
for sqi := 0; sqi < queueLen; sqi++ {
|
||||
task := (*sh.schedQueue)[sqi]
|
||||
|
||||
selectedWindow := -1
|
||||
var needRes storiface.Resources
|
||||
var info storiface.WorkerInfo
|
||||
var bestWid storiface.WorkerID
|
||||
bestUtilization := math.MaxFloat64 // smaller = better
|
||||
|
||||
for i, wnd := range acceptableWindows[task.indexHeap] {
|
||||
wid := sh.openWindows[wnd].worker
|
||||
w := sh.workers[wid]
|
||||
|
||||
res := info.Resources.ResourceSpec(task.sector.ProofType, task.taskType)
|
||||
|
||||
log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.sector.ID.Number, wnd, i)
|
||||
|
||||
// TODO: allow bigger windows
|
||||
if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) {
|
||||
continue
|
||||
}
|
||||
|
||||
wu, found := workerUtil[wid]
|
||||
if !found {
|
||||
wu = w.utilization()
|
||||
workerUtil[wid] = wu
|
||||
}
|
||||
if wu >= bestUtilization {
|
||||
// acceptable worker list is initially sorted by utilization, and the initially-best workers
|
||||
// will be assigned tasks first. This means that if we find a worker which isn't better, it
|
||||
// probably means that the other workers aren't better either.
|
||||
//
|
||||
// utilization
|
||||
// ^
|
||||
// | /
|
||||
// | \ /
|
||||
// | \ /
|
||||
// | *
|
||||
// #--------> acceptableWindow index
|
||||
//
|
||||
// * -> we're here
|
||||
break
|
||||
}
|
||||
|
||||
info = w.info
|
||||
needRes = res
|
||||
bestWid = wid
|
||||
selectedWindow = wnd
|
||||
bestUtilization = wu
|
||||
}
|
||||
|
||||
if selectedWindow < 0 {
|
||||
// all windows full
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugw("SCHED ASSIGNED",
|
||||
"sqi", sqi,
|
||||
"sector", task.sector.ID.Number,
|
||||
"task", task.taskType,
|
||||
"window", selectedWindow,
|
||||
"worker", bestWid,
|
||||
"utilization", bestUtilization)
|
||||
|
||||
workerUtil[bestWid] += windows[selectedWindow].allocated.add(info.Resources, needRes)
|
||||
windows[selectedWindow].todo = append(windows[selectedWindow].todo, task)
|
||||
|
||||
rmQueue = append(rmQueue, sqi)
|
||||
scheduled++
|
||||
}
|
||||
|
||||
if len(rmQueue) > 0 {
|
||||
for i := len(rmQueue) - 1; i >= 0; i-- {
|
||||
sh.schedQueue.Remove(rmQueue[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Step 3
|
||||
|
||||
if scheduled == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
scheduledWindows := map[int]struct{}{}
|
||||
for wnd, window := range windows {
|
||||
if len(window.todo) == 0 {
|
||||
// Nothing scheduled here, keep the window open
|
||||
continue
|
||||
}
|
||||
|
||||
scheduledWindows[wnd] = struct{}{}
|
||||
|
||||
window := window // copy
|
||||
select {
|
||||
case sh.openWindows[wnd].done <- &window:
|
||||
default:
|
||||
log.Error("expected sh.openWindows[wnd].done to be buffered")
|
||||
}
|
||||
}
|
||||
|
||||
// Rewrite sh.openWindows array, removing scheduled windows
|
||||
newOpenWindows := make([]*schedWindowRequest, 0, windowsLen-len(scheduledWindows))
|
||||
for wnd, window := range sh.openWindows {
|
||||
if _, scheduled := scheduledWindows[wnd]; scheduled {
|
||||
// keep unscheduled windows open
|
||||
continue
|
||||
}
|
||||
|
||||
newOpenWindows = append(newOpenWindows, window)
|
||||
}
|
||||
|
||||
sh.openWindows = newOpenWindows
|
||||
sh.assigner.TrySched(sh)
|
||||
}
|
||||
|
||||
func (sh *scheduler) schedClose() {
|
||||
func (sh *Scheduler) schedClose() {
|
||||
sh.workersLk.Lock()
|
||||
defer sh.workersLk.Unlock()
|
||||
log.Debugf("closing scheduler")
|
||||
|
||||
for i, w := range sh.workers {
|
||||
for i, w := range sh.Workers {
|
||||
sh.workerCleanup(i, w)
|
||||
}
|
||||
}
|
||||
|
||||
func (sh *scheduler) Info(ctx context.Context) (interface{}, error) {
|
||||
func (sh *Scheduler) Info(ctx context.Context) (interface{}, error) {
|
||||
ch := make(chan interface{}, 1)
|
||||
|
||||
sh.info <- func(res interface{}) {
|
||||
@ -601,7 +382,7 @@ func (sh *scheduler) Info(ctx context.Context) (interface{}, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (sh *scheduler) Close(ctx context.Context) error {
|
||||
func (sh *Scheduler) Close(ctx context.Context) error {
|
||||
close(sh.closing)
|
||||
select {
|
||||
case <-sh.closed:
|
||||
|
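The net effect of the sched.go hunks above: trySched no longer hard-codes an assignment policy. Candidate discovery and window bookkeeping stay in the Scheduler, while the policy moves behind the new Assigner interface and is invoked through sh.assigner.TrySched(sh); the test changes further down suggest newScheduler now takes the assigner name and can fail. A minimal, self-contained sketch of that shape (every name here except Assigner and TrySched is illustrative, not the lotus types):

package main

import "fmt"

// Assigner is the pluggable policy; the scheduler keeps the mechanics.
type Assigner interface {
	TrySched(queued int) (scheduled int)
}

// greedy is a stand-in policy that "schedules" everything it is given.
type greedy struct{}

func (greedy) TrySched(queued int) int { return queued }

type scheduler struct{ assigner Assigner }

// newScheduler mirrors the constructor-by-name pattern visible in the tests below.
func newScheduler(name string) (*scheduler, error) {
	switch name {
	case "", "utilization", "spread":
		return &scheduler{assigner: greedy{}}, nil
	default:
		return nil, fmt.Errorf("unknown assigner: %s", name)
	}
}

func main() {
	sh, err := newScheduler("")
	if err != nil {
		panic(err)
	}
	fmt.Println("scheduled:", sh.assigner.TrySched(3)) // scheduled: 3
}

The two real policies behind this interface are supplied by the new files below.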
192 extern/sector-storage/sched_assigner_common.go vendored Normal file
@@ -0,0 +1,192 @@
package sectorstorage

import (
	"context"
	"math/rand"
	"sort"
	"sync"
)

type WindowSelector func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int

// AssignerCommon is a task assigner with customizable parts
type AssignerCommon struct {
	WindowSel WindowSelector
}

var _ Assigner = &AssignerCommon{}

func (a *AssignerCommon) TrySched(sh *Scheduler) {
	/*
		This assigns tasks to workers based on:
		- Task priority (achieved by handling sh.SchedQueue in order, since it's already sorted by priority)
		- Worker resource availability
		- Task-specified worker preference (acceptableWindows array below sorted by this preference)
		- Window request age

		1. For each task in the SchedQueue find windows which can handle them
		1.1. Create list of windows capable of handling a task
		1.2. Sort windows according to task selector preferences
		2. Going through SchedQueue again, assign task to first acceptable window
		   with resources available
		3. Submit windows with scheduled tasks to workers

	*/

	windowsLen := len(sh.OpenWindows)
	queueLen := sh.SchedQueue.Len()

	log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen)

	if windowsLen == 0 || queueLen == 0 {
		// nothing to schedule on
		return
	}

	windows := make([]SchedWindow, windowsLen)
	for i := range windows {
		windows[i].Allocated = *NewActiveResources()
	}
	acceptableWindows := make([][]int, queueLen) // QueueIndex -> []OpenWindowIndex

	// Step 1
	throttle := make(chan struct{}, windowsLen)

	var wg sync.WaitGroup
	wg.Add(queueLen)
	for i := 0; i < queueLen; i++ {
		throttle <- struct{}{}

		go func(sqi int) {
			defer wg.Done()
			defer func() {
				<-throttle
			}()

			task := (*sh.SchedQueue)[sqi]
			task.IndexHeap = sqi

			var havePreferred bool

			for wnd, windowRequest := range sh.OpenWindows {
				worker, ok := sh.Workers[windowRequest.Worker]
				if !ok {
					log.Errorf("worker referenced by windowRequest not found (worker: %s)", windowRequest.Worker)
					// TODO: How to move forward here?
					continue
				}

				if !worker.Enabled {
					log.Debugw("skipping disabled worker", "worker", windowRequest.Worker)
					continue
				}

				needRes := worker.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)

				// TODO: allow bigger windows
				if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), needRes, windowRequest.Worker, "schedAcceptable", worker.Info) {
					continue
				}

				rpcCtx, cancel := context.WithTimeout(task.Ctx, SelectorTimeout)
				ok, preferred, err := task.Sel.Ok(rpcCtx, task.TaskType, task.Sector.ProofType, worker)
				cancel()
				if err != nil {
					log.Errorf("trySched(1) req.Sel.Ok error: %+v", err)
					continue
				}

				if !ok {
					continue
				}

				if havePreferred && !preferred {
					// we have a way better worker for this task
					continue
				}

				if preferred && !havePreferred {
					// all workers we considered previously are much worse choice
					acceptableWindows[sqi] = acceptableWindows[sqi][:0]
					havePreferred = true
				}

				acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd)
			}

			if len(acceptableWindows[sqi]) == 0 {
				return
			}

			// Pick best worker (shuffle in case some workers are equally as good)
			rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) {
				acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i] // nolint:scopelint
			})
			sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool {
				wii := sh.OpenWindows[acceptableWindows[sqi][i]].Worker // nolint:scopelint
				wji := sh.OpenWindows[acceptableWindows[sqi][j]].Worker // nolint:scopelint

				if wii == wji {
					// for the same worker prefer older windows
					return acceptableWindows[sqi][i] < acceptableWindows[sqi][j] // nolint:scopelint
				}

				wi := sh.Workers[wii]
				wj := sh.Workers[wji]

				rpcCtx, cancel := context.WithTimeout(task.Ctx, SelectorTimeout)
				defer cancel()

				r, err := task.Sel.Cmp(rpcCtx, task.TaskType, wi, wj)
				if err != nil {
					log.Errorf("selecting best worker: %s", err)
				}
				return r
			})
		}(i)
	}

	wg.Wait()

	log.Debugf("SCHED windows: %+v", windows)
	log.Debugf("SCHED Acceptable win: %+v", acceptableWindows)

	// Step 2
	scheduled := a.WindowSel(sh, queueLen, acceptableWindows, windows)

	// Step 3

	if scheduled == 0 {
		return
	}

	scheduledWindows := map[int]struct{}{}
	for wnd, window := range windows {
		if len(window.Todo) == 0 {
			// Nothing scheduled here, keep the window open
			continue
		}

		scheduledWindows[wnd] = struct{}{}

		window := window // copy
		select {
		case sh.OpenWindows[wnd].Done <- &window:
		default:
			log.Error("expected sh.OpenWindows[wnd].Done to be buffered")
		}
	}

	// Rewrite sh.OpenWindows array, removing scheduled windows
	newOpenWindows := make([]*SchedWindowRequest, 0, windowsLen-len(scheduledWindows))
	for wnd, window := range sh.OpenWindows {
		if _, scheduled := scheduledWindows[wnd]; scheduled {
			// keep unscheduled windows open
			continue
		}

		newOpenWindows = append(newOpenWindows, window)
	}

	sh.OpenWindows = newOpenWindows
}
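AssignerCommon keeps Step 1 (finding acceptable windows per task, honoring the new preferred flag) and Step 3 (submitting filled windows) and delegates only Step 2 through the WindowSel hook. A sketch of a custom policy that should compile in this package against the types above; firstFitWS is hypothetical and not part of this commit:

// firstFitWS assigns each task to the first acceptable window with room,
// using the same bookkeeping as the built-in window selectors.
func firstFitWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
	scheduled := 0
	rmQueue := make([]int, 0, queueLen)

	for sqi := 0; sqi < queueLen; sqi++ {
		task := (*sh.SchedQueue)[sqi]

		for _, wnd := range acceptableWindows[task.IndexHeap] {
			wid := sh.OpenWindows[wnd].Worker
			w := sh.Workers[wid]
			res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)

			if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "firstFit", w.Info) {
				continue
			}

			// Take the first window with room and stop looking.
			windows[wnd].Allocated.Add(task.SealTask(), w.Info.Resources, res)
			windows[wnd].Todo = append(windows[wnd].Todo, task)
			rmQueue = append(rmQueue, sqi)
			scheduled++
			break
		}
	}

	for i := len(rmQueue) - 1; i >= 0; i-- {
		sh.SchedQueue.Remove(rmQueue[i])
	}
	return scheduled
}

It would plug in exactly like the built-ins that follow: &AssignerCommon{WindowSel: firstFitWS}.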
81 extern/sector-storage/sched_assigner_spread.go vendored Normal file
@@ -0,0 +1,81 @@
package sectorstorage

import (
	"math"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func NewSpreadAssigner() Assigner {
	return &AssignerCommon{
		WindowSel: SpreadWS,
	}
}

func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
	scheduled := 0
	rmQueue := make([]int, 0, queueLen)
	workerAssigned := map[storiface.WorkerID]int{}

	for sqi := 0; sqi < queueLen; sqi++ {
		task := (*sh.SchedQueue)[sqi]

		selectedWindow := -1
		var needRes storiface.Resources
		var info storiface.WorkerInfo
		var bestWid storiface.WorkerID
		bestAssigned := math.MaxInt // smaller = better

		for i, wnd := range acceptableWindows[task.IndexHeap] {
			wid := sh.OpenWindows[wnd].Worker
			w := sh.Workers[wid]

			res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)

			log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)

			if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
				continue
			}

			wu, _ := workerAssigned[wid]
			if wu >= bestAssigned {
				continue
			}

			info = w.Info
			needRes = res
			bestWid = wid
			selectedWindow = wnd
			bestAssigned = wu
		}

		if selectedWindow < 0 {
			// all windows full
			continue
		}

		log.Debugw("SCHED ASSIGNED",
			"sqi", sqi,
			"sector", task.Sector.ID.Number,
			"task", task.TaskType,
			"window", selectedWindow,
			"worker", bestWid,
			"assigned", bestAssigned)

		workerAssigned[bestWid]++
		windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
		windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)

		rmQueue = append(rmQueue, sqi)
		scheduled++
	}

	if len(rmQueue) > 0 {
		for i := len(rmQueue) - 1; i >= 0; i-- {
			sh.SchedQueue.Remove(rmQueue[i])
		}
	}

	return scheduled
}
98 extern/sector-storage/sched_assigner_utilization.go vendored Normal file
@@ -0,0 +1,98 @@
package sectorstorage

import (
	"math"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func NewLowestUtilizationAssigner() Assigner {
	return &AssignerCommon{
		WindowSel: LowestUtilizationWS,
	}
}

func LowestUtilizationWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
	scheduled := 0
	rmQueue := make([]int, 0, queueLen)
	workerUtil := map[storiface.WorkerID]float64{}

	for sqi := 0; sqi < queueLen; sqi++ {
		task := (*sh.SchedQueue)[sqi]

		selectedWindow := -1
		var needRes storiface.Resources
		var info storiface.WorkerInfo
		var bestWid storiface.WorkerID
		bestUtilization := math.MaxFloat64 // smaller = better

		for i, wnd := range acceptableWindows[task.IndexHeap] {
			wid := sh.OpenWindows[wnd].Worker
			w := sh.Workers[wid]

			res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)

			log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)

			// TODO: allow bigger windows
			if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
				continue
			}

			wu, found := workerUtil[wid]
			if !found {
				wu = w.Utilization()
				workerUtil[wid] = wu
			}
			if wu >= bestUtilization {
				// acceptable worker list is initially sorted by utilization, and the initially-best workers
				// will be assigned tasks first. This means that if we find a worker which isn't better, it
				// probably means that the other workers aren't better either.
				//
				// utilization
				// ^
				// |       /
				// | \    /
				// |  \  /
				// |   *
				// #--------> acceptableWindow index
				//
				// * -> we're here
				break
			}

			info = w.Info
			needRes = res
			bestWid = wid
			selectedWindow = wnd
			bestUtilization = wu
		}

		if selectedWindow < 0 {
			// all windows full
			continue
		}

		log.Debugw("SCHED ASSIGNED",
			"sqi", sqi,
			"sector", task.Sector.ID.Number,
			"task", task.TaskType,
			"window", selectedWindow,
			"worker", bestWid,
			"utilization", bestUtilization)

		workerUtil[bestWid] += windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
		windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)

		rmQueue = append(rmQueue, sqi)
		scheduled++
	}

	if len(rmQueue) > 0 {
		for i := len(rmQueue) - 1; i >= 0; i-- {
			sh.SchedQueue.Remove(rmQueue[i])
		}
	}

	return scheduled
}
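SpreadWS and LowestUtilizationWS differ only in their score: the former minimizes the number of tasks handed to each worker during the current pass, the latter minimizes the utilization fraction reported by Utilization(), updated as resources are Add()ed. A hypothetical same-package helper for picking one by name; the two constructors are real, assignerByName is not in the commit:

func assignerByName(name string) (Assigner, error) {
	switch name {
	case "spread":
		return NewSpreadAssigner(), nil
	case "", "utilization":
		return NewLowestUtilizationAssigner(), nil
	default:
		return nil, xerrors.Errorf("unknown assigner %q", name)
	}
}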
28 extern/sector-storage/sched_post.go vendored
@@ -17,7 +17,7 @@ import (

 type poStScheduler struct {
 	lk      sync.RWMutex
-	workers map[storiface.WorkerID]*workerHandle
+	workers map[storiface.WorkerID]*WorkerHandle
 	cond    *sync.Cond

 	postType sealtasks.TaskType
@@ -25,14 +25,14 @@ type poStScheduler struct {

 func newPoStScheduler(t sealtasks.TaskType) *poStScheduler {
 	ps := &poStScheduler{
-		workers:  map[storiface.WorkerID]*workerHandle{},
+		workers:  map[storiface.WorkerID]*WorkerHandle{},
 		postType: t,
 	}
 	ps.cond = sync.NewCond(&ps.lk)
 	return ps
 }

-func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealtasks.TaskType]struct{}, w *workerHandle) bool {
+func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealtasks.TaskType]struct{}, w *WorkerHandle) bool {
 	if _, ok := tasks[ps.postType]; !ok {
 		return false
 	}
@@ -49,10 +49,10 @@ func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealta
 	return true
 }

-func (ps *poStScheduler) delWorker(wid storiface.WorkerID) *workerHandle {
+func (ps *poStScheduler) delWorker(wid storiface.WorkerID) *WorkerHandle {
 	ps.lk.Lock()
 	defer ps.lk.Unlock()
-	var w *workerHandle = nil
+	var w *WorkerHandle = nil
 	if wh, ok := ps.workers[wid]; ok {
 		w = wh
 		delete(ps.workers, wid)
@@ -68,7 +68,7 @@ func (ps *poStScheduler) CanSched(ctx context.Context) bool {
 	}

 	for _, w := range ps.workers {
-		if w.enabled {
+		if w.Enabled {
 			return true
 		}
 	}
@@ -105,7 +105,7 @@ func (ps *poStScheduler) Schedule(ctx context.Context, primary bool, spt abi.Reg
 	selected := candidates[0]
 	worker := ps.workers[selected.id]

-	return worker.active.withResources(selected.id, worker.info, selected.res, &ps.lk, func() error {
+	return worker.active.withResources(selected.id, worker.Info, ps.postType.SealTask(spt), selected.res, &ps.lk, func() error {
 		ps.lk.Unlock()
 		defer ps.lk.Lock()

@@ -122,9 +122,9 @@ func (ps *poStScheduler) readyWorkers(spt abi.RegisteredSealProof) (bool, []cand
 	var accepts []candidateWorker
 	//if the gpus of the worker are insufficient or it's disabled, it cannot be scheduled
 	for wid, wr := range ps.workers {
-		needRes := wr.info.Resources.ResourceSpec(spt, ps.postType)
+		needRes := wr.Info.Resources.ResourceSpec(spt, ps.postType)

-		if !wr.active.canHandleRequest(needRes, wid, "post-readyWorkers", wr.info) {
+		if !wr.active.CanHandleRequest(ps.postType.SealTask(spt), needRes, wid, "post-readyWorkers", wr.Info) {
 			continue
 		}

@@ -145,16 +145,16 @@ func (ps *poStScheduler) readyWorkers(spt abi.RegisteredSealProof) (bool, []cand
 func (ps *poStScheduler) disable(wid storiface.WorkerID) {
 	ps.lk.Lock()
 	defer ps.lk.Unlock()
-	ps.workers[wid].enabled = false
+	ps.workers[wid].Enabled = false
 }

 func (ps *poStScheduler) enable(wid storiface.WorkerID) {
 	ps.lk.Lock()
 	defer ps.lk.Unlock()
-	ps.workers[wid].enabled = true
+	ps.workers[wid].Enabled = true
 }

-func (ps *poStScheduler) watch(wid storiface.WorkerID, worker *workerHandle) {
+func (ps *poStScheduler) watch(wid storiface.WorkerID, worker *WorkerHandle) {
 	heartbeatTimer := time.NewTicker(stores.HeartbeatInterval)
 	defer heartbeatTimer.Stop()

@@ -197,7 +197,7 @@ func (ps *poStScheduler) watch(wid storiface.WorkerID, worker *workerHandle) {
 	}
 }

-func (ps *poStScheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) {
+func (ps *poStScheduler) workerCleanup(wid storiface.WorkerID, w *WorkerHandle) {
 	select {
 	case <-w.closingMgr:
 	default:
@@ -223,7 +223,7 @@ func (ps *poStScheduler) schedClose() {
 	}
 }

-func (ps *poStScheduler) WorkerStats(ctx context.Context, cb func(ctx context.Context, wid storiface.WorkerID, worker *workerHandle)) {
+func (ps *poStScheduler) WorkerStats(ctx context.Context, cb func(ctx context.Context, wid storiface.WorkerID, worker *WorkerHandle)) {
 	ps.lk.RLock()
 	defer ps.lk.RUnlock()
 	for id, w := range ps.workers {
59 extern/sector-storage/sched_resources.go vendored
@@ -9,8 +9,26 @@ import (
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

-func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, r storiface.Resources, locker sync.Locker, cb func() error) error {
-	for !a.canHandleRequest(r, id, "withResources", wr) {
+type ActiveResources struct {
+	memUsedMin uint64
+	memUsedMax uint64
+	gpuUsed    float64
+	cpuUse     uint64
+
+	taskCounters map[sealtasks.SealTaskType]int
+
+	cond    *sync.Cond
+	waiting int
+}
+
+func NewActiveResources() *ActiveResources {
+	return &ActiveResources{
+		taskCounters: map[sealtasks.SealTaskType]int{},
+	}
+}
+
+func (a *ActiveResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, tt sealtasks.SealTaskType, r storiface.Resources, locker sync.Locker, cb func() error) error {
+	for !a.CanHandleRequest(tt, r, id, "withResources", wr) {
 		if a.cond == nil {
 			a.cond = sync.NewCond(locker)
 		}
@@ -19,22 +37,22 @@ func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.Work
 		a.waiting--
 	}

-	a.add(wr.Resources, r)
+	a.Add(tt, wr.Resources, r)

 	err := cb()

-	a.free(wr.Resources, r)
+	a.Free(tt, wr.Resources, r)

 	return err
 }

 // must be called with the same lock as the one passed to withResources
-func (a *activeResources) hasWorkWaiting() bool {
+func (a *ActiveResources) hasWorkWaiting() bool {
 	return a.waiting > 0
 }

-// add task resources to activeResources and return utilization difference
-func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resources) float64 {
+// add task resources to ActiveResources and return utilization difference
+func (a *ActiveResources) Add(tt sealtasks.SealTaskType, wr storiface.WorkerResources, r storiface.Resources) float64 {
 	startUtil := a.utilization(wr)

 	if r.GPUUtilization > 0 {
@@ -43,26 +61,35 @@ func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resource
 	a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs))
 	a.memUsedMin += r.MinMemory
 	a.memUsedMax += r.MaxMemory
+	a.taskCounters[tt]++

 	return a.utilization(wr) - startUtil
 }

-func (a *activeResources) free(wr storiface.WorkerResources, r storiface.Resources) {
+func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerResources, r storiface.Resources) {
 	if r.GPUUtilization > 0 {
 		a.gpuUsed -= r.GPUUtilization
 	}
 	a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs))
 	a.memUsedMin -= r.MinMemory
 	a.memUsedMax -= r.MaxMemory
+	a.taskCounters[tt]--

 	if a.cond != nil {
 		a.cond.Broadcast()
 	}
 }

-// canHandleRequest evaluates if the worker has enough available resources to
+// CanHandleRequest evaluates if the worker has enough available resources to
 // handle the request.
-func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
+func (a *ActiveResources) CanHandleRequest(tt sealtasks.SealTaskType, needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
+	if needRes.MaxConcurrent > 0 {
+		if a.taskCounters[tt] >= needRes.MaxConcurrent {
+			log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters[tt])
+			return false
+		}
+	}
+
 	if info.IgnoreResources {
 		// shortcircuit; if this worker is ignoring resources, it can always handle the request.
 		return true
@@ -110,7 +137,7 @@ func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid stor
 }

 // utilization returns a number in 0..1 range indicating fraction of used resources
-func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
+func (a *ActiveResources) utilization(wr storiface.WorkerResources) float64 { // todo task type
 	var max float64

 	cpu := float64(a.cpuUse) / float64(wr.CPUs)
@@ -145,14 +172,14 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
 	return max
 }

-func (wh *workerHandle) utilization() float64 {
+func (wh *WorkerHandle) Utilization() float64 {
 	wh.lk.Lock()
-	u := wh.active.utilization(wh.info.Resources)
-	u += wh.preparing.utilization(wh.info.Resources)
+	u := wh.active.utilization(wh.Info.Resources)
+	u += wh.preparing.utilization(wh.Info.Resources)
 	wh.lk.Unlock()
 	wh.wndLk.Lock()
 	for _, window := range wh.activeWindows {
-		u += window.allocated.utilization(wh.info.Resources)
+		u += window.Allocated.utilization(wh.Info.Resources)
 	}
 	wh.wndLk.Unlock()

@@ -161,7 +188,7 @@ func (wh *workerHandle) utilization() float64 {

 var tasksCacheTimeout = 30 * time.Second

-func (wh *workerHandle) TaskTypes(ctx context.Context) (t map[sealtasks.TaskType]struct{}, err error) {
+func (wh *WorkerHandle) TaskTypes(ctx context.Context) (t map[sealtasks.TaskType]struct{}, err error) {
 	wh.tasksLk.Lock()
 	defer wh.tasksLk.Unlock()
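The new taskCounters map is what makes per-task-type concurrency caps work: CanHandleRequest now rejects a request once taskCounters[tt] reaches needRes.MaxConcurrent, and the counter is keyed by SealTaskType, so the cap applies per (task type, seal proof) pair rather than globally. A self-contained model of just that gate (illustrative names, not the lotus types):

package main

import "fmt"

type sealTask struct{ task, proof string }

type active struct{ counters map[sealTask]int }

// canHandle mirrors the MaxConcurrent check in CanHandleRequest above;
// a limit of 0 means "no cap", matching the needRes.MaxConcurrent > 0 guard.
func (a *active) canHandle(tt sealTask, maxConcurrent int) bool {
	return maxConcurrent == 0 || a.counters[tt] < maxConcurrent
}

func main() {
	a := &active{counters: map[sealTask]int{}}
	tt := sealTask{"seal/v0/addpiece", "32GiB"}

	for i := 0; i < 3; i++ {
		if a.canHandle(tt, 2) { // cap of 2 concurrent tasks of this type
			a.counters[tt]++ // what Add does
			fmt.Println("accepted", i)
		} else {
			fmt.Println("rejected", i, "- at task limit")
		}
	}
}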
85 extern/sector-storage/sched_test.go vendored
@@ -183,7 +183,7 @@ func (s *schedTestWorker) Close() error {

 var _ Worker = &schedTestWorker{}

-func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) {
+func addTestWorker(t *testing.T, sched *Scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) {
 	w := &schedTestWorker{
 		name:      name,
 		taskTypes: taskTypes,
@@ -223,7 +223,8 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 }

 func TestSchedStartStop(t *testing.T) {
-	sched := newScheduler()
+	sched, err := newScheduler("")
+	require.NoError(t, err)
 	go sched.runSched()

 	addTestWorker(t, sched, stores.NewIndex(), "fred", nil, decentWorkerResources, false)
@@ -259,13 +260,13 @@ func TestSched(t *testing.T) {
 		wg sync.WaitGroup
 	}

-	type task func(*testing.T, *scheduler, *stores.Index, *runMeta)
+	type task func(*testing.T, *Scheduler, *stores.Index, *runMeta)

 	sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task {
 		_, _, l, _ := runtime.Caller(1)
 		_, _, l2, _ := runtime.Caller(2)

-		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
+		return func(t *testing.T, sched *Scheduler, index *stores.Index, rm *runMeta) {
 			done := make(chan struct{})
 			rm.done[taskName] = done

@@ -314,7 +315,7 @@ func TestSched(t *testing.T) {
 	taskStarted := func(name string) task {
 		_, _, l, _ := runtime.Caller(1)
 		_, _, l2, _ := runtime.Caller(2)
-		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
+		return func(t *testing.T, sched *Scheduler, index *stores.Index, rm *runMeta) {
 			select {
 			case rm.done[name] <- struct{}{}:
 			case <-ctx.Done():
@@ -326,7 +327,7 @@ func TestSched(t *testing.T) {
 	taskDone := func(name string) task {
 		_, _, l, _ := runtime.Caller(1)
 		_, _, l2, _ := runtime.Caller(2)
-		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
+		return func(t *testing.T, sched *Scheduler, index *stores.Index, rm *runMeta) {
 			select {
 			case rm.done[name] <- struct{}{}:
 			case <-ctx.Done():
@@ -339,7 +340,7 @@ func TestSched(t *testing.T) {
 	taskNotScheduled := func(name string) task {
 		_, _, l, _ := runtime.Caller(1)
 		_, _, l2, _ := runtime.Caller(2)
-		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
+		return func(t *testing.T, sched *Scheduler, index *stores.Index, rm *runMeta) {
 			select {
 			case rm.done[name] <- struct{}{}:
 				t.Fatal("not expected", l, l2)
@@ -352,7 +353,8 @@ func TestSched(t *testing.T) {
 		return func(t *testing.T) {
 			index := stores.NewIndex()

-			sched := newScheduler()
+			sched, err := newScheduler("")
+			require.NoError(t, err)
 			sched.testSync = make(chan struct{})

 			go sched.runSched()
@@ -378,7 +380,7 @@ func TestSched(t *testing.T) {
 	}

 	multTask := func(tasks ...task) task {
-		return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
+		return func(t *testing.T, s *Scheduler, index *stores.Index, meta *runMeta) {
 			for _, tsk := range tasks {
 				tsk(t, s, index, meta)
 			}
@@ -492,7 +494,7 @@ func TestSched(t *testing.T) {
 	}

 	diag := func() task {
-		return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
+		return func(t *testing.T, s *Scheduler, index *stores.Index, meta *runMeta) {
 			time.Sleep(20 * time.Millisecond)
 			for _, request := range s.diag().Requests {
 				log.Infof("!!! sDIAG: sid(%d) task(%s)", request.Sector.Number, request.TaskType)
@@ -582,12 +584,12 @@ func TestSched(t *testing.T) {

 type slowishSelector bool

-func (s slowishSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *workerHandle) (bool, error) {
+func (s slowishSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *WorkerHandle) (bool, bool, error) {
 	time.Sleep(200 * time.Microsecond)
-	return bool(s), nil
+	return bool(s), false, nil
 }

-func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
+func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error) {
 	time.Sleep(100 * time.Microsecond)
 	return true, nil
 }
@@ -604,29 +606,30 @@ func BenchmarkTrySched(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()

-		sched := newScheduler()
-		sched.workers[storiface.WorkerID{}] = &workerHandle{
+		sched, err := newScheduler("")
+		require.NoError(b, err)
+		sched.Workers[storiface.WorkerID{}] = &WorkerHandle{
 			workerRpc: nil,
-			info: storiface.WorkerInfo{
+			Info: storiface.WorkerInfo{
 				Hostname:  "t",
 				Resources: decentWorkerResources,
 			},
-			preparing: &activeResources{},
-			active:    &activeResources{},
+			preparing: NewActiveResources(),
+			active:    NewActiveResources(),
 		}

 		for i := 0; i < windows; i++ {
-			sched.openWindows = append(sched.openWindows, &schedWindowRequest{
-				worker: storiface.WorkerID{},
-				done:   make(chan *schedWindow, 1000),
+			sched.OpenWindows = append(sched.OpenWindows, &SchedWindowRequest{
+				Worker: storiface.WorkerID{},
+				Done:   make(chan *SchedWindow, 1000),
 			})
 		}

 		for i := 0; i < queue; i++ {
-			sched.schedQueue.Push(&workerRequest{
-				taskType: sealtasks.TTCommit2,
-				sel:      slowishSelector(true),
-				ctx:      ctx,
+			sched.SchedQueue.Push(&WorkerRequest{
+				TaskType: sealtasks.TTCommit2,
+				Sel:      slowishSelector(true),
+				Ctx:      ctx,
 			})
 		}

@@ -644,26 +647,28 @@ func BenchmarkTrySched(b *testing.B) {
 }

 func TestWindowCompact(t *testing.T) {
-	sh := scheduler{}
+	sh := Scheduler{}
 	spt := abi.RegisteredSealProof_StackedDrg32GiBV1

 	test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) {
 		return func(t *testing.T) {
-			wh := &workerHandle{
-				info: storiface.WorkerInfo{
+			wh := &WorkerHandle{
+				Info: storiface.WorkerInfo{
 					Resources: decentWorkerResources,
 				},
 			}

 			for _, windowTasks := range start {
-				window := &schedWindow{}
+				window := &SchedWindow{
+					Allocated: *NewActiveResources(),
+				}

 				for _, task := range windowTasks {
-					window.todo = append(window.todo, &workerRequest{
-						taskType: task,
-						sector:   storage.SectorRef{ProofType: spt},
+					window.Todo = append(window.Todo, &WorkerRequest{
+						TaskType: task,
+						Sector:   storage.SectorRef{ProofType: spt},
 					})
-					window.allocated.add(wh.info.Resources, storiface.ResourceTable[task][spt])
+					window.Allocated.Add(task.SealTask(spt), wh.Info.Resources, storiface.ResourceTable[task][spt])
 				}

 				wh.activeWindows = append(wh.activeWindows, window)
@@ -678,17 +683,17 @@ func TestWindowCompact(t *testing.T) {
 			require.Equal(t, len(start)-len(expect), -sw.windowsRequested)

 			for wi, tasks := range expect {
-				var expectRes activeResources
+				expectRes := NewActiveResources()

 				for ti, task := range tasks {
-					require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti)
-					expectRes.add(wh.info.Resources, storiface.ResourceTable[task][spt])
+					require.Equal(t, task, wh.activeWindows[wi].Todo[ti].TaskType, "%d, %d", wi, ti)
+					expectRes.Add(task.SealTask(spt), wh.Info.Resources, storiface.ResourceTable[task][spt])
 				}

-				require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi)
-				require.Equal(t, expectRes.gpuUsed, wh.activeWindows[wi].allocated.gpuUsed, "%d", wi)
-				require.Equal(t, expectRes.memUsedMin, wh.activeWindows[wi].allocated.memUsedMin, "%d", wi)
-				require.Equal(t, expectRes.memUsedMax, wh.activeWindows[wi].allocated.memUsedMax, "%d", wi)
+				require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].Allocated.cpuUse, "%d", wi)
+				require.Equal(t, expectRes.gpuUsed, wh.activeWindows[wi].Allocated.gpuUsed, "%d", wi)
+				require.Equal(t, expectRes.memUsedMin, wh.activeWindows[wi].Allocated.memUsedMin, "%d", wi)
+				require.Equal(t, expectRes.memUsedMax, wh.activeWindows[wi].Allocated.memUsedMax, "%d", wi)
 			}

 		}
148 extern/sector-storage/sched_worker.go vendored
@@ -12,31 +12,31 @@ import (
 )

 type schedWorker struct {
-	sched  *scheduler
-	worker *workerHandle
+	sched  *Scheduler
+	worker *WorkerHandle

 	wid storiface.WorkerID

 	heartbeatTimer   *time.Ticker
-	scheduledWindows chan *schedWindow
+	scheduledWindows chan *SchedWindow
 	taskDone         chan struct{}

 	windowsRequested int
 }

-func newWorkerHandle(ctx context.Context, w Worker) (*workerHandle, error) {
+func newWorkerHandle(ctx context.Context, w Worker) (*WorkerHandle, error) {
 	info, err := w.Info(ctx)
 	if err != nil {
 		return nil, xerrors.Errorf("getting worker info: %w", err)
 	}

-	worker := &workerHandle{
+	worker := &WorkerHandle{
 		workerRpc: w,
-		info:      info,
+		Info:      info,

-		preparing: &activeResources{},
-		active:    &activeResources{},
-		enabled:   true,
+		preparing: NewActiveResources(),
+		active:    NewActiveResources(),
+		Enabled:   true,

 		closingMgr: make(chan struct{}),
 		closedMgr:  make(chan struct{}),
@@ -46,9 +46,9 @@ func newWorkerHandle(ctx context.Context, w Worker) (*workerHandle, error) {
 }

 // context only used for startup
-func (sh *scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, worker *workerHandle) error {
+func (sh *Scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, worker *WorkerHandle) error {
 	sh.workersLk.Lock()
-	_, exist := sh.workers[wid]
+	_, exist := sh.Workers[wid]
 	if exist {
 		log.Warnw("duplicated worker added", "id", wid)

@@ -57,7 +57,7 @@ func (sh *scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, work
 		return nil
 	}

-	sh.workers[wid] = worker
+	sh.Workers[wid] = worker
 	sh.workersLk.Unlock()

 	sw := &schedWorker{
@@ -67,7 +67,7 @@ func (sh *scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, work
 		wid: wid,

 		heartbeatTimer:   time.NewTicker(stores.HeartbeatInterval),
-		scheduledWindows: make(chan *schedWindow, SchedWindows),
+		scheduledWindows: make(chan *SchedWindow, SchedWindows),
 		taskDone:         make(chan struct{}, 1),

 		windowsRequested: 0,
@@ -94,7 +94,7 @@ func (sw *schedWorker) handleWorker() {
 		}

 		sched.workersLk.Lock()
-		delete(sched.workers, sw.wid)
+		delete(sched.Workers, sw.wid)
 		sched.workersLk.Unlock()
 	}()

@@ -103,7 +103,7 @@ func (sw *schedWorker) handleWorker() {
 	for {
 		{
 			sched.workersLk.Lock()
-			enabled := worker.enabled
+			enabled := worker.Enabled
 			sched.workersLk.Unlock()

 			// ask for more windows if we need them (non-blocking)
@@ -124,8 +124,8 @@ func (sw *schedWorker) handleWorker() {
 			// session looks good
 			{
 				sched.workersLk.Lock()
-				enabled := worker.enabled
-				worker.enabled = true
+				enabled := worker.Enabled
+				worker.Enabled = true
 				sched.workersLk.Unlock()

 				if !enabled {
@@ -248,9 +248,9 @@ func (sw *schedWorker) checkSession(ctx context.Context) bool {
 func (sw *schedWorker) requestWindows() bool {
 	for ; sw.windowsRequested < SchedWindows; sw.windowsRequested++ {
 		select {
-		case sw.sched.windowRequests <- &schedWindowRequest{
-			worker: sw.wid,
-			done:   sw.scheduledWindows,
+		case sw.sched.windowRequests <- &SchedWindowRequest{
+			Worker: sw.wid,
+			Done:   sw.scheduledWindows,
 		}:
 		case <-sw.sched.closing:
 			return false
@@ -290,21 +290,21 @@ func (sw *schedWorker) workerCompactWindows() {
 			lower := worker.activeWindows[wi]
 			var moved []int

-			for ti, todo := range window.todo {
-				needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType)
-				if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) {
+			for ti, todo := range window.Todo {
+				needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
+				if !lower.Allocated.CanHandleRequest(todo.SealTask(), needRes, sw.wid, "compactWindows", worker.Info) {
 					continue
 				}

 				moved = append(moved, ti)
-				lower.todo = append(lower.todo, todo)
-				lower.allocated.add(worker.info.Resources, needRes)
-				window.allocated.free(worker.info.Resources, needRes)
+				lower.Todo = append(lower.Todo, todo)
+				lower.Allocated.Add(todo.SealTask(), worker.Info.Resources, needRes)
+				window.Allocated.Free(todo.SealTask(), worker.Info.Resources, needRes)
 			}

 			if len(moved) > 0 {
-				newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved))
-				for i, t := range window.todo {
+				newTodo := make([]*WorkerRequest, 0, len(window.Todo)-len(moved))
+				for i, t := range window.Todo {
 					if len(moved) > 0 && moved[0] == i {
 						moved = moved[1:]
 						continue
@@ -312,16 +312,16 @@ func (sw *schedWorker) workerCompactWindows() {

 					newTodo = append(newTodo, t)
 				}
-				window.todo = newTodo
+				window.Todo = newTodo
 			}
 		}
 	}

 	var compacted int
-	var newWindows []*schedWindow
+	var newWindows []*SchedWindow

 	for _, window := range worker.activeWindows {
-		if len(window.todo) == 0 {
+		if len(window.Todo) == 0 {
 			compacted++
 			continue
 		}
@@ -347,13 +347,13 @@
 		firstWindow := worker.activeWindows[0]

 		// process tasks within a window, preferring tasks at lower indexes
-		for len(firstWindow.todo) > 0 {
+		for len(firstWindow.Todo) > 0 {
 			tidx := -1

 			worker.lk.Lock()
-			for t, todo := range firstWindow.todo {
-				needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType)
-				if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) {
+			for t, todo := range firstWindow.Todo {
+				needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
+				if worker.preparing.CanHandleRequest(todo.SealTask(), needRes, sw.wid, "startPreparing", worker.Info) {
 					tidx = t
 					break
 				}
@@ -364,9 +364,9 @@
 				break assignLoop
 			}

-			todo := firstWindow.todo[tidx]
+			todo := firstWindow.Todo[tidx]

-			log.Debugf("assign worker sector %d", todo.sector.ID.Number)
+			log.Debugf("assign worker sector %d", todo.Sector.ID.Number)
 			err := sw.startProcessingTask(todo)

 			if err != nil {
@@ -375,9 +375,9 @@
 			}

 			// Note: we're not freeing window.allocated resources here very much on purpose
-			copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:])
-			firstWindow.todo[len(firstWindow.todo)-1] = nil
-			firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1]
+			copy(firstWindow.Todo[tidx:], firstWindow.Todo[tidx+1:])
+			firstWindow.Todo[len(firstWindow.Todo)-1] = nil
+			firstWindow.Todo = firstWindow.Todo[:len(firstWindow.Todo)-1]
 		}

 		copy(worker.activeWindows, worker.activeWindows[1:])
@@ -405,16 +405,16 @@
 		firstWindow := worker.activeWindows[0]

 		// process tasks within a window, preferring tasks at lower indexes
-		for len(firstWindow.todo) > 0 {
+		for len(firstWindow.Todo) > 0 {
 			tidx := -1

-			for t, todo := range firstWindow.todo {
-				if todo.taskType != sealtasks.TTCommit1 && todo.taskType != sealtasks.TTCommit2 { // todo put in task
+			for t, todo := range firstWindow.Todo {
+				if todo.TaskType != sealtasks.TTCommit1 && todo.TaskType != sealtasks.TTCommit2 { // todo put in task
 					continue
 				}

-				needRes := storiface.ResourceTable[todo.taskType][todo.sector.ProofType]
-				if worker.active.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) {
+				needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
+				if worker.active.CanHandleRequest(todo.SealTask(), needRes, sw.wid, "startPreparing", worker.Info) {
 					tidx = t
 					break
 				}
@@ -424,9 +424,9 @@
 				break assignLoop
 			}

-			todo := firstWindow.todo[tidx]
+			todo := firstWindow.Todo[tidx]

-			log.Debugf("assign worker sector %d (ready)", todo.sector.ID.Number)
+			log.Debugf("assign worker sector %d (ready)", todo.Sector.ID.Number)
 			err := sw.startProcessingReadyTask(todo)

 			if err != nil {
@@ -435,9 +435,9 @@
 			}

 			// Note: we're not freeing window.allocated resources here very much on purpose
-			copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:])
-			firstWindow.todo[len(firstWindow.todo)-1] = nil
-			firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1]
+			copy(firstWindow.Todo[tidx:], firstWindow.Todo[tidx+1:])
+			firstWindow.Todo[len(firstWindow.Todo)-1] = nil
+			firstWindow.Todo = firstWindow.Todo[:len(firstWindow.Todo)-1]
 		}

 		copy(worker.activeWindows, worker.activeWindows[1:])
@@ -448,24 +448,24 @@
 	}
 }

-func (sw *schedWorker) startProcessingTask(req *workerRequest) error {
+func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
 	w, sh := sw.worker, sw.sched

-	needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType)
+	needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)

 	w.lk.Lock()
-	w.preparing.add(w.info.Resources, needRes)
+	w.preparing.Add(req.SealTask(), w.Info.Resources, needRes)
 	w.lk.Unlock()

 	go func() {
 		// first run the prepare step (e.g. fetching sector data from other worker)
-		tw := sh.workTracker.worker(sw.wid, w.info, w.workerRpc)
+		tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
 		tw.start()
-		err := req.prepare(req.ctx, tw)
+		err := req.prepare(req.Ctx, tw)
 		w.lk.Lock()

 		if err != nil {
-			w.preparing.free(w.info.Resources, needRes)
+			w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
 			w.lk.Unlock()

 			select {
@@ -477,7 +477,7 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error {

 			select {
 			case req.ret <- workerResponse{err: err}:
-			case <-req.ctx.Done():
+			case <-req.Ctx.Done():
 				log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err)
 			case <-sh.closing:
 				log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
@@ -485,17 +485,17 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error {
 			return
 		}

-		tw = sh.workTracker.worker(sw.wid, w.info, w.workerRpc)
+		tw = sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)

 		// start tracking work first early in case we need to wait for resources
 		werr := make(chan error, 1)
 		go func() {
-			werr <- req.work(req.ctx, tw)
+			werr <- req.work(req.Ctx, tw)
 		}()

 		// wait (if needed) for resources in the 'active' window
-		err = w.active.withResources(sw.wid, w.info, needRes, &w.lk, func() error {
-			w.preparing.free(w.info.Resources, needRes)
+		err = w.active.withResources(sw.wid, w.Info, req.SealTask(), needRes, &w.lk, func() error {
+			w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
 			w.lk.Unlock()
 			defer w.lk.Lock() // we MUST return locked from this function

@@ -511,7 +511,7 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error {

 		select {
 		case req.ret <- workerResponse{err: err}:
-		case <-req.ctx.Done():
+		case <-req.Ctx.Done():
 			log.Warnf("request got cancelled before we could respond")
 		case <-sh.closing:
 			log.Warnf("scheduler closed while sending response")
@@ -531,22 +531,22 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error {
 	return nil
 }

-func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error {
+func (sw *schedWorker) startProcessingReadyTask(req *WorkerRequest) error {
 	w, sh := sw.worker, sw.sched

-	needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType)
+	needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)

-	w.active.add(w.info.Resources, needRes)
+	w.active.Add(req.SealTask(), w.Info.Resources, needRes)

 	go func() {
 		// Do the work!
-		tw := sh.workTracker.worker(sw.wid, w.info, w.workerRpc)
+		tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
 		tw.start()
-		err := req.work(req.ctx, tw)
+		err := req.work(req.Ctx, tw)

 		select {
 		case req.ret <- workerResponse{err: err}:
-		case <-req.ctx.Done():
+		case <-req.Ctx.Done():
 			log.Warnf("request got cancelled before we could respond")
 		case <-sh.closing:
 			log.Warnf("scheduler closed while sending response")
@@ -554,7 +554,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error {

 		w.lk.Lock()

-		w.active.free(w.info.Resources, needRes)
+		w.active.Free(req.SealTask(), w.Info.Resources, needRes)

 		select {
 		case sw.taskDone <- struct{}{}:
@@ -574,7 +574,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error {
 	return nil
 }

-func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) {
+func (sh *Scheduler) workerCleanup(wid storiface.WorkerID, w *WorkerHandle) {
 	select {
 	case <-w.closingMgr:
 	default:
@@ -592,13 +592,13 @@ func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) {
 	if !w.cleanupStarted {
 		w.cleanupStarted = true

-		newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
-		for _, window := range sh.openWindows {
-			if window.worker != wid {
+		newWindows := make([]*SchedWindowRequest, 0, len(sh.OpenWindows))
+		for _, window := range sh.OpenWindows {
+			if window.Worker != wid {
 				newWindows = append(newWindows, window)
 			}
 		}
-		sh.openWindows = newWindows
+		sh.OpenWindows = newWindows

 		log.Debugf("worker %s dropped", wid)
 	}
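startProcessingTask books resources twice on purpose: against preparing while sector data is fetched, then against active (possibly waiting inside withResources) before the prepare reservation is released. A stripped-down, runnable model of that hand-off, with illustrative names rather than the lotus types:

package main

import "fmt"

type pool struct{ used, cap int }

func (p *pool) tryAdd(n int) bool {
	if p.used+n > p.cap {
		return false
	}
	p.used += n
	return true
}
func (p *pool) free(n int) { p.used -= n }

func main() {
	preparing := &pool{cap: 8}
	active := &pool{cap: 8}
	need := 4

	if !preparing.tryAdd(need) { // prepare phase: fetch sector data
		fmt.Println("no room to prepare")
		return
	}
	if active.tryAdd(need) { // task phase: like withResources on 'active'
		preparing.free(need) // hand the reservation over
		fmt.Printf("running: active=%d preparing=%d\n", active.used, preparing.used)
		active.free(need)
	}
}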
44 extern/sector-storage/sealtasks/task.go vendored
@@ -1,5 +1,15 @@
 package sealtasks

+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-state-types/abi"
+)
+
 type TaskType string

 const (
@@ -104,3 +114,37 @@ func (a TaskType) Short() string {

 	return n
 }
+
+type SealTaskType struct {
+	TaskType
+	abi.RegisteredSealProof
+}
+
+func (a TaskType) SealTask(spt abi.RegisteredSealProof) SealTaskType {
+	return SealTaskType{
+		TaskType:            a,
+		RegisteredSealProof: spt,
+	}
+}
+
+func SttFromString(s string) (SealTaskType, error) {
+	var res SealTaskType
+
+	sub := strings.SplitN(s, ":", 2)
+	if len(sub) != 2 {
+		return res, xerrors.Errorf("seal task type string invalid")
+	}
+
+	res.TaskType = TaskType(sub[1])
+	spt, err := strconv.ParseInt(sub[0], 10, 64)
+	if err != nil {
+		return SealTaskType{}, err
+	}
+	res.RegisteredSealProof = abi.RegisteredSealProof(spt)
+
+	return res, nil
+}
+
+func (a SealTaskType) String() string {
+	return fmt.Sprintf("%d:%s", a.RegisteredSealProof, a.TaskType)
+}
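SealTaskType's String/SttFromString pair gives a stable text encoding, "<proof>:<task>", so seal-task limits can be keyed by name. A quick round-trip check, written as a test-style sketch inside the sealtasks package (the proof enum value comes from go-state-types):

// TTPreCommit1 is an existing constant in this package.
stt := TTPreCommit1.SealTask(abi.RegisteredSealProof_StackedDrg32GiBV1)

s := stt.String() // "3:seal/v0/precommit/1" for the 32GiB v1 proof

back, err := SttFromString(s)
if err != nil || back != stt {
	panic("SealTaskType round-trip failed")
}

SealTaskType has only comparable fields, so the == comparison above is valid Go.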
20 extern/sector-storage/selector_alloc.go vendored
@@ -26,18 +26,18 @@ func newAllocSelector(index stores.SectorIndex, alloc storiface.SectorFileType,
 	}
 }

-func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
+func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *WorkerHandle) (bool, bool, error) {
 	tasks, err := whnd.TaskTypes(ctx)
 	if err != nil {
-		return false, xerrors.Errorf("getting supported worker task types: %w", err)
+		return false, false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
 	if _, supported := tasks[task]; !supported {
-		return false, nil
+		return false, false, nil
 	}

 	paths, err := whnd.workerRpc.Paths(ctx)
 	if err != nil {
-		return false, xerrors.Errorf("getting worker paths: %w", err)
+		return false, false, xerrors.Errorf("getting worker paths: %w", err)
 	}

 	have := map[storiface.ID]struct{}{}
@@ -47,25 +47,25 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi

 	ssize, err := spt.SectorSize()
 	if err != nil {
-		return false, xerrors.Errorf("getting sector size: %w", err)
+		return false, false, xerrors.Errorf("getting sector size: %w", err)
 	}

 	best, err := s.index.StorageBestAlloc(ctx, s.alloc, ssize, s.ptype)
 	if err != nil {
-		return false, xerrors.Errorf("finding best alloc storage: %w", err)
+		return false, false, xerrors.Errorf("finding best alloc storage: %w", err)
 	}

 	for _, info := range best {
 		if _, ok := have[info.ID]; ok {
-			return true, nil
+			return true, false, nil
 		}
 	}

-	return false, nil
+	return false, false, nil
 }

-func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
-	return a.utilization() < b.utilization(), nil
+func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error) {
+	return a.Utilization() < b.Utilization(), nil
 }

 var _ WorkerSelector = &allocSelector{}
20
extern/sector-storage/selector_existing.go
vendored
@ -28,18 +28,18 @@ func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc st
	}
}

func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *WorkerHandle) (bool, bool, error) {
	tasks, err := whnd.TaskTypes(ctx)
	if err != nil {
		return false, xerrors.Errorf("getting supported worker task types: %w", err)
		return false, false, xerrors.Errorf("getting supported worker task types: %w", err)
	}
	if _, supported := tasks[task]; !supported {
		return false, nil
		return false, false, nil
	}

	paths, err := whnd.workerRpc.Paths(ctx)
	if err != nil {
		return false, xerrors.Errorf("getting worker paths: %w", err)
		return false, false, xerrors.Errorf("getting worker paths: %w", err)
	}

	have := map[storiface.ID]struct{}{}
@ -49,25 +49,25 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt

	ssize, err := spt.SectorSize()
	if err != nil {
		return false, xerrors.Errorf("getting sector size: %w", err)
		return false, false, xerrors.Errorf("getting sector size: %w", err)
	}

	best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, ssize, s.allowFetch)
	if err != nil {
		return false, xerrors.Errorf("finding best storage: %w", err)
		return false, false, xerrors.Errorf("finding best storage: %w", err)
	}

	for _, info := range best {
		if _, ok := have[info.ID]; ok {
			return true, nil
			return true, false, nil
		}
	}

	return false, nil
	return false, false, nil
}

func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
	return a.utilization() < b.utilization(), nil
func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error) {
	return a.Utilization() < b.Utilization(), nil
}

var _ WorkerSelector = &existingSelector{}
98
extern/sector-storage/selector_move.go
vendored
Normal file
@ -0,0 +1,98 @@
package sectorstorage

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

type moveSelector struct {
	index       stores.SectorIndex
	sector      abi.SectorID
	alloc       storiface.SectorFileType
	destPtype   storiface.PathType
	allowRemote bool
}

func newMoveSelector(index stores.SectorIndex, sector abi.SectorID, alloc storiface.SectorFileType, destPtype storiface.PathType, allowRemote bool) *moveSelector {
	return &moveSelector{
		index:       index,
		sector:      sector,
		alloc:       alloc,
		destPtype:   destPtype,
		allowRemote: allowRemote,
	}
}

func (s *moveSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *WorkerHandle) (bool, bool, error) {
	tasks, err := whnd.TaskTypes(ctx)
	if err != nil {
		return false, false, xerrors.Errorf("getting supported worker task types: %w", err)
	}
	if _, supported := tasks[task]; !supported {
		return false, false, nil
	}

	paths, err := whnd.workerRpc.Paths(ctx)
	if err != nil {
		return false, false, xerrors.Errorf("getting worker paths: %w", err)
	}

	workerPaths := map[storiface.ID]int{}
	for _, path := range paths {
		workerPaths[path.ID] = 0
	}

	ssize, err := spt.SectorSize()
	if err != nil {
		return false, false, xerrors.Errorf("getting sector size: %w", err)
	}

	// note: allowFetch is always false here, because we want to find workers with
	// the sector available locally
	preferred, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, ssize, false)
	if err != nil {
		return false, false, xerrors.Errorf("finding preferred storage: %w", err)
	}

	for _, info := range preferred {
		if _, ok := workerPaths[info.ID]; ok {
			workerPaths[info.ID]++
		}
	}

	best, err := s.index.StorageBestAlloc(ctx, s.alloc, ssize, s.destPtype)
	if err != nil {
		return false, false, xerrors.Errorf("finding best dest storage: %w", err)
	}

	var ok bool

	for _, info := range best {
		if n, has := workerPaths[info.ID]; has {
			ok = true

			// If the worker has a local path with the sector already in it,
			// prefer that worker; this usually means that the move operation is
			// either a no-op because the sector is already in the correct path,
			// or that the move is a local move.
			if n > 0 {
				return true, true, nil
			}
		}
	}

	return ok && s.allowRemote, false, nil
}

func (s *moveSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error) {
	return a.Utilization() < b.Utilization(), nil
}

var _ WorkerSelector = &moveSelector{}
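All of the selector changes above share one signature change: Ok now returns a second boolean marking the worker as preferred. A sketch of the WorkerSelector interface implied by these implementations (an assumption; the actual definition lives elsewhere in this diff):

	type WorkerSelector interface {
		// Ok reports whether the worker can run the task; the second result
		// marks the worker as preferred, letting the scheduler pick it over
		// merely acceptable workers (e.g. a local move over a remote one).
		Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *WorkerHandle) (ok, preferred bool, err error)

		// Cmp reports whether worker a is a better fit than worker b.
		Cmp(ctx context.Context, task sealtasks.TaskType, a, b *WorkerHandle) (bool, error)
	}
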
10
extern/sector-storage/selector_task.go
vendored
@ -19,17 +19,17 @@ func newTaskSelector() *taskSelector {
	return &taskSelector{}
}

func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *WorkerHandle) (bool, bool, error) {
	tasks, err := whnd.TaskTypes(ctx)
	if err != nil {
		return false, xerrors.Errorf("getting supported worker task types: %w", err)
		return false, false, xerrors.Errorf("getting supported worker task types: %w", err)
	}
	_, supported := tasks[task]

	return supported, nil
	return supported, false, nil
}

func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *workerHandle) (bool, error) {
func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *WorkerHandle) (bool, error) {
	atasks, err := a.TaskTypes(ctx)
	if err != nil {
		return false, xerrors.Errorf("getting supported worker task types: %w", err)
@ -43,7 +43,7 @@ func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *work
		return len(atasks) < len(btasks), nil // prefer workers which can do less
	}

	return a.utilization() < b.utilization(), nil
	return a.Utilization() < b.Utilization(), nil
}

var _ WorkerSelector = &taskSelector{}
23
extern/sector-storage/stats.go
vendored
@ -15,7 +15,7 @@ func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.Worke

	out := map[uuid.UUID]storiface.WorkerStats{}

	cb := func(ctx context.Context, id storiface.WorkerID, handle *workerHandle) {
	cb := func(ctx context.Context, id storiface.WorkerID, handle *WorkerHandle) {
		handle.lk.Lock()

		ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
@ -32,18 +32,25 @@ func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.Worke
		}

		out[uuid.UUID(id)] = storiface.WorkerStats{
			Info:    handle.info,
			Info:    handle.Info,
			Tasks:   taskList,
			Enabled: handle.enabled,
			Enabled: handle.Enabled,

			MemUsedMin: handle.active.memUsedMin,
			MemUsedMax: handle.active.memUsedMax,
			GpuUsed:    handle.active.gpuUsed,
			CpuUse:     handle.active.cpuUse,

			TaskCounts: map[string]int{},
		}

		for tt, count := range handle.active.taskCounters {
			out[uuid.UUID(id)].TaskCounts[tt.String()] = count
		}

		handle.lk.Unlock()
	}

	for id, handle := range m.sched.workers {
	for id, handle := range m.sched.Workers {
		cb(ctx, id, handle)
	}

@ -72,14 +79,14 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob {

	m.sched.workersLk.RLock()

	for id, handle := range m.sched.workers {
	for id, handle := range m.sched.Workers {
		handle.wndLk.Lock()
		for wi, window := range handle.activeWindows {
			for _, request := range window.todo {
			for _, request := range window.Todo {
				out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{
					ID:     storiface.UndefCall,
					Sector: request.sector.ID,
					Task:   request.taskType,
					Sector: request.Sector.ID,
					Task:   request.TaskType,

					RunWait: wi + 2,
					Start:   request.start,
				})
4
extern/sector-storage/stores/local.go
vendored
@ -157,7 +157,9 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
		}
	}

	log.Infow("storage stat", "took", time.Now().Sub(start), "reservations", len(p.reservations))
	if time.Now().Sub(start) > 5*time.Second {
		log.Warnw("slow storage stat", "took", time.Now().Sub(start), "reservations", len(p.reservations))
	}

	return stat, err
}
38
extern/sector-storage/storiface/cbor_gen.go
vendored
@ -23,25 +23,26 @@ func (t *CallID) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{162}); err != nil {

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write([]byte{162}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Sector (abi.SectorID) (struct)
	if len("Sector") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"Sector\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sector"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sector"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Sector")); err != nil {
		return err
	}

	if err := t.Sector.MarshalCBOR(w); err != nil {
	if err := t.Sector.MarshalCBOR(cw); err != nil {
		return err
	}

@ -50,7 +51,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Value in field \"ID\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("ID")); err != nil {
@ -61,26 +62,31 @@ func (t *CallID) MarshalCBOR(w io.Writer) error {
		return xerrors.Errorf("Byte array in field t.ID was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ID))); err != nil {
	if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.ID))); err != nil {
		return err
	}

	if _, err := w.Write(t.ID[:]); err != nil {
	if _, err := cw.Write(t.ID[:]); err != nil {
		return err
	}
	return nil
}

func (t *CallID) UnmarshalCBOR(r io.Reader) error {
func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) {
	*t = CallID{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	cr := cbg.NewCborReader(r)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}
@ -95,7 +101,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error {
	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			sval, err := cbg.ReadString(cr)
			if err != nil {
				return err
			}
@ -109,7 +115,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error {

		{

			if err := t.Sector.UnmarshalCBOR(br); err != nil {
			if err := t.Sector.UnmarshalCBOR(cr); err != nil {
				return xerrors.Errorf("unmarshaling t.Sector: %w", err)
			}

@ -117,7 +123,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error {
		// t.ID (uuid.UUID) (array)
		case "ID":

			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
			maj, extra, err = cr.ReadHeader()
			if err != nil {
				return err
			}
@ -135,7 +141,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error {

			t.ID = [16]uint8{}

			if _, err := io.ReadFull(br, t.ID[:]); err != nil {
			if _, err := io.ReadFull(cr, t.ID[:]); err != nil {
				return err
			}

2
extern/sector-storage/storiface/resources.go
vendored
@ -26,6 +26,8 @@ type Resources struct {
	MaxParallelismGPU int `envname:"MAX_PARALLELISM_GPU"` // when 0, inherits MaxParallelism

	BaseMinMemory uint64 `envname:"BASE_MIN_MEMORY"` // What Must be in RAM for decent perf (shared between threads)

	MaxConcurrent int `envname:"MAX_CONCURRENT"` // Maximum number of tasks of this type that can be scheduled on a worker (0=default, no limit)
}

/*
2
extern/sector-storage/storiface/worker.go
vendored
@ -75,6 +75,8 @@ type WorkerStats struct {
	MemUsedMax uint64
	GpuUsed    float64 // nolint
	CpuUse     uint64  // nolint

	TaskCounts map[string]int
}

const (
2
extern/sector-storage/worker_local.go
vendored
@ -289,7 +289,7 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r
			log.Errorf("get hostname err: %+v", err)
		}

		err = xerrors.Errorf("%w [Hostname: %s]", err.Error(), hostname)
		err = xerrors.Errorf("%w [Hostname: %s]", err, hostname)
	}

	if doReturn(ctx, rt, ci, l.ret, res, toCallError(err)) {
400
extern/storage-sealing/cbor_gen.go
vendored
File diff suppressed because it is too large
3
extern/storage-sealing/input.go
vendored
@ -22,6 +22,7 @@ import (
	"github.com/filecoin-project/lotus/chain/types"
	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)

@ -220,7 +221,7 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er
		m.minerSector(sector.SectorType, sector.SectorNumber),
		pieceSizes,
		p.Unpadded(),
		NewNullReader(p.Unpadded()))
		nullreader.NewNullReader(p.Unpadded()))
	if err != nil {
		err = xerrors.Errorf("writing padding piece: %w", err)
		deal.accepted(sector.SectorNumber, offset, err)
@ -1,5 +1,23 @@
package nullreader

import (
	"io"

	"github.com/filecoin-project/go-state-types/abi"
)

type NullReader struct {
	*io.LimitedReader
}

func NewNullReader(size abi.UnpaddedPieceSize) io.Reader {
	return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)}
}

func (m NullReader) NullBytes() int64 {
	return m.N
}

// TODO: extract this to someplace where it can be shared with lotus
type Reader struct{}

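A minimal usage sketch (assuming, as the name suggests, that the wrapped Reader yields zero bytes): NullReader streams size zeros, and NullBytes reports how many remain unread.

	r := nullreader.NewNullReader(abi.UnpaddedPieceSize(1016))
	buf := make([]byte, 512)
	n, _ := r.Read(buf) // n == 512, buf holds zeros
	left := r.(*nullreader.NullReader).NullBytes() // 504 bytes remaining
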
20
extern/storage-sealing/nullreader.go
vendored
@ -1,20 +0,0 @@
package sealing

import (
	"io"

	"github.com/filecoin-project/go-state-types/abi"
	nr "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
)

type NullReader struct {
	*io.LimitedReader
}

func NewNullReader(size abi.UnpaddedPieceSize) io.Reader {
	return &NullReader{(io.LimitReader(&nr.Reader{}, int64(size))).(*io.LimitedReader)}
}

func (m NullReader) NullBytes() int64 {
	return m.N
}
3
extern/storage-sealing/states_sealing.go
vendored
@ -21,6 +21,7 @@ import (
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
)

var DealSectorPriority = 1024
@ -91,7 +92,7 @@ func (m *Sealing) padSector(ctx context.Context, sectorID storage.SectorRef, exi
	for i, size := range sizes {
		expectCid := zerocomm.ZeroPieceCommitment(size)

		ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, NewNullReader(size))
		ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, nullreader.NewNullReader(size))
		if err != nil {
			return nil, xerrors.Errorf("add piece: %w", err)
		}
@ -9,29 +9,32 @@ import (

	"contrib.go.opencensus.io/exporter/prometheus"
	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/lotus/api"
	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/metrics/proxy"
	"github.com/filecoin-project/lotus/node"
	"github.com/gorilla/mux"
	promclient "github.com/prometheus/client_golang/prometheus"
	"golang.org/x/time/rate"
)

// Handler returns a gateway http.Handler, to be mounted as-is on the server.
func Handler(a api.Gateway, rateLimit int64, connPerMinute int64, opts ...jsonrpc.ServerOption) (http.Handler, error) {
func Handler(gwapi lapi.Gateway, api lapi.FullNode, rateLimit int64, connPerMinute int64, opts ...jsonrpc.ServerOption) (http.Handler, error) {
	m := mux.NewRouter()

	serveRpc := func(path string, hnd interface{}) {
		rpcServer := jsonrpc.NewServer(opts...)
		rpcServer.Register("Filecoin", hnd)
		rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover")

		m.Handle(path, rpcServer)
	}

	ma := proxy.MetricedGatewayAPI(a)
	ma := proxy.MetricedGatewayAPI(gwapi)

	serveRpc("/rpc/v1", ma)
	serveRpc("/rpc/v0", api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))
	serveRpc("/rpc/v0", lapi.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))

	registry := promclient.DefaultRegisterer.(*promclient.Registry)
	exporter, err := prometheus.NewExporter(prometheus.Options{
@ -42,6 +45,8 @@ func Handler(a api.Gateway, rateLimit int64, connPerMinute int64, opts ...jsonrp
		return nil, err
	}
	m.Handle("/debug/metrics", exporter)
	m.Handle("/health/livez", node.NewLiveHandler(api))
	m.Handle("/health/readyz", node.NewReadyHandler(api))
	m.PathPrefix("/").Handler(http.DefaultServeMux)

	/*ah := &auth.Handler{
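A wiring sketch for the new signature (assumed setup; full is a hypothetical v1 FullNode client, and the limits shown disable rate limiting, mirroring the test below):

	gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit, 0, time.Minute)
	h, err := gateway.Handler(gwapi, full, 0, 0)
	if err != nil {
		log.Fatal(err)
	}
	_ = (&http.Server{Addr: ":2346", Handler: h}).ListenAndServe()
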
@ -17,6 +17,7 @@ import (
	"github.com/filecoin-project/go-state-types/dline"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/api"
	apitypes "github.com/filecoin-project/lotus/api/types"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
@ -180,6 +181,10 @@ func (gw *Node) limit(ctx context.Context, tokens int) error {
	return nil
}

func (gw *Node) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
	return build.OpenRPCDiscoverJSON_Gateway(), nil
}

func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) {
	if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
		return api.APIVersion{}, err
10
go.mod
@ -41,7 +41,7 @@ require (
	github.com/filecoin-project/go-legs v0.3.7
	github.com/filecoin-project/go-padreader v0.0.1
	github.com/filecoin-project/go-paramfetch v0.0.4
	github.com/filecoin-project/go-state-types v0.1.3
	github.com/filecoin-project/go-state-types v0.1.4
	github.com/filecoin-project/go-statemachine v1.0.2
	github.com/filecoin-project/go-statestore v0.2.0
	github.com/filecoin-project/go-storedcounter v0.1.0
@ -109,7 +109,7 @@ require (
	github.com/koalacxr/quantile v0.0.1
	github.com/libp2p/go-buffer-pool v0.0.2
	github.com/libp2p/go-eventbus v0.2.1
	github.com/libp2p/go-libp2p v0.19.0
	github.com/libp2p/go-libp2p v0.19.3
	github.com/libp2p/go-libp2p-connmgr v0.3.1
	github.com/libp2p/go-libp2p-core v0.15.1
	github.com/libp2p/go-libp2p-discovery v0.6.0
@ -117,7 +117,7 @@ require (
	github.com/libp2p/go-libp2p-mplex v0.6.0 // indirect
	github.com/libp2p/go-libp2p-noise v0.4.0
	github.com/libp2p/go-libp2p-peerstore v0.6.0
	github.com/libp2p/go-libp2p-pubsub v0.6.1
	github.com/libp2p/go-libp2p-pubsub v0.7.0
	github.com/libp2p/go-libp2p-quic-transport v0.17.0
	github.com/libp2p/go-libp2p-record v0.1.3
	github.com/libp2p/go-libp2p-resource-manager v0.2.1
@ -145,7 +145,7 @@ require (
	github.com/syndtr/goleveldb v1.0.0
	github.com/urfave/cli/v2 v2.3.0
	github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
	github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14
	github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799
	github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
	github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
	github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
@ -277,7 +277,7 @@ require (
	github.com/libp2p/go-tcp-transport v0.5.1 // indirect
	github.com/libp2p/go-ws-transport v0.6.0 // indirect
	github.com/libp2p/go-yamux/v3 v3.1.1 // indirect
	github.com/lucas-clemente/quic-go v0.27.0 // indirect
	github.com/lucas-clemente/quic-go v0.27.1 // indirect
	github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
	github.com/magefile/mage v1.9.0 // indirect
	github.com/mailru/easyjson v0.7.6 // indirect
16
go.sum
@ -368,8 +368,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.3 h1:rzIJyQo5HO2ptc8Jcu8P0qTutnI7NWwTle54eAHoNO0=
github.com/filecoin-project/go-state-types v0.1.3/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.4 h1:NU0veVNxtDiLD/eRyKHV9lv3njSzfTh/sJGxxvcYcpg=
github.com/filecoin-project/go-state-types v0.1.4/go.mod h1:xCA/WfKlC2zcn3fUmDv4IrzznwS98X5XW/irUP3Lhxg=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
github.com/filecoin-project/go-statemachine v1.0.2-0.20220322104818-27f8fbb86dfd/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
@ -1086,8 +1087,8 @@ github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76f
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
github.com/libp2p/go-libp2p v0.18.0-rc3/go.mod h1:WYL+Xw1iuwi6rdfzw5VIEpD+HqzYucHZ6fcUuumbI3M=
github.com/libp2p/go-libp2p v0.19.0 h1:zosskMbaobL7UDCVLEe1m5CGs1TaFNFoN/M5XLiKg0U=
github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI=
github.com/libp2p/go-libp2p v0.19.3 h1:LqjvuBWdyYSqvkH4VVYxA78Fkphzg2Pq86VMnilqgkw=
github.com/libp2p/go-libp2p v0.19.3/go.mod h1:AGlPVLjh0+6jvEtf+a2gZEux7yHJrYXnG9IC7wcQ2NY=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@ -1241,8 +1242,9 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg=
github.com/libp2p/go-libp2p-pubsub v0.6.1 h1:wycbV+f4rreCoVY61Do6g/BUk0RIrbNRcYVbn+QkjGk=
github.com/libp2p/go-libp2p-pubsub v0.6.1/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg=
github.com/libp2p/go-libp2p-pubsub v0.7.0 h1:Fd9198JVc3pCsKuzd37TclzM0QcHA+uDyoiG2pvT7s4=
github.com/libp2p/go-libp2p-pubsub v0.7.0/go.mod h1:EuyBJFtF8qF67IEA98biwK8Xnw5MNJpJ/Z+8iWCMFwc=
github.com/libp2p/go-libp2p-pubsub-router v0.5.0/go.mod h1:TRJKskSem3C0aSb3CmRgPwq6IleVFzds6hS09fmZbGM=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA=
@ -1457,8 +1459,9 @@ github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0
github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg=
github.com/lucas-clemente/quic-go v0.27.0 h1:v6WY87q9zD4dKASbG8hy/LpzAVNzEQzw8sEIeloJsc4=
github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
github.com/lucas-clemente/quic-go v0.27.1 h1:sOw+4kFSVrdWOYmUjufQ9GBVPqZ+tu+jMtXxXNmRJyk=
github.com/lucas-clemente/quic-go v0.27.1/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1981,8 +1984,9 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:f
github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20220224212727-7a699437a831/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14 h1:vo2wkP2ceHyGyZwFFtAabpot03EeSxxwAe57pOI9E/4=
github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799 h1:DOOT2B85S0tHoLGTzV+FakaSSihgRCVwZkjqKQP5L/w=
github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
@ -291,7 +291,7 @@ func startNodes(

	// Create a gateway server in front of the full node
	gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit, 0, time.Minute)
	handler, err := gateway.Handler(gwapi, 0, 0)
	handler, err := gateway.Handler(gwapi, full, 0, 0)
	require.NoError(t, err)

	l, err := net.Listen("tcp", "127.0.0.1:0")
@ -570,6 +570,8 @@ func (n *Ensemble) Start() *Ensemble {
	}

	noLocal := m.options.minerNoLocalSealing
	assigner := m.options.minerAssigner
	disallowRemoteFinalize := m.options.disallowRemoteFinalize

	var mineBlock = make(chan lotusminer.MineReq)
	opts := []node.Option{
@ -595,6 +597,8 @@ func (n *Ensemble) Start() *Ensemble {
		scfg.Storage.AllowCommit = false
	}

	scfg.Storage.Assigner = assigner
	scfg.Storage.DisallowRemoteFinalize = disallowRemoteFinalize
	scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
	return scfg.StorageManager()
}),
@ -34,13 +34,15 @@ type nodeOpts struct {
	ownerKey      *wallet.Key
	extraNodeOpts []node.Option

	subsystems           MinerSubsystem
	mainMiner            *TestMiner
	disableLibp2p        bool
	optBuilders          []OptBuilder
	sectorSize           abi.SectorSize
	maxStagingDealsBytes int64
	minerNoLocalSealing  bool // use worker
	subsystems             MinerSubsystem
	mainMiner              *TestMiner
	disableLibp2p          bool
	optBuilders            []OptBuilder
	sectorSize             abi.SectorSize
	maxStagingDealsBytes   int64
	minerNoLocalSealing    bool // use worker
	minerAssigner          string
	disallowRemoteFinalize bool

	workerTasks      []sealtasks.TaskType
	workerStorageOpt func(stores.Store) stores.Store
@ -97,6 +99,20 @@ func WithNoLocalSealing(nope bool) NodeOpt {
	}
}

func WithAssigner(a string) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.minerAssigner = a
		return nil
	}
}

func WithDisallowRemoteFinalize(d bool) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.disallowRemoteFinalize = d
		return nil
	}
}

func DisableLibp2p() NodeOpt {
	return func(opts *nodeOpts) error {
		opts.disableLibp2p = true
@ -41,6 +41,38 @@ func TestWorkerPledge(t *testing.T) {
	miner.PledgeSectors(ctx, 1, 0, nil)
}

func TestWorkerPledgeSpread(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal}),
		kit.WithAssigner("spread"),
	) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	miner.PledgeSectors(ctx, 1, 0, nil)
}

func TestWorkerPledgeLocalFin(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal}),
		kit.WithDisallowRemoteFinalize(true),
	) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	miner.PledgeSectors(ctx, 1, 0, nil)
}

func TestWorkerDataCid(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, _ := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
@ -49,17 +81,23 @@ func TestWorkerDataCid(t *testing.T) {
	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)
	/*
		pi, err := miner.ComputeDataCid(ctx, 1016, strings.NewReader(strings.Repeat("a", 1016)))
		require.NoError(t, err)
		require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
		require.Equal(t, "baga6ea4seaqlhznlutptgfwhffupyer6txswamerq5fc2jlwf2lys2mm5jtiaeq", pi.PieceCID.String())
	*/

	pi, err := miner.ComputeDataCid(ctx, 1016, strings.NewReader(strings.Repeat("a", 1016)))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
	require.Equal(t, "baga6ea4seaqlhznlutptgfwhffupyer6txswamerq5fc2jlwf2lys2mm5jtiaeq", pi.PieceCID.String())

	bigPiece := abi.PaddedPieceSize(16 << 20).Unpadded()
	pi, err := miner.ComputeDataCid(ctx, bigPiece, strings.NewReader(strings.Repeat("a", int(bigPiece))))
	pi, err = miner.ComputeDataCid(ctx, bigPiece, strings.NewReader(strings.Repeat("a", int(bigPiece))))
	require.NoError(t, err)
	require.Equal(t, bigPiece.Padded(), pi.Size)
	require.Equal(t, "baga6ea4seaqmhoxl2ybw5m2wyd3pt3h4zmp7j52yumzu2rar26twns3uocq7yfa", pi.PieceCID.String())

	nonFullPiece := abi.PaddedPieceSize(10 << 20).Unpadded()
	pi, err = miner.ComputeDataCid(ctx, bigPiece, strings.NewReader(strings.Repeat("a", int(nonFullPiece))))
	require.NoError(t, err)
	require.Equal(t, bigPiece.Padded(), pi.Size)
	require.Equal(t, "baga6ea4seaqbxib4pdxs5cqdn3fmtj4rcxk6rx6ztiqmrx7fcpo3ymuxbp2rodi", pi.PieceCID.String())
}

func TestWinningPostWorker(t *testing.T) {
47
lib/httpreader/httpreader.go
Normal file
@ -0,0 +1,47 @@
package httpreader

import (
	"io"
	"net/http"

	"golang.org/x/xerrors"
)

// HttpReader is a reader which will read an HTTP resource with a simple GET request.
// Before the first Read it will be passed over JSON-RPC as a URL.
type HttpReader struct {
	URL string

	reader io.ReadCloser
}

func (h *HttpReader) Close() error {
	h.URL = ""
	if h.reader != nil {
		return h.reader.Close()
	}
	return nil
}

func (h *HttpReader) Read(p []byte) (n int, err error) {
	if h.reader == nil {
		res, err := http.Get(h.URL)
		if err != nil {
			return 0, err
		}
		if res.StatusCode != http.StatusOK {
			return 0, xerrors.Errorf("unexpected http status %d", res.StatusCode)
		}

		// mark the reader as reading
		h.URL = ""
		h.reader = res.Body
	}
	if h.reader == nil {
		return 0, xerrors.Errorf("http reader closed")
	}

	return h.reader.Read(p)
}

var _ io.ReadCloser = &HttpReader{}
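A usage sketch (the URL is hypothetical): the sender constructs the reader with a URL, JSON-RPC transports only that URL, and the receiving side lazily issues the GET on the first Read.

	r := &httpreader.HttpReader{URL: "https://example.com/piece.car"}
	defer r.Close() // nolint:errcheck
	data, err := io.ReadAll(r)
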
@ -21,7 +21,9 @@ import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/go-state-types/abi"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"

	"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
	"github.com/filecoin-project/lotus/lib/httpreader"
)

var log = logging.Logger("rpcenc")
@ -33,6 +35,7 @@ type StreamType string
const (
	Null       StreamType = "null"
	PushStream StreamType = "push"
	HTTP       StreamType = "http"
	// TODO: Data transfer handoff to workers?
)

@ -101,9 +104,12 @@ func ReaderParamEncoder(addr string) jsonrpc.Option {
	return jsonrpc.WithParamEncoder(new(io.Reader), func(value reflect.Value) (reflect.Value, error) {
		r := value.Interface().(io.Reader)

		if r, ok := r.(*sealing.NullReader); ok {
		if r, ok := r.(*nullreader.NullReader); ok {
			return reflect.ValueOf(ReaderStream{Type: Null, Info: fmt.Sprint(r.N)}), nil
		}
		if r, ok := r.(*httpreader.HttpReader); ok && r.URL != "" {
			return reflect.ValueOf(ReaderStream{Type: HTTP, Info: r.URL}), nil
		}

		reqID := uuid.New()
		u, err := url.Parse(addr)
@ -412,13 +418,16 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
			return reflect.Value{}, xerrors.Errorf("unmarshaling reader id: %w", err)
		}

		if rs.Type == Null {
		switch rs.Type {
		case Null:
			n, err := strconv.ParseInt(rs.Info, 10, 64)
			if err != nil {
				return reflect.Value{}, xerrors.Errorf("parsing null byte count: %w", err)
			}

			return reflect.ValueOf(sealing.NewNullReader(abi.UnpaddedPieceSize(n))), nil
			return reflect.ValueOf(nullreader.NewNullReader(abi.UnpaddedPieceSize(n))), nil
		case HTTP:
			return reflect.ValueOf(&httpreader.HttpReader{URL: rs.Info}), nil
		}

		u, err := uuid.Parse(rs.Info)
@ -14,7 +14,8 @@ import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-jsonrpc"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"

	"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
)

type ReaderHandler struct {
@ -57,7 +58,7 @@ func (h *ReaderHandler) ReadAll(ctx context.Context, r io.Reader) ([]byte, error
}

func (h *ReaderHandler) ReadNullLen(ctx context.Context, r io.Reader) (int64, error) {
	return r.(*sealing.NullReader).N, nil
	return r.(*nullreader.NullReader).N, nil
}

func (h *ReaderHandler) ReadUrl(ctx context.Context, u string) (string, error) {
@ -118,7 +119,7 @@ func TestNullReaderProxy(t *testing.T) {

	defer closer()

	n, err := client.ReadNullLen(context.TODO(), sealing.NewNullReader(1016))
	n, err := client.ReadNullLen(context.TODO(), nullreader.NewNullReader(1016))
	require.NoError(t, err)
	require.Equal(t, int64(1016), n)
}
@ -159,6 +159,8 @@ func DefaultStorageMiner() *StorageMiner {
			// it's the ratio between 10gbit / 1gbit
			ParallelFetchLimit: 10,

			Assigner: "utilization",

			// By default use the hardware resource filtering strategy.
			ResourceFiltering: sectorstorage.ResourceFilteringHardware,
		},
@ -756,6 +756,30 @@ This parameter is ONLY applicable if the retrieval pricing policy strategy has b

		Comment: ``,
	},
	{
		Name: "Assigner",
		Type: "string",

		Comment: `Assigner specifies the worker assigner to use when scheduling tasks.
"utilization" (default) - assign tasks to workers with lowest utilization.
"spread" - assign tasks to as many distinct workers as possible.`,
	},
	{
		Name: "DisallowRemoteFinalize",
		Type: "bool",

		Comment: `DisallowRemoteFinalize when set to true will force all Finalize tasks to
run on workers with local access to both long-term storage and the sealing
path containing the sector.
--
WARNING: Only set this if all workers have access to long-term storage
paths. If this flag is enabled, and there are workers without long-term
storage access, sectors will not be moved from them, and Finalize tasks
will appear to be stuck.
--
If you see stuck Finalize tasks after enabling this setting, check
'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'`,
	},
	{
		Name: "ResourceFiltering",
		Type: "sectorstorage.ResourceFilteringStrategy",
@ -897,13 +921,13 @@ This is useful for forcing all deals to be assigned as snap deals to sectors mar
		Name: "MinCommitBatch",
		Type: "int",

		Comment: `maximum batched commit size - batches will be sent immediately above this size`,
		Comment: `minimum batched commit size - batches above this size will eventually be sent on a timeout`,
	},
	{
		Name: "MaxCommitBatch",
		Type: "int",

		Comment: ``,
		Comment: `maximum batched commit size - batches will be sent immediately above this size`,
	},
	{
		Name: "CommitBatchWait",
@ -63,6 +63,9 @@ func (c *StorageMiner) StorageManager() sectorstorage.Config {
		AllowProveReplicaUpdate2: c.Storage.AllowProveReplicaUpdate2,
		AllowRegenSectorKey:      c.Storage.AllowRegenSectorKey,
		ResourceFiltering:        c.Storage.ResourceFiltering,
		DisallowRemoteFinalize:   c.Storage.DisallowRemoteFinalize,

		Assigner: c.Storage.Assigner,

		ParallelCheckLimit: c.Proving.ParallelCheckLimit,
	}
@ -290,8 +290,9 @@ type SealingConfig struct {

	// enable / disable commit aggregation (takes effect after nv13)
	AggregateCommits bool
	// maximum batched commit size - batches will be sent immediately above this size
	// minimum batched commit size - batches above this size will eventually be sent on a timeout
	MinCommitBatch int
	// maximum batched commit size - batches will be sent immediately above this size
	MaxCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
	CommitBatchWait Duration
@ -329,6 +330,24 @@ type SealerConfig struct {
	AllowProveReplicaUpdate2 bool
	AllowRegenSectorKey      bool

	// Assigner specifies the worker assigner to use when scheduling tasks.
	// "utilization" (default) - assign tasks to workers with lowest utilization.
	// "spread" - assign tasks to as many distinct workers as possible.
	Assigner string

	// DisallowRemoteFinalize when set to true will force all Finalize tasks to
	// run on workers with local access to both long-term storage and the sealing
	// path containing the sector.
	// --
	// WARNING: Only set this if all workers have access to long-term storage
	// paths. If this flag is enabled, and there are workers without long-term
	// storage access, sectors will not be moved from them, and Finalize tasks
	// will appear to be stuck.
	// --
	// If you see stuck Finalize tasks after enabling this setting, check
	// 'lotus-miner sealing sched-diag' and 'lotus-miner storage find [sector num]'
	DisallowRemoteFinalize bool

	// ResourceFiltering instructs the system which resource filtering strategy
	// to use when evaluating tasks against this worker. An empty value defaults
	// to "hardware".
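A minimal sketch of setting the two new options programmatically (illustrative values; the kit tests above use WithAssigner and WithDisallowRemoteFinalize for the same effect):

	scfg := config.DefaultStorageMiner()
	scfg.Storage.Assigner = "spread"           // default is "utilization"
	scfg.Storage.DisallowRemoteFinalize = true // only if every worker can reach long-term storage
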
117
node/health.go
Normal file
@ -0,0 +1,117 @@
package node

import (
	"context"
	"net/http"
	"sync/atomic"
	"time"

	lapi "github.com/filecoin-project/lotus/api"
	logging "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p-core/network"
)

var healthlog = logging.Logger("healthcheck")

type HealthHandler struct {
	healthy int32
}

func (h *HealthHandler) SetHealthy(healthy bool) {
	var hi32 int32
	if healthy {
		hi32 = 1
	}
	atomic.StoreInt32(&h.healthy, hi32)
}

func (h *HealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if atomic.LoadInt32(&h.healthy) != 1 {
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
}

// Check that the node is still working. That is, that it's still processing the chain.
// If there have been no recent changes, consider the node to be dead.
func NewLiveHandler(api lapi.FullNode) *HealthHandler {
	ctx := context.Background()
	h := HealthHandler{}
	go func() {
		const (
			reset      int32         = 5
			maxbackoff time.Duration = time.Minute
			minbackoff time.Duration = time.Second
		)
		var (
			countdown int32
			headCh    <-chan []*lapi.HeadChange
			backoff   time.Duration = minbackoff
			err       error
		)
		minutely := time.NewTicker(time.Minute)
		for {
			if headCh == nil {
				healthlog.Infof("waiting %v before starting ChainNotify channel", backoff)
				<-time.After(backoff)
				headCh, err = api.ChainNotify(ctx)
				if err != nil {
					healthlog.Warnf("failed to instantiate ChainNotify channel; cannot determine liveness. %s", err)
					h.SetHealthy(false)
					nextbackoff := 2 * backoff
					if nextbackoff > maxbackoff {
						nextbackoff = maxbackoff
					}
					backoff = nextbackoff
					continue
				} else {
					healthlog.Infof("started ChainNotify channel")
					backoff = minbackoff
				}
			}
			select {
			case <-minutely.C:
				atomic.AddInt32(&countdown, -1)
				if countdown <= 0 {
					h.SetHealthy(false)
				}
			case _, ok := <-headCh:
				if !ok { // channel is closed, enter reconnect loop.
					h.SetHealthy(false)
					headCh = nil
					continue
				}
				atomic.StoreInt32(&countdown, reset)
				h.SetHealthy(true)
			}
		}
	}()
	return &h
}

// Check if we are ready to handle traffic.
// 1. sync workers are reasonably up to date.
// 2. libp2p is serviceable
func NewReadyHandler(api lapi.FullNode) *HealthHandler {
	ctx := context.Background()
	h := HealthHandler{}
	go func() {
		const heightTolerance = uint64(5)
		var nethealth, synchealth bool
		minutely := time.NewTicker(time.Minute)
		for {
			select {
			case <-minutely.C:
				netstat, err := api.NetAutoNatStatus(ctx)
				nethealth = err == nil && netstat.Reachability != network.ReachabilityUnknown

				nodestat, err := api.NodeStatus(ctx, false)
				synchealth = err == nil && nodestat.SyncStatus.Behind < heightTolerance

				h.SetHealthy(nethealth && synchealth)
			}
		}
	}()
	return &h
}
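A wiring sketch mirroring what the gateway handler above does (api is an assumed FullNode client; any mux works since HealthHandler implements http.Handler):

	mux := http.NewServeMux()
	mux.Handle("/health/livez", node.NewLiveHandler(api))   // 200 while the chain keeps moving
	mux.Handle("/health/readyz", node.NewReadyHandler(api)) // 200 when synced and reachable
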
@ -26,61 +26,67 @@ func (t *HelloMessage) MarshalCBOR(w io.Writer) error {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(lengthBufHelloMessage); err != nil {
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufHelloMessage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.HeaviestTipSet ([]cid.Cid) (slice)
|
||||
if len(t.HeaviestTipSet) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.HeaviestTipSet was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.HeaviestTipSet))); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.HeaviestTipSet))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.HeaviestTipSet {
|
||||
if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
|
||||
if err := cbg.WriteCid(w, v); err != nil {
|
||||
return xerrors.Errorf("failed writing cid field t.HeaviestTipSet: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// t.HeaviestTipSetHeight (abi.ChainEpoch) (int64)
|
||||
if t.HeaviestTipSetHeight >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.HeaviestTipSetHeight)); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.HeaviestTipSetHeight)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.HeaviestTipSetHeight-1)); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.HeaviestTipSetHeight-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.HeaviestTipSetWeight (big.Int) (struct)
|
||||
if err := t.HeaviestTipSetWeight.MarshalCBOR(w); err != nil {
|
||||
if err := t.HeaviestTipSetWeight.MarshalCBOR(cw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.GenesisHash (cid.Cid) (struct)
|
||||
|
||||
if err := cbg.WriteCidBuf(scratch, w, t.GenesisHash); err != nil {
|
||||
if err := cbg.WriteCid(cw, t.GenesisHash); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.GenesisHash: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {
|
||||
func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = HelloMessage{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
@ -91,7 +97,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
// t.HeaviestTipSet ([]cid.Cid) (slice)
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -110,7 +116,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
c, err := cbg.ReadCid(br)
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("reading cid field t.HeaviestTipSet failed: %w", err)
|
	}

@@ -119,7 +125,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {

	// t.HeaviestTipSetHeight (abi.ChainEpoch) (int64)
	{
-		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@@ -146,7 +152,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {

	{

-		if err := t.HeaviestTipSetWeight.UnmarshalCBOR(br); err != nil {
+		if err := t.HeaviestTipSetWeight.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.HeaviestTipSetWeight: %w", err)
		}

@@ -155,7 +161,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {

	{

-		c, err := cbg.ReadCid(br)
+		c, err := cbg.ReadCid(cr)
		if err != nil {
			return xerrors.Errorf("failed to read cid field t.GenesisHash: %w", err)
		}
@@ -173,46 +179,52 @@ func (t *LatencyMessage) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
-	if _, err := w.Write(lengthBufLatencyMessage); err != nil {
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write(lengthBufLatencyMessage); err != nil {
		return err
	}

-	scratch := make([]byte, 9)
-
	// t.TArrival (int64) (int64)
	if t.TArrival >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TArrival)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TArrival)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TArrival-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TArrival-1)); err != nil {
			return err
		}
	}

	// t.TSent (int64) (int64)
	if t.TSent >= 0 {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TSent)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TSent)); err != nil {
			return err
		}
	} else {
-		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TSent-1)); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TSent-1)); err != nil {
			return err
		}
	}
	return nil
}

-func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) error {
+func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) {
	*t = LatencyMessage{}

-	br := cbg.GetPeeker(r)
-	scratch := make([]byte, 8)
+	cr := cbg.NewCborReader(r)

-	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}
@@ -223,7 +235,7 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) error {

	// t.TArrival (int64) (int64)
	{
-		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
@@ -248,7 +260,7 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) error {
	}
	// t.TSent (int64) (int64)
	{
-		maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
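These regenerated marshalers track a cbor-gen API change: the scratch-buffer helpers (`cbg.CborReadHeaderBuf`, `cbg.WriteMajorTypeHeaderBuf`, `cbg.GetPeeker`) are replaced by `cbg.NewCborReader`/`cbg.NewCborWriter` wrappers that manage buffering internally, and `UnmarshalCBOR` gains a named `err` return so a deferred handler can turn a bare `io.EOF` into `io.ErrUnexpectedEOF`. A minimal hand-written sketch of the same read/write pattern — not generated code; the helper functions and values are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"math"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// writeInt64 mirrors what the generated MarshalCBOR does for an int64:
// non-negative values as MajUnsignedInt, negative values as MajNegativeInt
// carrying -v-1.
func writeInt64(cw *cbg.CborWriter, v int64) error {
	if v >= 0 {
		return cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v))
	}
	return cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-v-1))
}

// readInt64 decodes the same encoding via the new CborReader, with the
// overflow guards the generated UnmarshalCBOR also performs.
func readInt64(cr *cbg.CborReader) (int64, error) {
	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return 0, err
	}
	switch maj {
	case cbg.MajUnsignedInt:
		if extra > math.MaxInt64 {
			return 0, fmt.Errorf("int64 positive overflow")
		}
		return int64(extra), nil
	case cbg.MajNegativeInt:
		if extra > math.MaxInt64 {
			return 0, fmt.Errorf("int64 negative overflow")
		}
		return -int64(extra) - 1, nil
	default:
		return 0, fmt.Errorf("unexpected major type %d", maj)
	}
}

func main() {
	var buf bytes.Buffer
	cw := cbg.NewCborWriter(&buf)

	// Encode a two-element array, the same shape as LatencyMessage.
	if err := cw.WriteMajorTypeHeader(cbg.MajArray, 2); err != nil {
		panic(err)
	}
	if err := writeInt64(cw, 42); err != nil {
		panic(err)
	}
	if err := writeInt64(cw, -7); err != nil {
		panic(err)
	}

	cr := cbg.NewCborReader(&buf)
	maj, n, err := cr.ReadHeader()
	if err != nil || maj != cbg.MajArray || n != 2 {
		panic("expected a 2-element array")
	}
	a, _ := readInt64(cr)
	b, _ := readInt64(cr)
	fmt.Println(a, b) // 42 -7
}
```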
node/impl/storminer.go

@@ -37,8 +37,10 @@ import (
	"github.com/filecoin-project/go-fil-markets/piecestore"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
+	filmktsstore "github.com/filecoin-project/go-fil-markets/stores"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
+	mktsdagstore "github.com/filecoin-project/lotus/markets/dagstore"

	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@@ -83,6 +85,7 @@ type StorageMinerAPI struct {
	SectorBlocks *sectorblocks.SectorBlocks `optional:"true"`
	Host         host.Host                  `optional:"true"`
	DAGStore     *dagstore.DAGStore         `optional:"true"`
+	DAGStoreWrapper *mktsdagstore.Wrapper   `optional:"true"`

	// Miner / storage
	Miner *storage.Miner `optional:"true"`
@@ -792,6 +795,35 @@ func (sm *StorageMinerAPI) DagstoreListShards(ctx context.Context) ([]api.DagstoreShardInfo, error) {
	return ret, nil
}

+func (sm *StorageMinerAPI) DagstoreRegisterShard(ctx context.Context, key string) error {
+	if sm.DAGStore == nil {
+		return fmt.Errorf("dagstore not available on this node")
+	}
+
+	// First check if the shard has already been registered
+	k := shard.KeyFromString(key)
+	_, err := sm.DAGStore.GetShardInfo(k)
+	if err == nil {
+		// Shard already registered, nothing further to do
+		return nil
+	}
+	// If the shard is not registered we would expect ErrShardUnknown
+	if !errors.Is(err, dagstore.ErrShardUnknown) {
+		return fmt.Errorf("getting shard info from DAG store: %w", err)
+	}
+
+	pieceCid, err := cid.Parse(key)
+	if err != nil {
+		return fmt.Errorf("parsing shard key as piece cid: %w", err)
+	}
+
+	if err = filmktsstore.RegisterShardSync(ctx, sm.DAGStoreWrapper, pieceCid, "", true); err != nil {
+		return fmt.Errorf("failed to register shard: %w", err)
+	}
+
+	return nil
+}
+
func (sm *StorageMinerAPI) DagstoreInitializeShard(ctx context.Context, key string) error {
	if sm.DAGStore == nil {
		return fmt.Errorf("dagstore not available on this node")
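`DagstoreRegisterShard` is deliberately idempotent: a key that `GetShardInfo` already knows returns nil immediately, only `dagstore.ErrShardUnknown` is treated as "proceed to register", and any other error is surfaced. Since the method is tagged `//perm:admin`, callers need an admin token. A sketch of invoking it over JSON-RPC — the endpoint address, token, and piece CID below are placeholders, and the client setup is the usual lotus RPC client rather than anything specific to this change:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint and admin token for a lotus-miner node; in a real
	// deployment these come from the miner repo's api and token files.
	addr := "ws://127.0.0.1:2345/rpc/v0"
	headers := http.Header{"Authorization": []string{"Bearer <admin-token>"}}

	miner, closer, err := client.NewStorageMinerRPCV0(ctx, addr, headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	// The shard key is the piece CID string; registration is a no-op if the
	// shard is already known to the DAG store.
	if err := miner.DagstoreRegisterShard(ctx, "baga6ea4sea..."); err != nil {
		panic(err)
	}
	fmt.Println("shard registered (or already present)")
}
```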
node/modules/lp2p/rcmgr.go

@@ -27,9 +27,10 @@ import (

func ResourceManager(connMgrHi uint) func(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) {
	return func(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) {
+		isFullNode := repo.RepoType().Type() == "FullNode"
		envvar := os.Getenv("LOTUS_RCMGR")
-		if envvar == "0" {
-			// this is enabled by default; specify LOTUS_RCMGR=0 to disable
+		if (isFullNode && envvar == "0") || // only set NullResourceManager if envvar is explicitly "0"
+			(!isFullNode && envvar != "1") { // set NullResourceManager *unless* envvar is explicitly "1"
			log.Info("libp2p resource manager is disabled")
			return network.NullResourceManager, nil
		}
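The rewritten guard gives the libp2p resource manager two different defaults: on full nodes it stays enabled unless `LOTUS_RCMGR=0`, while on every other node type it stays disabled unless `LOTUS_RCMGR=1`. A self-contained sketch of that decision table — the helper name is illustrative, not a lotus API:

```go
package main

import "fmt"

// rcmgrEnabled mirrors the condition in ResourceManager above, inverted to
// answer "enabled?": full nodes default to on, other node types to off.
func rcmgrEnabled(isFullNode bool, envvar string) bool {
	if isFullNode {
		return envvar != "0" // enabled unless explicitly disabled
	}
	return envvar == "1" // disabled unless explicitly enabled
}

func main() {
	for _, tc := range []struct {
		full bool
		env  string
	}{
		{true, ""}, {true, "0"}, {true, "1"},
		{false, ""}, {false, "0"}, {false, "1"},
	} {
		fmt.Printf("fullNode=%v LOTUS_RCMGR=%q -> enabled=%v\n",
			tc.full, tc.env, rcmgrEnabled(tc.full, tc.env))
	}
}
```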
node/repo/fsrepo.go

@@ -417,6 +417,10 @@ type fsLockedRepo struct {
	configLk sync.Mutex
}

+func (fsr *fsLockedRepo) RepoType() RepoType {
+	return fsr.repoType
+}
+
func (fsr *fsLockedRepo) Readonly() bool {
	return fsr.readonly
}
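Exposing `RepoType()` on the locked repo is what lets module constructors such as `ResourceManager` above branch on node type after the repo has been opened. A trimmed-down illustration of the pattern — this is not the full lotus `LockedRepo` interface, just the one accessor this change relies on:

```go
package main

import "fmt"

// RepoType mimics lotus' repo type descriptor; only Type() is shown here.
type RepoType interface {
	Type() string
}

type fullNodeType struct{}

func (fullNodeType) Type() string { return "FullNode" }

// LockedRepo is trimmed to the single accessor the rcmgr change needs.
type LockedRepo interface {
	RepoType() RepoType
}

type fsLockedRepo struct {
	repoType RepoType
}

func (fsr *fsLockedRepo) RepoType() RepoType { return fsr.repoType }

func main() {
	var lr LockedRepo = &fsLockedRepo{repoType: fullNodeType{}}
	fmt.Println(lr.RepoType().Type() == "FullNode") // true
}
```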
Some files were not shown because too many files have changed in this diff.