Merge branch 'master' into asr/merge-release-into-master

commit 65ac5669a9
Author: Aayush
Date: 2023-01-23 12:25:48 -05:00
21 changed files with 957 additions and 468 deletions

View File

@@ -417,71 +417,6 @@ jobs:
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
publish:
description: publish binary artifacts
executor: ubuntu
parameters:
linux:
default: false
description: publish linux binaries?
type: boolean
appimage:
default: false
description: publish appimage binaries?
type: boolean
steps:
- run:
name: Install git jq curl
command: apt update && apt install -y git jq curl sudo
- checkout
- git_fetch_all_tags
- install_ipfs
- attach_workspace:
at: /tmp/workspace
- when:
condition: << parameters.linux >>
steps:
- run: ./scripts/build-arch-bundle.sh linux
- run: ./scripts/publish-arch-release.sh linux
- when:
condition: << parameters.appimage >>
steps:
- run: ./scripts/build-appimage-bundle.sh
- run: ./scripts/publish-arch-release.sh appimage
publish-snapcraft:
description: build and push snapcraft
machine:
image: ubuntu-2004:202104-01
resource_class: 2xlarge
parameters:
channel:
type: string
default: "edge"
description: snapcraft channel
snap-name:
type: string
default: 'lotus-filecoin'
description: name of snap in snap store
steps:
- checkout
- run:
name: Install snapcraft
command: sudo snap install snapcraft --classic
- run:
name: Build << parameters.snap-name >> snap
command: |
if [ "<< parameters.snap-name >>" != 'lotus-filecoin' ]; then
cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > edited-snapcraft.yaml
mv edited-snapcraft.yaml snap/snapcraft.yaml
fi
snapcraft --use-lxd --debug
- run:
name: Publish snap to << parameters.channel >> channel
shell: /bin/bash -o pipefail
command: |
snapcraft upload *.snap --release << parameters.channel >>
build-docker:
description: >
Publish to Dockerhub
@@ -737,9 +672,10 @@ workflows:
target: "./itests/eth_balance_test.go"
- test:
name: test-itest-eth_block_hash
+requires:
+- build
suite: itest-eth_block_hash
target: "./itests/eth_block_hash_test.go"
- test:
name: test-itest-eth_deploy
requires:
@@ -754,9 +690,10 @@ workflows:
target: "./itests/eth_filter_test.go"
- test:
name: test-itest-eth_hash_lookup
+requires:
+- build
suite: itest-eth_hash_lookup
target: "./itests/eth_hash_lookup_test.go"
- test:
name: test-itest-eth_transactions
requires:

View File

@@ -417,71 +417,6 @@ jobs:
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
publish:
description: publish binary artifacts
executor: ubuntu
parameters:
linux:
default: false
description: publish linux binaries?
type: boolean
appimage:
default: false
description: publish appimage binaries?
type: boolean
steps:
- run:
name: Install git jq curl
command: apt update && apt install -y git jq curl sudo
- checkout
- git_fetch_all_tags
- install_ipfs
- attach_workspace:
at: /tmp/workspace
- when:
condition: << parameters.linux >>
steps:
- run: ./scripts/build-arch-bundle.sh linux
- run: ./scripts/publish-arch-release.sh linux
- when:
condition: << parameters.appimage >>
steps:
- run: ./scripts/build-appimage-bundle.sh
- run: ./scripts/publish-arch-release.sh appimage
publish-snapcraft:
description: build and push snapcraft
machine:
image: ubuntu-2004:202104-01
resource_class: 2xlarge
parameters:
channel:
type: string
default: "edge"
description: snapcraft channel
snap-name:
type: string
default: 'lotus-filecoin'
description: name of snap in snap store
steps:
- checkout
- run:
name: Install snapcraft
command: sudo snap install snapcraft --classic
- run:
name: Build << parameters.snap-name >> snap
command: |
if [ "<< parameters.snap-name >>" != 'lotus-filecoin' ]; then
cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > edited-snapcraft.yaml
mv edited-snapcraft.yaml snap/snapcraft.yaml
fi
snapcraft --use-lxd --debug
- run:
name: Publish snap to << parameters.channel >> channel
shell: /bin/bash -o pipefail
command: |
snapcraft upload *.snap --release << parameters.channel >>
build-docker:
description: >
Publish to Dockerhub

View File

@@ -252,10 +252,16 @@ func ParseTipSetString(ts string) ([]cid.Cid, error) {
return cids, nil
}
type TipSetResolver interface {
ChainHead(context.Context) (*types.TipSet, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
}
// LoadTipSet gets the tipset from the context, or the head from the API.
//
// It always gets the head from the API so commands use a consistent tipset even if time pases.
-func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) {
+func LoadTipSet(ctx context.Context, cctx *cli.Context, api TipSetResolver) (*types.TipSet, error) {
tss := cctx.String("tipset")
if tss == "" {
return api.ChainHead(ctx)
@@ -264,7 +270,7 @@ func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) {
return ParseTipSetRef(ctx, api, tss)
}
-func ParseTipSetRef(ctx context.Context, api v0api.FullNode, tss string) (*types.TipSet, error) {
+func ParseTipSetRef(ctx context.Context, api TipSetResolver, tss string) (*types.TipSet, error) {
if tss[0] == '@' {
if tss == "@head" {
return api.ChainHead(ctx)
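
This hunk narrows LoadTipSet and ParseTipSetRef from the full v0api.FullNode to the small TipSetResolver interface, so anything that provides the three chain methods can drive tipset selection. Below is a minimal sketch, not part of the commit, of a stub resolver for tests; the package and type names are illustrative. The full node API and the offline ChainStoreTipSetResolver added later in this commit both satisfy the same interface.

```go
// Minimal sketch (not from this commit): a stub TipSetResolver that always
// resolves to one fixed tipset, handy for exercising LoadTipSet in unit tests.
package clitest

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/types"
)

type fixedResolver struct {
	ts *types.TipSet
}

// ChainHead returns the fixed tipset regardless of chain state.
func (f fixedResolver) ChainHead(_ context.Context) (*types.TipSet, error) {
	return f.ts, nil
}

// ChainGetTipSetByHeight ignores the requested height and returns the fixed tipset.
func (f fixedResolver) ChainGetTipSetByHeight(_ context.Context, _ abi.ChainEpoch, _ types.TipSetKey) (*types.TipSet, error) {
	return f.ts, nil
}

// ChainGetTipSet ignores the key and returns the fixed tipset.
func (f fixedResolver) ChainGetTipSet(_ context.Context, _ types.TipSetKey) (*types.TipSet, error) {
	return f.ts, nil
}
```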

View File

@@ -51,7 +51,6 @@ var sectorsCmd = &cli.Command{
sectorPreCommitsCmd,
sectorsCheckExpireCmd,
sectorsExpiredCmd,
-sectorsRenewCmd,
sectorsExtendCmd,
sectorsTerminateCmd,
sectorsRemoveCmd,
@@ -743,14 +742,14 @@ func ArrayToString(array []uint64) string {
return strings.Join(sarray, ",")
}
-func getSectorsFromFile(filePath string) ([]uint64, error) {
+func getSectorsFromFile(filePath string) ([]abi.SectorNumber, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(file)
-sectors := make([]uint64, 0)
+sectors := make([]abi.SectorNumber, 0)
for scanner.Scan() {
line := scanner.Text()
@@ -760,7 +759,7 @@ func getSectorsFromFile(filePath string) ([]uint64, error) {
return nil, xerrors.Errorf("could not parse %s as sector id: %s", line, err)
}
-sectors = append(sectors, id)
+sectors = append(sectors, abi.SectorNumber(id))
}
if err = file.Close(); err != nil {
@@ -770,9 +769,19 @@ func getSectorsFromFile(filePath string) ([]uint64, error) {
return sectors, nil
}
-var sectorsRenewCmd = &cli.Command{
-Name: "renew",
-Usage: "Renew expiring sectors while not exceeding each sector's max life",
+func SectorNumsToBitfield(sectors []abi.SectorNumber) bitfield.BitField {
+var numbers []uint64
+for sector := range sectors {
+numbers = append(numbers, uint64(sector))
+}
+return bitfield.NewFromSet(numbers)
+}
+var sectorsExtendCmd = &cli.Command{
+Name: "extend",
+Usage: "Extend expiring sectors while not exceeding each sector's max life",
+ArgsUsage: "<sectorNumbers...(optional)>",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "from",
@@ -819,7 +828,7 @@ var sectorsRenewCmd = &cli.Command{
},
&cli.BoolFlag{
Name: "really-do-it",
-Usage: "pass this flag to really renew sectors, otherwise will only print out json representation of parameters",
+Usage: "pass this flag to really extend sectors, otherwise will only print out json representation of parameters",
},
},
Action: func(cctx *cli.Context) error {
@@ -896,8 +905,7 @@ var sectorsRenewCmd = &cli.Command{
return err
}
-excludeSet := make(map[uint64]struct{})
+excludeSet := make(map[abi.SectorNumber]struct{})
if cctx.IsSet("exclude") {
excludeSectors, err := getSectorsFromFile(cctx.String("exclude"))
if err != nil {
@@ -909,29 +917,25 @@ var sectorsRenewCmd = &cli.Command{
}
}
-var sis []*miner.SectorOnChainInfo
+var sectors []abi.SectorNumber
if cctx.Args().Present() {
-if cctx.IsSet("sector-file") {
+return xerrors.Errorf("sector-file specified along with command line params")
-sectors, err := getSectorsFromFile(cctx.String("sector-file"))
+}
}
for i, s := range cctx.Args().Slice() {
id, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector %d: %w", i, err)
}
sectors = append(sectors, abi.SectorNumber(id))
}
} else if cctx.IsSet("sector-file") {
sectors, err = getSectorsFromFile(cctx.String("sector-file"))
if err != nil {
return err
}
for _, id := range sectors {
if _, exclude := excludeSet[id]; exclude {
continue
}
si, found := activeSectorsInfo[abi.SectorNumber(id)]
if !found {
return xerrors.Errorf("sector %d is not active", id)
}
if len(si.DealIDs) > 0 && cctx.Bool("only-cc") {
continue
}
sis = append(sis, si)
}
} else {
from := currEpoch + 120
to := currEpoch + 92160
@@ -945,19 +949,28 @@ var sectorsRenewCmd = &cli.Command{
}
for _, si := range activeSet {
if si.Expiration >= from && si.Expiration <= to {
sectors = append(sectors, si.SectorNumber)
}
}
}
var sis []*miner.SectorOnChainInfo
for _, id := range sectors {
if _, exclude := excludeSet[id]; exclude {
continue
}
si, found := activeSectorsInfo[id]
if !found {
return xerrors.Errorf("sector %d is not active", id)
}
if len(si.DealIDs) > 0 && cctx.Bool("only-cc") {
continue
}
if si.Expiration >= from && si.Expiration <= to {
if _, exclude := excludeSet[uint64(si.SectorNumber)]; !exclude {
sis = append(sis, si)
}
}
}
}
extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]uint64{}
withinTolerance := func(a, b abi.ChainEpoch) bool {
diff := a - b
@@ -968,6 +981,7 @@ var sectorsRenewCmd = &cli.Command{
return diff <= abi.ChainEpoch(cctx.Int64("tolerance"))
}
extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{}
for _, si := range sis {
extension := abi.ChainEpoch(cctx.Int64("extension"))
newExp := si.Expiration + extension
@@ -997,21 +1011,21 @@ var sectorsRenewCmd = &cli.Command{
es, found := extensions[*l]
if !found {
-ne := make(map[abi.ChainEpoch][]uint64)
-ne[newExp] = []uint64{uint64(si.SectorNumber)}
+ne := make(map[abi.ChainEpoch][]abi.SectorNumber)
+ne[newExp] = []abi.SectorNumber{si.SectorNumber}
extensions[*l] = ne
} else {
added := false
for exp := range es {
if withinTolerance(newExp, exp) {
-es[exp] = append(es[exp], uint64(si.SectorNumber))
+es[exp] = append(es[exp], si.SectorNumber)
added = true
break
}
}
if !added {
-es[newExp] = []uint64{uint64(si.SectorNumber)}
+es[newExp] = []abi.SectorNumber{si.SectorNumber}
}
}
}
@@ -1051,7 +1065,7 @@ var sectorsRenewCmd = &cli.Command{
p.Extensions = append(p.Extensions, miner.ExpirationExtension{
Deadline: l.Deadline,
Partition: l.Partition,
-Sectors: bitfield.NewFromSet(numbers),
+Sectors: SectorNumsToBitfield(numbers),
NewExpiration: newExp,
})
}
@@ -1083,7 +1097,7 @@ var sectorsRenewCmd = &cli.Command{
}
scount += int(count)
}
-fmt.Printf("Renewing %d sectors: ", scount)
+fmt.Printf("Extending %d sectors: ", scount)
stotal += scount
if !cctx.Bool("really-do-it") {
@@ -1097,8 +1111,7 @@ var sectorsRenewCmd = &cli.Command{
return err
}
-fmt.Println()
-fmt.Println(string(data))
+fmt.Println("\n", string(data))
continue
}
@@ -1121,252 +1134,7 @@ var sectorsRenewCmd = &cli.Command{
fmt.Println(smsg.Cid())
}
-fmt.Printf("%d sectors renewed\n", stotal)
+fmt.Printf("%d sectors extended\n", stotal)
return nil
},
}
var sectorsExtendCmd = &cli.Command{
Name: "extend",
Usage: "Extend sector expiration",
ArgsUsage: "<sectorNumbers...>",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "new-expiration",
Usage: "new expiration epoch",
Required: false,
},
&cli.BoolFlag{
Name: "v1-sectors",
Usage: "renews all v1 sectors up to the maximum possible lifetime",
Required: false,
},
&cli.Int64Flag{
Name: "tolerance",
Value: 20160,
Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs",
Required: false,
},
&cli.Int64Flag{
Name: "expiration-ignore",
Value: 120,
Usage: "when extending v1 sectors, skip sectors whose current expiration is less than <ignore> epochs from now",
Required: false,
},
&cli.Int64Flag{
Name: "expiration-cutoff",
Usage: "when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified)",
Required: false,
},
&cli.StringFlag{},
},
Action: func(cctx *cli.Context) error {
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
var params []miner.ExtendSectorExpirationParams
if cctx.Bool("v1-sectors") {
head, err := api.ChainHead(ctx)
if err != nil {
return err
}
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return err
}
extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]uint64{}
// are given durations within tolerance epochs
withinTolerance := func(a, b abi.ChainEpoch) bool {
diff := a - b
if diff < 0 {
diff = b - a
}
return diff <= abi.ChainEpoch(cctx.Int64("tolerance"))
}
sis, err := api.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner sector infos: %w", err)
}
for _, si := range sis {
if si.SealProof >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
continue
}
if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) {
continue
}
if cctx.IsSet("expiration-cutoff") {
if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) {
continue
}
}
ml := policy.GetSectorMaxLifetime(si.SealProof, nv)
// if the sector's missing less than "tolerance" of its maximum possible lifetime, don't bother extending it
if withinTolerance(si.Expiration-si.Activation, ml) {
continue
}
// Set the new expiration to 48 hours less than the theoretical maximum lifetime
newExp := ml - (miner.WPoStProvingPeriod * 2) + si.Activation
if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp {
continue
}
p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err)
}
if p == nil {
return xerrors.Errorf("sector %d not found in any partition", si.SectorNumber)
}
es, found := extensions[*p]
if !found {
ne := make(map[abi.ChainEpoch][]uint64)
ne[newExp] = []uint64{uint64(si.SectorNumber)}
extensions[*p] = ne
} else {
added := false
for exp := range es {
if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration {
es[exp] = append(es[exp], uint64(si.SectorNumber))
added = true
break
}
}
if !added {
es[newExp] = []uint64{uint64(si.SectorNumber)}
}
}
}
p := miner.ExtendSectorExpirationParams{}
scount := 0
for l, exts := range extensions {
for newExp, numbers := range exts {
scount += len(numbers)
addressedMax, err := policy.GetAddressedSectorsMax(nv)
if err != nil {
return xerrors.Errorf("failed to get addressed sectors max")
}
declMax, err := policy.GetDeclarationsMax(nv)
if err != nil {
return xerrors.Errorf("failed to get declarations max")
}
if scount > addressedMax || len(p.Extensions) == declMax {
params = append(params, p)
p = miner.ExtendSectorExpirationParams{}
scount = len(numbers)
}
p.Extensions = append(p.Extensions, miner.ExpirationExtension{
Deadline: l.Deadline,
Partition: l.Partition,
Sectors: bitfield.NewFromSet(numbers),
NewExpiration: newExp,
})
}
}
// if we have any sectors, then one last append is needed here
if scount != 0 {
params = append(params, p)
}
} else {
if !cctx.Args().Present() || !cctx.IsSet("new-expiration") {
return xerrors.Errorf("must pass at least one sector number and new expiration")
}
sectors := map[lminer.SectorLocation][]uint64{}
for i, s := range cctx.Args().Slice() {
id, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector %d: %w", i, err)
}
p, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(id), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting sector location for sector %d: %w", id, err)
}
if p == nil {
return xerrors.Errorf("sector %d not found in any partition", id)
}
sectors[*p] = append(sectors[*p], id)
}
p := miner.ExtendSectorExpirationParams{}
for l, numbers := range sectors {
// TODO: Dedup with above loop
p.Extensions = append(p.Extensions, miner.ExpirationExtension{
Deadline: l.Deadline,
Partition: l.Partition,
Sectors: bitfield.NewFromSet(numbers),
NewExpiration: abi.ChainEpoch(cctx.Int64("new-expiration")),
})
}
params = append(params, p)
}
if len(params) == 0 {
fmt.Println("nothing to extend")
return nil
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
for i := range params {
sp, aerr := actors.SerializeParams(&params[i])
if aerr != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Worker,
To: maddr,
Method: builtin.MethodsMiner.ExtendSectorExpiration,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push message: %w", err)
}
fmt.Println(smsg.Cid())
}
return nil
},
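
As the hunks above show, the rewritten extend command first groups candidate sectors by their on-chain location (deadline and partition) and then merges target expirations that fall within --tolerance epochs of one another, so a single ExpirationExtension declaration can cover many sectors. A standalone sketch of that merging step follows; the helper name and the sample numbers are illustrative and not from the commit.

```go
// Minimal sketch (not from this commit) of the tolerance-based grouping the
// extend command performs per deadline/partition: a sector's new expiration
// joins an existing group when it is within `tolerance` epochs of that
// group's epoch, otherwise it starts a new group.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func groupByTolerance(newExps map[abi.SectorNumber]abi.ChainEpoch, tolerance abi.ChainEpoch) map[abi.ChainEpoch][]abi.SectorNumber {
	withinTolerance := func(a, b abi.ChainEpoch) bool {
		diff := a - b
		if diff < 0 {
			diff = -diff
		}
		return diff <= tolerance
	}

	groups := map[abi.ChainEpoch][]abi.SectorNumber{}
	for num, exp := range newExps {
		added := false
		for groupExp := range groups {
			if withinTolerance(exp, groupExp) {
				groups[groupExp] = append(groups[groupExp], num)
				added = true
				break
			}
		}
		if !added {
			groups[exp] = []abi.SectorNumber{num}
		}
	}
	return groups
}

func main() {
	// Sectors 1 and 2 land in the same group; sector 3 is too far away.
	out := groupByTolerance(map[abi.SectorNumber]abi.ChainEpoch{1: 100000, 2: 100010, 3: 150000}, 20160)
	fmt.Println(out)
}
```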

View File

@@ -5,6 +5,7 @@ import (
"encoding/hex"
"fmt"
"os"
+"strconv"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
@@ -14,6 +15,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
+"github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/network"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
@@ -35,6 +37,8 @@ var actorCmd = &cli.Command{
actorProposeChangeWorker,
actorConfirmChangeWorker,
actorGetMethodNum,
+actorProposeChangeBeneficiary,
+actorConfirmChangeBeneficiary,
},
}
@@ -831,3 +835,255 @@ var actorGetMethodNum = &cli.Command{
return nil
},
}
var actorProposeChangeBeneficiary = &cli.Command{
Name: "propose-change-beneficiary",
Usage: "Propose a beneficiary address change",
ArgsUsage: "[beneficiaryAddress quota expiration minerID]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
&cli.BoolFlag{
Name: "overwrite-pending-change",
Usage: "Overwrite the current beneficiary change proposal",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 4 {
return lcli.IncorrectNumArgs(cctx)
}
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("getting fullnode api: %w", err)
}
defer acloser()
ctx := lcli.ReqContext(cctx)
na, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return xerrors.Errorf("parsing beneficiary address: %w", err)
}
newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("looking up new beneficiary address: %w", err)
}
quota, err := types.ParseFIL(cctx.Args().Get(1))
if err != nil {
return xerrors.Errorf("parsing quota: %w", err)
}
expiration, err := strconv.ParseInt(cctx.Args().Get(2), 10, 64)
if err != nil {
return xerrors.Errorf("parsing expiration: %w", err)
}
maddr, err := address.NewFromString(cctx.Args().Get(3))
if err != nil {
return xerrors.Errorf("getting miner address: %w", err)
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
if mi.Beneficiary == mi.Owner && newAddr == mi.Owner {
return fmt.Errorf("beneficiary %s already set to owner address", mi.Beneficiary)
}
if mi.PendingBeneficiaryTerm != nil {
fmt.Println("WARNING: replacing Pending Beneficiary Term of:")
fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary)
fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota)
fmt.Println("Expiration Epoch:", mi.PendingBeneficiaryTerm.NewExpiration)
if !cctx.Bool("overwrite-pending-change") {
return fmt.Errorf("must pass --overwrite-pending-change to replace current pending beneficiary change. Please review CAREFULLY")
}
}
if !cctx.Bool("really-do-it") {
fmt.Println("Pass --really-do-it to actually execute this action. Review what you're about to approve CAREFULLY please")
return nil
}
params := &miner.ChangeBeneficiaryParams{
NewBeneficiary: newAddr,
NewQuota: abi.TokenAmount(quota),
NewExpiration: abi.ChainEpoch(expiration),
}
sp, err := actors.SerializeParams(params)
if err != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Owner,
To: maddr,
Method: builtin.MethodsMiner.ChangeBeneficiary,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Println("Propose Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return xerrors.Errorf("waiting for message to be included in block: %w", err)
}
// check it executed successfully
if wait.Receipt.ExitCode.IsError() {
return fmt.Errorf("propose beneficiary change failed")
}
updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, wait.TipSet)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == newAddr {
fmt.Println("Beneficiary address successfully changed")
} else {
fmt.Println("Beneficiary address change awaiting additional confirmations")
}
return nil
},
}
var actorConfirmChangeBeneficiary = &cli.Command{
Name: "confirm-change-beneficiary",
Usage: "Confirm a beneficiary address change",
ArgsUsage: "[minerAddress]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
&cli.BoolFlag{
Name: "existing-beneficiary",
Usage: "send confirmation from the existing beneficiary address",
},
&cli.BoolFlag{
Name: "new-beneficiary",
Usage: "send confirmation from the new beneficiary address",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("getting fullnode api: %w", err)
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := address.NewFromString(cctx.Args().First())
if err != nil {
return xerrors.Errorf("parsing beneficiary address: %w", err)
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
if mi.PendingBeneficiaryTerm == nil {
return fmt.Errorf("no pending beneficiary term found for miner %s", maddr)
}
if (cctx.IsSet("existing-beneficiary") && cctx.IsSet("new-beneficiary")) || (!cctx.IsSet("existing-beneficiary") && !cctx.IsSet("new-beneficiary")) {
return lcli.ShowHelp(cctx, fmt.Errorf("must pass exactly one of --existing-beneficiary or --new-beneficiary"))
}
var fromAddr address.Address
if cctx.IsSet("existing-beneficiary") {
if mi.PendingBeneficiaryTerm.ApprovedByBeneficiary {
return fmt.Errorf("beneficiary change already approved by current beneficiary")
}
fromAddr = mi.Beneficiary
} else {
if mi.PendingBeneficiaryTerm.ApprovedByNominee {
return fmt.Errorf("beneficiary change already approved by new beneficiary")
}
fromAddr = mi.PendingBeneficiaryTerm.NewBeneficiary
}
fmt.Println("Confirming Pending Beneficiary Term of:")
fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary)
fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota)
fmt.Println("Expiration Epoch:", mi.PendingBeneficiaryTerm.NewExpiration)
if !cctx.Bool("really-do-it") {
fmt.Println("Pass --really-do-it to actually execute this action. Review what you're about to approve CAREFULLY please")
return nil
}
params := &miner.ChangeBeneficiaryParams{
NewBeneficiary: mi.PendingBeneficiaryTerm.NewBeneficiary,
NewQuota: mi.PendingBeneficiaryTerm.NewQuota,
NewExpiration: mi.PendingBeneficiaryTerm.NewExpiration,
}
sp, err := actors.SerializeParams(params)
if err != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: fromAddr,
To: maddr,
Method: builtin.MethodsMiner.ChangeBeneficiary,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Println("Confirm Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return xerrors.Errorf("waiting for message to be included in block: %w", err)
}
// check it executed successfully
if wait.Receipt.ExitCode.IsError() {
return fmt.Errorf("confirm beneficiary change failed with code %d", wait.Receipt.ExitCode)
}
updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == mi.PendingBeneficiaryTerm.NewBeneficiary {
fmt.Println("Beneficiary address successfully changed")
} else {
fmt.Println("Beneficiary address change awaiting additional confirmations")
}
return nil
},
}
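
The two new subcommands implement a two-step handshake: the owner proposes ChangeBeneficiaryParams, and the change only lands once the required parties (the current beneficiary and, where needed, the nominated new beneficiary) push an identical ChangeBeneficiaryParams message. A small sketch of deriving that confirmation payload from the pending term is below; the helper and package names are illustrative, and it assumes the pending term is the v9 miner.PendingBeneficiaryChange type from go-state-types, whose field names match what the command reads.

```go
// Minimal sketch (not from this commit): a confirmation must echo the pending
// beneficiary term exactly, so the params can be derived mechanically from the
// PendingBeneficiaryTerm returned by StateMinerInfo.
package minercli

import (
	"github.com/filecoin-project/go-state-types/builtin/v9/miner"
)

// confirmationParams builds the ChangeBeneficiaryParams that a confirming
// party (current or new beneficiary) must send to approve the pending change.
func confirmationParams(pending *miner.PendingBeneficiaryChange) *miner.ChangeBeneficiaryParams {
	return &miner.ChangeBeneficiaryParams{
		NewBeneficiary: pending.NewBeneficiary,
		NewQuota:       pending.NewQuota,
		NewExpiration:  pending.NewExpiration,
	}
}
```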

View File

@@ -1,6 +1,7 @@
package main
import (
+"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -12,6 +13,7 @@ import (
"github.com/ipld/go-car"
mh "github.com/multiformats/go-multihash"
"github.com/urfave/cli/v2"
+cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@@ -27,6 +29,42 @@ var cidCmd = &cli.Command{
Subcommands: cli.Commands{
cidIdCmd,
inspectBundleCmd,
+cborCid,
+cidBytes,
},
}
var cidBytes = &cli.Command{
Name: "bytes",
Usage: "cid bytes",
ArgsUsage: "[cid]",
Action: func(cctx *cli.Context) error {
c, err := cid.Decode(cctx.Args().First())
if err != nil {
return err
}
// Add in the troublesome zero byte prefix
fmt.Printf("00%x\n", c.Bytes())
return nil
},
}
var cborCid = &cli.Command{
Name: "cbor",
Usage: "Serialize cid to cbor",
ArgsUsage: "[cid]",
Action: func(cctx *cli.Context) error {
c, err := cid.Decode(cctx.Args().First())
if err != nil {
return err
}
cbgc := cbg.CborCid(c)
buf := bytes.NewBuffer(make([]byte, 0))
if err := cbgc.MarshalCBOR(buf); err != nil {
return err
}
fmt.Printf("%x\n", buf.Bytes())
return nil
},
}
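
Both new subcommands expose the same encoding detail: per the DAG-CBOR convention that cbor-gen implements, a CID is carried as tag 42 wrapping a byte string whose contents are the raw CID bytes prefixed with a 0x00 identity-multibase byte, which is the "troublesome zero byte prefix" the bytes command prints. A small standalone sketch, not from the commit, showing the relationship between the two outputs:

```go
// Minimal sketch (not from this commit): print the same two encodings the new
// `cid cbor` and `cid bytes` subcommands produce for a CID passed as argv[1].
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: cidsketch <cid>")
		return
	}

	c, err := cid.Decode(os.Args[1])
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	cbgc := cbg.CborCid(c)
	if err := cbgc.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// The CBOR form starts with tag 42 and a byte-string header; the byte
	// string's payload is the 0x00-prefixed CID bytes, i.e. it ends with the
	// same hex that the "bytes" form prints.
	fmt.Printf("cbor:  %x\n", buf.Bytes())
	fmt.Printf("bytes: 00%x\n", c.Bytes())
}
```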

View File

@@ -20,6 +20,8 @@ func main() {
local := []*cli.Command{
addressCmd,
+statActorCmd,
+statObjCmd,
base64Cmd,
base32Cmd,
base16Cmd,

View File

@@ -0,0 +1,533 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"reflect"
"sync"
"github.com/docker/go-units"
lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
format "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-merkledag"
"github.com/urfave/cli/v2"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
type actorStats struct {
Address address.Address
Actor *types.Actor
Fields []fieldItem
Stats api.ObjStat
}
type fieldItem struct {
Name string
Cid cid.Cid
Stats api.ObjStat
}
type cacheNodeGetter struct {
ds format.NodeGetter
cache *lru.TwoQueueCache
}
func newCacheNodeGetter(d format.NodeGetter, size int) (*cacheNodeGetter, error) {
cng := &cacheNodeGetter{ds: d}
cache, err := lru.New2Q(size)
if err != nil {
return nil, err
}
cng.cache = cache
return cng, nil
}
func (cng *cacheNodeGetter) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
if n, ok := cng.cache.Get(c); ok {
return n.(format.Node), nil
}
n, err := cng.ds.Get(ctx, c)
if err != nil {
return nil, err
}
cng.cache.Add(c, n)
return n, nil
}
func (cng *cacheNodeGetter) GetMany(ctx context.Context, list []cid.Cid) <-chan *format.NodeOption {
out := make(chan *format.NodeOption, len(list))
go func() {
for _, c := range list {
n, err := cng.Get(ctx, c)
if err != nil {
out <- &format.NodeOption{Err: err}
continue
}
out <- &format.NodeOption{Node: n}
}
}()
return out
}
type dagStatCollector struct {
ds format.NodeGetter
walk func(format.Node) ([]*format.Link, error)
statsLk sync.Mutex
stats api.ObjStat
}
func (dsc *dagStatCollector) record(ctx context.Context, nd format.Node) error {
size, err := nd.Size()
if err != nil {
return err
}
dsc.statsLk.Lock()
defer dsc.statsLk.Unlock()
dsc.stats.Size = dsc.stats.Size + size
dsc.stats.Links = dsc.stats.Links + 1
return nil
}
func (dsc *dagStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
nd, err := dsc.ds.Get(ctx, c)
if err != nil {
return nil, err
}
if err := dsc.record(ctx, nd); err != nil {
return nil, err
}
return dsc.walk(nd)
}
type ChainStoreTipSetResolver struct {
Chain *store.ChainStore
}
func (tsr *ChainStoreTipSetResolver) ChainHead(ctx context.Context) (*types.TipSet, error) {
return tsr.Chain.GetHeaviestTipSet(), nil
}
func (tsr *ChainStoreTipSetResolver) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
ts, err := tsr.Chain.GetTipSetFromKey(ctx, tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
return tsr.Chain.GetTipsetByHeight(ctx, h, ts, true)
}
func (tsr *ChainStoreTipSetResolver) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
return tsr.Chain.LoadTipSet(ctx, tsk)
}
var statObjCmd = &cli.Command{
Name: "stat-obj",
Usage: "calculates the size of any DAG in the blockstore",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
c, err := cid.Parse(cctx.Args().First())
if err != nil {
return err
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
exists, err := r.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
dsc := &dagStatCollector{
ds: dag,
walk: carWalkFunc,
}
if err := merkledag.Walk(ctx, dsc.walkLinks, c, cid.NewSet().Visit, merkledag.Concurrent()); err != nil {
return err
}
return DumpJSON(dsc.stats)
},
}
var statActorCmd = &cli.Command{
Name: "stat-actor",
Usage: "calculates the size of actors and their immeidate structures",
Description: `Any DAG linked by the actor object (field) will have its size calculated independently of all
other linked DAG. If an actor has two fields containing links to the same DAG the structure size will be counted
twice, included in each fields size individually.
The top level stats reported for an actor is computed independently of all fields and is a more accurate
accounting of the true size of the actor in the state datastore.
The calculation of these stats results in the actor state being traversed twice. The dag-cache-size flag can be used
to reduce the number of decode operations performed by caching the decoded object after first access.`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "tipset",
Usage: "specify tipset to call method on (pass comma separated array of cids)",
},
&cli.IntFlag{
Name: "workers",
Usage: "number of workers to use when processing",
Value: 10,
},
&cli.IntFlag{
Name: "dag-cache-size",
Usage: "cache size per worker (setting to 0 disables)",
Value: 8092,
},
&cli.BoolFlag{
Name: "all",
Usage: "process all actors in stateroot of tipset",
Value: false,
},
&cli.BoolFlag{
Name: "pretty",
Usage: "print formated output instead of ldjson",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
var addrs []address.Address
if !cctx.Bool("all") {
for _, a := range cctx.Args().Slice() {
addr, err := address.NewFromString(a)
if err != nil {
return err
}
addrs = append(addrs, addr)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
exists, err := r.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
cs := store.NewChainStore(bs, bs, mds, nil, nil)
if err := cs.Load(ctx); err != nil {
return err
}
tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
if err != nil {
return err
}
tsr := &ChainStoreTipSetResolver{
Chain: cs,
}
ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
if err != nil {
return err
}
log.Infow("tipset", "parentstate", ts.ParentState())
if len(addrs) == 0 && cctx.Bool("all") {
var err error
addrs, err = sm.ListAllActors(ctx, ts)
if err != nil {
return err
}
}
numWorkers := cctx.Int("workers")
dagCacheSize := cctx.Int("dag-cache-size")
eg, egctx := errgroup.WithContext(ctx)
jobs := make(chan address.Address, numWorkers)
results := make(chan actorStats, numWorkers)
worker := func(ctx context.Context, id int) error {
completed := 0
defer func() {
log.Infow("worker done", "id", id, "completed", completed)
}()
for {
select {
case addr, ok := <-jobs:
if !ok {
return nil
}
actor, err := sm.LoadActor(ctx, addr, ts)
if err != nil {
return err
}
var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
if dagCacheSize != 0 {
var err error
dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))), dagCacheSize)
if err != nil {
return err
}
}
actStats, err := collectStats(ctx, addr, actor, dag)
if err != nil {
return err
}
select {
case results <- actStats:
case <-ctx.Done():
return ctx.Err()
}
case <-ctx.Done():
return ctx.Err()
}
completed = completed + 1
}
}
for w := 0; w < numWorkers; w++ {
id := w
eg.Go(func() error {
return worker(egctx, id)
})
}
go func() {
defer close(jobs)
for _, addr := range addrs {
jobs <- addr
}
}()
go func() {
// error is checked later
eg.Wait() //nolint:errcheck
close(results)
}()
for {
select {
case result, ok := <-results:
if !ok {
return eg.Wait()
}
if cctx.Bool("pretty") {
DumpStats(result)
} else {
if err := DumpJSON(result); err != nil {
return err
}
}
case <-ctx.Done():
return ctx.Err()
}
}
},
}
func collectStats(ctx context.Context, addr address.Address, actor *types.Actor, dag format.NodeGetter) (actorStats, error) {
log.Infow("actor", "addr", addr, "code", actor.Code, "name", builtin.ActorNameByCode(actor.Code))
nd, err := dag.Get(ctx, actor.Head)
if err != nil {
return actorStats{}, err
}
// When it comes to fvm / evm actors this method of inspecting fields will probably not work
// and we may only be able to collect stats for the top level object. We might be able to iterate
// over the top level fields for the actors and identify field that are CIDs, but unsure if we would
// be able to identify a field name.
oif, err := vm.DumpActorState(consensus.NewTipSetExecutor(filcns.RewardFunc).NewActorRegistry(), actor, nd.RawData())
if err != nil {
oif = nil
}
fields := []fieldItem{}
// Account actors return nil from DumpActorState as they have no state
if oif != nil {
v := reflect.Indirect(reflect.ValueOf(oif))
for i := 0; i < v.NumField(); i++ {
varName := v.Type().Field(i).Name
varType := v.Type().Field(i).Type
varValue := v.Field(i).Interface()
if varType == reflect.TypeOf(cid.Cid{}) {
fields = append(fields, fieldItem{
Name: varName,
Cid: varValue.(cid.Cid),
})
}
}
}
actStats := actorStats{
Address: addr,
Actor: actor,
}
dsc := &dagStatCollector{
ds: dag,
walk: carWalkFunc,
}
if err := merkledag.Walk(ctx, dsc.walkLinks, actor.Head, cid.NewSet().Visit, merkledag.Concurrent()); err != nil {
return actorStats{}, err
}
actStats.Stats = dsc.stats
for _, field := range fields {
dsc := &dagStatCollector{
ds: dag,
walk: carWalkFunc,
}
if err := merkledag.Walk(ctx, dsc.walkLinks, field.Cid, cid.NewSet().Visit, merkledag.Concurrent()); err != nil {
return actorStats{}, err
}
field.Stats = dsc.stats
actStats.Fields = append(actStats.Fields, field)
}
return actStats, nil
}
func DumpJSON(i interface{}) error {
bs, err := json.Marshal(i)
if err != nil {
return err
}
fmt.Println(string(bs))
return nil
}
func DumpStats(actStats actorStats) {
strtype := builtin.ActorNameByCode(actStats.Actor.Code)
fmt.Printf("Address:\t%s\n", actStats.Address)
fmt.Printf("Balance:\t%s\n", types.FIL(actStats.Actor.Balance))
fmt.Printf("Nonce:\t\t%d\n", actStats.Actor.Nonce)
fmt.Printf("Code:\t\t%s (%s)\n", actStats.Actor.Code, strtype)
fmt.Printf("Head:\t\t%s\n", actStats.Actor.Head)
fmt.Println()
fmt.Printf("%-*s%-*s%-*s\n", 32, "Field", 24, "Size", 24, "\"Blocks\"")
stats := actStats.Stats
sizeStr := units.BytesSize(float64(stats.Size))
fmt.Printf("%-*s%-*s%-*s%-*d\n", 32, "<self>", 10, sizeStr, 14, fmt.Sprintf("(%d)", stats.Size), 24, stats.Links)
for _, s := range actStats.Fields {
stats := s.Stats
sizeStr := units.BytesSize(float64(stats.Size))
fmt.Printf("%-*s%-*s%-*s%-*d\n", 32, s.Name, 10, sizeStr, 14, fmt.Sprintf("(%d)", stats.Size), 24, stats.Links)
}
fmt.Println("--------------------------------------------------------------------------")
}

View File

@@ -1677,8 +1677,7 @@ COMMANDS:
precommits Print on-chain precommit info
check-expire Inspect expiring sectors
expired Get or cleanup expired sectors
-renew Renew expiring sectors while not exceeding each sector's max life
-extend Extend sector expiration
+extend Extend expiring sectors while not exceeding each sector's max life
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
snap-up Mark a committed capacity sector to be filled with deals
@@ -1884,13 +1883,13 @@ OPTIONS:
```
-### lotus-miner sectors renew
+### lotus-miner sectors extend
```
NAME:
-lotus-miner sectors renew - Renew expiring sectors while not exceeding each sector's max life
+lotus-miner sectors extend - Extend expiring sectors while not exceeding each sector's max life
USAGE:
-lotus-miner sectors renew [command options] [arguments...]
+lotus-miner sectors extend [command options] <sectorNumbers...(optional)>
OPTIONS:
--exclude value optionally provide a file containing excluding sectors
@@ -1900,31 +1899,13 @@ OPTIONS:
--max-sectors value the maximum number of sectors contained in each message message (default: 0)
--new-expiration value try to extend selected sectors to this epoch, ignoring extension (default: 0)
--only-cc only extend CC sectors (useful for making sector ready for snap upgrade) (default: false)
---really-do-it pass this flag to really renew sectors, otherwise will only print out json representation of parameters (default: false)
+--really-do-it pass this flag to really extend sectors, otherwise will only print out json representation of parameters (default: false)
--sector-file value provide a file containing one sector number in each line, ignoring above selecting criteria
--to value only consider sectors whose current expiration epoch is in the range of [from, to], <to> defaults to: now + 92160 (32 days) (default: 0)
--tolerance value don't try to extend sectors by fewer than this number of epochs, defaults to 7 days (default: 20160)
```
### lotus-miner sectors extend
```
NAME:
lotus-miner sectors extend - Extend sector expiration
USAGE:
lotus-miner sectors extend [command options] <sectorNumbers...>
OPTIONS:
--expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than <cutoff> epochs from now (infinity if unspecified) (default: 0)
--expiration-ignore value when extending v1 sectors, skip sectors whose current expiration is less than <ignore> epochs from now (default: 120)
--new-expiration value new expiration epoch (default: 0)
--tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160)
--v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false)
```
### lotus-miner sectors terminate
```
NAME:

go.mod
View File

@@ -39,7 +39,7 @@ require (
github.com/filecoin-project/go-data-transfer v1.15.2
github.com/filecoin-project/go-fil-commcid v0.1.0
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
-github.com/filecoin-project/go-fil-markets v1.25.2
+github.com/filecoin-project/go-fil-markets v1.26.0
github.com/filecoin-project/go-jsonrpc v0.1.9
github.com/filecoin-project/go-legs v0.4.4
github.com/filecoin-project/go-padreader v0.0.1
@@ -48,7 +48,6 @@ require (
github.com/filecoin-project/go-statemachine v1.0.2
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
-github.com/filecoin-project/index-provider v0.9.1
github.com/filecoin-project/pubsub v1.0.0
github.com/filecoin-project/specs-actors v0.9.15
github.com/filecoin-project/specs-actors/v2 v2.3.6
@@ -110,6 +109,7 @@ require (
github.com/ipld/go-codec-dagpb v1.5.0
github.com/ipld/go-ipld-prime v0.18.0
github.com/ipld/go-ipld-selector-text-lite v0.0.1
+github.com/ipni/index-provider v0.10.0
github.com/kelseyhightower/envconfig v1.4.0
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
@@ -201,7 +201,6 @@ require (
github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect
github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect
-github.com/filecoin-project/storetheindex v0.4.30-0.20221114113647-683091f8e893 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
@@ -245,6 +244,7 @@ require (
github.com/ipfs/go-peertaskqueue v0.8.0 // indirect
github.com/ipfs/go-verifcid v0.0.2 // indirect
github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect
+github.com/ipni/storetheindex v0.5.3-0.20221203123030-16745cb63f15 // indirect
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect

go.sum
View File

@@ -331,8 +331,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88Oq
github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo=
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8=
-github.com/filecoin-project/go-fil-markets v1.25.2 h1:kVfgaamTC7dkn8KwS5zRJBNEBSNvVqdG3BCoDaUYuCI=
-github.com/filecoin-project/go-fil-markets v1.25.2/go.mod h1:dc2oTPU6GH3Qk1nA+Er+hSX64rg+NVykkPIWFBYxcZU=
+github.com/filecoin-project/go-fil-markets v1.26.0 h1:uNtt1UAxX2C/Q8tlWD00oF2Zma3CVGxhZmBc2ljYhkk=
+github.com/filecoin-project/go-fil-markets v1.26.0/go.mod h1:eOIYHfPwyqc64O1HiapvcelfnrTfU7gLQgBf55IYleQ=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
@@ -366,8 +366,6 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd
github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo=
github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus=
github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8=
-github.com/filecoin-project/index-provider v0.9.1 h1:Jnh9dviIHvQxZ2baNoYu3n8z6F9O62ksnVlyREgPyyM=
-github.com/filecoin-project/index-provider v0.9.1/go.mod h1:NlHxQcy2iMGfUoUGUzrRxntcpiC50QSnvp68u2VTT40=
github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM=
github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
@@ -388,8 +386,6 @@ github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt
github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk=
github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y=
github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA=
-github.com/filecoin-project/storetheindex v0.4.30-0.20221114113647-683091f8e893 h1:6GCuzxLVHBzlz7y+FkbHh6n0UyoEGWqDwJKQPJoz7bE=
-github.com/filecoin-project/storetheindex v0.4.30-0.20221114113647-683091f8e893/go.mod h1:S7590oDimBvXMUtzWsBXoshu9HtYKwtXl47zAK9rcP8=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@@ -899,6 +895,10 @@ github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY=
github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y=
github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM=
+github.com/ipni/index-provider v0.10.0 h1:nu8YBxzRopdjwZHsgCUuC4AHpq88VVHJYrbkqUDx7eg=
+github.com/ipni/index-provider v0.10.0/go.mod h1:InSXbZp2p/ZhAwiDElG/wzjnA1ea1iJ3hhyiAHrD+Vo=
+github.com/ipni/storetheindex v0.5.3-0.20221203123030-16745cb63f15 h1:qJq6QtLk+9nQi3CDBhNfJ1cjZ4pghjCHcQUZ1mWbF0k=
+github.com/ipni/storetheindex v0.5.3-0.20221203123030-16745cb63f15/go.mod h1:c/NS640Iu2NrCCIErnUhsUM5KVEyeXymgtNnx6eDwMU=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=

View File

@@ -8,12 +8,12 @@ import (
"testing"
"time"
+provider "github.com/ipni/index-provider"
"github.com/stretchr/testify/require"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/shared_testutil"
"github.com/filecoin-project/go-state-types/abi"
-provider "github.com/filecoin-project/index-provider"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"

View File

@@ -38,7 +38,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
-blockTime := 5 * time.Millisecond
+blockTime := 10 * time.Millisecond
var (
paymentCreator kit.TestFullNode
@@ -55,6 +55,15 @@ func TestPaymentChannelsAPI(t *testing.T) {
bms := ens.BeginMiningMustPost(blockTime)
bm := bms[0]
+waitRecvInSync := func() {
+// paymentCreator is the block miner, in some cases paymentReceiver may fall behind, so we wait for it to catch up
+head, err := paymentReceiver.ChainHead(ctx)
+require.NoError(t, err)
+paymentReceiver.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()))
+}
// send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
require.NoError(t, err)
@@ -74,6 +83,8 @@ func TestPaymentChannelsAPI(t *testing.T) {
channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
require.NoError(t, err)
+waitRecvInSync()
// allocate three lanes
var lanes []uint64
for i := 0; i < 3; i++ {
@@ -110,6 +121,8 @@ func TestPaymentChannelsAPI(t *testing.T) {
res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel")
+waitRecvInSync()
creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator)))
// wait for the receiver to submit their vouchers
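The added waitRecvInSync helper pauses the test until the receiving node has observed the current chain head, which is meant to avoid flakes where paymentReceiver's view of the chain lags the block-producing node. A generalized sketch of a catch-up wait between two nodes, reusing only the kit helpers visible in the hunk above (ChainHead, WaitTillChain, kit.HeightAtLeast); the two-node form, function name, and package are illustrative, not taken from this commit:

package itests

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
)

// waitCaughtUp blocks until follower reports a chain head at least as high as
// leader's current head. Sketch only.
func waitCaughtUp(ctx context.Context, t *testing.T, leader, follower *kit.TestFullNode) {
	head, err := leader.ChainHead(ctx)
	require.NoError(t, err)
	follower.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()))
}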
@@ -83,7 +83,7 @@ func TestLotusAccessorFetchUnsealedPiece(t *testing.T) {
dealInfo := piecestore.DealInfo{
SectorID: sectorID,
}
-err = ps.AddDealForPiece(cid1, dealInfo)
+err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)
}
@@ -124,7 +124,7 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) {
dealInfo := piecestore.DealInfo{
Length: 10,
}
-err = ps.AddDealForPiece(cid1, dealInfo)
+err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)
// Check that the data length is correct
@@ -153,7 +153,7 @@ func TestThrottle(t *testing.T) {
SectorID: unsealedSectorID,
Length: 10,
}
-err = ps.AddDealForPiece(cid1, dealInfo)
+err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)
// hold the lock to block.
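Each of these call sites gains a second CID argument, passed here as cid.Undef. The interface change itself is not part of this diff; assuming the updated piecestore method takes a payload CID alongside the piece CID, a minimal sketch of the new call shape (the interface and helper names are illustrative):

package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-fil-markets/piecestore"
)

// dealAdder captures only the assumed shape of the updated method; the extra
// payload-CID parameter is inferred from the call sites above.
type dealAdder interface {
	AddDealForPiece(pieceCID, payloadCID cid.Cid, dealInfo piecestore.DealInfo) error
}

// record passes cid.Undef when no payload root is tracked, as the tests do.
func record(ps dealAdder, pieceCID cid.Cid, info piecestore.DealInfo) error {
	return ps.AddDealForPiece(pieceCID, cid.Undef, info)
}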
@@ -4,6 +4,7 @@ import (
"errors"
"time"
+provider "github.com/ipni/index-provider"
"go.uber.org/fx"
"golang.org/x/xerrors"
@@ -12,7 +13,6 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
"github.com/filecoin-project/go-state-types/abi"
-provider "github.com/filecoin-project/index-provider"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
@@ -18,6 +18,7 @@ import (
graphsync "github.com/ipfs/go-graphsync/impl"
gsnet "github.com/ipfs/go-graphsync/network"
"github.com/ipfs/go-graphsync/storeutil"
+provider "github.com/ipni/index-provider"
"github.com/libp2p/go-libp2p/core/host"
"go.uber.org/fx"
"go.uber.org/multierr"
@@ -42,7 +43,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-statestore"
-provider "github.com/filecoin-project/index-provider"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
@@ -5,14 +5,14 @@ import (
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
+provider "github.com/ipni/index-provider"
+"github.com/ipni/index-provider/engine"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
-provider "github.com/filecoin-project/index-provider"
-"github.com/filecoin-project/index-provider/engine"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/config"
@@ -7,6 +7,7 @@ import (
"time"
"github.com/ipfs/go-datastore"
+provider "github.com/ipni/index-provider"
"github.com/libp2p/go-libp2p"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
@@ -14,7 +15,6 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/go-address"
-provider "github.com/filecoin-project/index-provider"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
@@ -94,8 +94,9 @@ func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.Server
fnapi = api.PermissionedFullAPI(fnapi)
}
+var v0 v0api.FullNode = &(struct{ v0api.FullNode }{&v0api.WrapperV1Full{FullNode: fnapi}})
serveRpc("/rpc/v1", fnapi)
-serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: fnapi})
+serveRpc("/rpc/v0", v0)
// Import handler
handleImportFunc := handleImport(a.(*impl.FullNodeAPI))
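The new v0 variable wraps the v1 API in v0api.WrapperV1Full and then embeds it in an anonymous struct typed as v0api.FullNode, so the value handed to serveRpc exposes exactly the v0 method set rather than every method on the wrapper. A generic sketch of this narrowing-by-embedding pattern (hypothetical interfaces, not the lotus API):

package main

import "fmt"

// Wide is the full method set we hold internally.
type Wide interface {
	Ping() string
	Extra() string
}

// Narrow is the subset we want callers (e.g. an RPC server) to see.
type Narrow interface {
	Ping() string
}

type impl struct{}

func (impl) Ping() string  { return "pong" }
func (impl) Extra() string { return "hidden" }

func main() {
	var wide Wide = impl{}
	// Embedding the Narrow interface in an anonymous struct produces a value
	// whose method set is only Narrow's methods, even though it is backed by wide.
	var narrow Narrow = &struct{ Narrow }{wide}
	fmt.Println(narrow.Ping())
}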
@@ -1,3 +1,6 @@
+//go:build !windows
+// +build !windows
package fsutil
import (
@@ -0,0 +1,16 @@
+package fsutil
+import (
+"fmt"
+)
+type SizeInfo struct {
+OnDisk int64
+}
+// FileSize returns bytes used by a file or directory on disk
+// NOTE: We care about the allocated bytes, not file or directory size
+// This is not currently supported on Windows, but at least other lotus components can build on Windows now
+func FileSize(path string) (SizeInfo, error) {
+return SizeInfo{0}, fmt.Errorf("unsupported")
+}
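This new file is evidently the Windows stub: FileSize always errors, and its comment notes that the value of interest is allocated bytes rather than logical size. For contrast, a minimal sketch of how a non-Windows build could derive allocated size from st_blocks (512-byte units); this is an illustration under that assumption, not a file from this commit:

//go:build !windows

package fsutil

import (
	"os"
	"path/filepath"
	"syscall"
)

// fileSizeOnDisk walks path and sums st_blocks*512 for every entry, so sparse
// or preallocated files report what they actually occupy on disk.
// Sketch only; error handling is minimal.
func fileSizeOnDisk(path string) (SizeInfo, error) {
	var size int64
	err := filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if st, ok := info.Sys().(*syscall.Stat_t); ok {
			size += st.Blocks * 512
		}
		return nil
	})
	if err != nil {
		return SizeInfo{}, err
	}
	return SizeInfo{OnDisk: size}, nil
}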