Merge remote-tracking branch 'origin/master' into feat/paych-avail-reuse
commit e9a6f5f287
@@ -390,7 +390,7 @@ jobs:

   build-appimage:
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
     steps:
       - checkout
       - attach_workspace:
@@ -398,6 +398,16 @@ jobs:
       - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
+
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -986,10 +996,19 @@ workflows:
           tags:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-appimage:
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - publish:
           requires:
             - build-all
             - build-macos
+            - build-appimage
           filters:
             branches:
               ignore:
@@ -390,7 +390,7 @@ jobs:

   build-appimage:
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
     steps:
       - checkout
       - attach_workspace:
@@ -398,6 +398,16 @@ jobs:
       - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
+
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -816,10 +826,19 @@ workflows:
           tags:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-appimage:
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - publish:
           requires:
             - build-all
             - build-macos
+            - build-appimage
           filters:
             branches:
               ignore:
@@ -49,23 +49,23 @@ AppDir:
     fedora:
       image: appimagecrafters/tests-env:fedora-30
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     debian:
       image: appimagecrafters/tests-env:debian-stable
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     arch:
       image: appimagecrafters/tests-env:archlinux-latest
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     centos:
       image: appimagecrafters/tests-env:centos-7
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     ubuntu:
       image: appimagecrafters/tests-env:ubuntu-xenial
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
 AppImage:
   arch: x86_64
   update-information: guess

CHANGELOG.md (31 changed lines)
@@ -1,5 +1,34 @@
 # Lotus changelog

+# 1.14.2 / 2022-02-24
+
+This is an **optional** release of lotus, that's had a couple more improvements w.r.t Snap experience for storage providers in preparation of the [upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).
+
+Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s).
+
+Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out!
+
+## Bug Fixes
+- fix lotus-bench for sealing jobs (#8173)
+- fix:sealing:really-do-it flag for abort upgrade (#8181)
+- fix:proving:post check sector handles snap deals replica faults (#8177)
+- fix: sealing: missing file type (#8180)
+
+## Others
+- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong
+  commit and fixed that by a force push, which is a really bad practice since it messes up the go mod. Therefore,
+  we want to retract it and users may use v1.14.1&^.
+
+## Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| @zenground0 | 2 | +73/-58 | 12 |
+| @eben.xie | 1 | +7/-0 | 1 |
+| @jennijuju | 1 | +4/-0 | 1 |
+| @jennijuju | 1 | +2/-1 | 1 |
+| @ribasushi | 1 | +2/-0 | 1 |
+
 # 1.14.1 / 2022-02-18

 This is an **optional** release of lotus, that fixes the incorrect *comment* of network v15 OhSnap upgrade **date**. Note the actual upgrade epoch in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct.
@@ -22,7 +51,7 @@ It is recommended that storage providers download the new params before updating
 - run `./lotus-shed fetch-params` with the appropriate `proving-params` flag
 - Upgrade the Lotus daemon and miner **when the previous step is complete**

-All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (150 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
+All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.

 ## New Features and Changes
 - Integrate actor v7-rc1:
@@ -256,7 +256,7 @@ type StorageMiner interface {
     // the path specified when calling CreateBackup is within the base path
     CreateBackup(ctx context.Context, fpath string) error //perm:admin

-    CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+    CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin

     ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
 }
@@ -629,7 +629,7 @@ type StorageMinerStruct struct {

     ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`

-    CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
+    CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`

     ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`

@@ -3773,14 +3773,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
     return *new(abi.SectorSize), ErrNotSupported
 }

-func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
     if s.Internal.CheckProvable == nil {
         return *new(map[abi.SectorNumber]string), ErrNotSupported
     }
-    return s.Internal.CheckProvable(p0, p1, p2, p3)
+    return s.Internal.CheckProvable(p0, p1, p2, p3, p4)
 }

-func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
     return *new(map[abi.SectorNumber]string), ErrNotSupported
 }

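Taken together, the three CheckProvable hunks above add a per-sector `update []bool` argument ahead of the existing `expensive` flag. The following is a minimal caller sketch, not code from this diff; the helper name and the way the inputs are obtained are illustrative assumptions, while the interface and parameter types come from the hunks above.

    // Sketch only (not part of the diff): check a batch of sectors with the new signature,
    // treating every sector as a non-upgraded replica (update[i] == false) and running
    // the cheap variant of the check (expensive == false).
    func checkAllProvable(ctx context.Context, miner api.StorageMiner, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) error {
        update := make([]bool, len(sectors))
        bad, err := miner.CheckProvable(ctx, pp, sectors, update, false)
        if err != nil {
            return err
        }
        for num, reason := range bad {
            fmt.Printf("sector %d failed the provable check: %s\n", num, reason)
        }
        return nil
    }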
@@ -57,7 +57,7 @@ var (
     FullAPIVersion0 = newVer(1, 5, 0)
     FullAPIVersion1 = newVer(2, 2, 0)

-    MinerAPIVersion0 = newVer(1, 3, 0)
+    MinerAPIVersion0 = newVer(1, 4, 0)
     WorkerAPIVersion0 = newVer(1, 5, 0)
 )

Binary file not shown.
@@ -16,6 +16,7 @@ import (
     "github.com/filecoin-project/lotus/chain/actors/builtin"
     "github.com/filecoin-project/lotus/chain/actors"
     "github.com/filecoin-project/lotus/chain/types"
+    verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
 )

 func init() {
@@ -62,6 +63,11 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
     return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
 }

+type RemoveDataCapProposal = verifreg{{.latestVersion}}.RemoveDataCapProposal
+type RemoveDataCapRequest = verifreg{{.latestVersion}}.RemoveDataCapRequest
+type RemoveDataCapParams = verifreg{{.latestVersion}}.RemoveDataCapParams
+type RmDcProposalID = verifreg{{.latestVersion}}.RmDcProposalID
+const SignatureDomainSeparation_RemoveDataCap = verifreg{{.latestVersion}}.SignatureDomainSeparation_RemoveDataCap

 type State interface {
     cbor.Marshaler
@@ -69,6 +75,7 @@ type State interface {
     RootKey() (address.Address, error)
     VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
     VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
+    RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
     ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
     ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
     GetState() interface{}
@@ -61,6 +61,10 @@ func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePo
     return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr)
 }

+func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb)
 }
@@ -77,6 +81,11 @@ func (s *state{{.v}}) verifiers() (adt.Map, error) {
     return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
 }

+func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) {
+    {{if le .v 6}}return nil, nil
+    {{else}}return adt{{.v}}.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}}
+}
+
 func (s *state{{.v}}) GetState() interface{} {
     return &s.State
 }
@@ -6,6 +6,7 @@ import (
     "github.com/filecoin-project/go-state-types/big"
     "github.com/filecoin-project/lotus/chain/actors"
     "github.com/filecoin-project/lotus/chain/actors/adt"
+    "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
     "golang.org/x/xerrors"
 )

@@ -50,3 +51,28 @@ func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr
         return cb(a, dcap)
     })
 }
+
+func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) {
+    if verifier.Protocol() != address.ID {
+        return false, 0, xerrors.Errorf("can only look up ID addresses")
+    }
+    if client.Protocol() != address.ID {
+        return false, 0, xerrors.Errorf("can only look up ID addresses")
+    }
+    vh, err := root()
+    if err != nil {
+        return false, 0, xerrors.Errorf("loading verifreg: %w", err)
+    }
+    if vh == nil {
+        return false, 0, xerrors.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors")
+    }
+
+    var id verifreg.RmDcProposalID
+    if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil {
+        return false, 0, xerrors.Errorf("looking up addr pair: %w", err)
+    } else if !found {
+        return false, 0, nil
+    }
+
+    return true, id.ProposalID, nil
+}
@@ -53,6 +53,10 @@ func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version0, s.verifiers, addr)
 }

+func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version0, s.verifiers, cb)
 }
@@ -69,6 +73,11 @@ func (s *state0) verifiers() (adt.Map, error) {
     return adt0.AsMap(s.store, s.Verifiers)
 }

+func (s *state0) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state0) GetState() interface{} {
     return &s.State
 }
@@ -53,6 +53,10 @@ func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version2, s.verifiers, addr)
 }

+func (s *state2) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version2, s.verifiers, cb)
 }
@@ -69,6 +73,11 @@ func (s *state2) verifiers() (adt.Map, error) {
     return adt2.AsMap(s.store, s.Verifiers)
 }

+func (s *state2) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state2) GetState() interface{} {
     return &s.State
 }
@@ -54,6 +54,10 @@ func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version3, s.verifiers, addr)
 }

+func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version3, s.verifiers, cb)
 }
@@ -70,6 +74,11 @@ func (s *state3) verifiers() (adt.Map, error) {
     return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
 }

+func (s *state3) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state3) GetState() interface{} {
     return &s.State
 }
@@ -54,6 +54,10 @@ func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version4, s.verifiers, addr)
 }

+func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version4, s.verifiers, cb)
 }
@@ -70,6 +74,11 @@ func (s *state4) verifiers() (adt.Map, error) {
     return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth)
 }

+func (s *state4) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state4) GetState() interface{} {
     return &s.State
 }
@@ -54,6 +54,10 @@ func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version5, s.verifiers, addr)
 }

+func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version5, s.verifiers, cb)
 }
@@ -70,6 +74,11 @@ func (s *state5) verifiers() (adt.Map, error) {
     return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth)
 }

+func (s *state5) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state5) GetState() interface{} {
     return &s.State
 }
@@ -54,6 +54,10 @@ func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version6, s.verifiers, addr)
 }

+func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version6, s.verifiers, cb)
 }
@@ -70,6 +74,11 @@ func (s *state6) verifiers() (adt.Map, error) {
     return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth)
 }

+func (s *state6) removeDataCapProposalIDs() (adt.Map, error) {
+    return nil, nil
+
+}
+
 func (s *state6) GetState() interface{} {
     return &s.State
 }
@@ -54,6 +54,10 @@ func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
     return getDataCap(s.store, actors.Version7, s.verifiers, addr)
 }

+func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+    return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client)
+}
+
 func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
     return forEachCap(s.store, actors.Version7, s.verifiers, cb)
 }
@@ -70,6 +74,10 @@ func (s *state7) verifiers() (adt.Map, error) {
     return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth)
 }

+func (s *state7) removeDataCapProposalIDs() (adt.Map, error) {
+    return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth)
+}
+
 func (s *state7) GetState() interface{} {
     return &s.State
 }
@@ -27,6 +27,7 @@ import (
     "github.com/filecoin-project/lotus/chain/actors/adt"
     "github.com/filecoin-project/lotus/chain/actors/builtin"
     "github.com/filecoin-project/lotus/chain/types"
+    verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
 )

 func init() {
@@ -151,12 +152,20 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
     return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
 }

+type RemoveDataCapProposal = verifreg7.RemoveDataCapProposal
+type RemoveDataCapRequest = verifreg7.RemoveDataCapRequest
+type RemoveDataCapParams = verifreg7.RemoveDataCapParams
+type RmDcProposalID = verifreg7.RmDcProposalID
+
+const SignatureDomainSeparation_RemoveDataCap = verifreg7.SignatureDomainSeparation_RemoveDataCap
+
 type State interface {
     cbor.Marshaler

     RootKey() (address.Address, error)
     VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
     VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
+    RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
     ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
     ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
     GetState() interface{}
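The `RemoveDataCapProposalID` accessor added to the verifreg `State` interface above is the lookup that other code in this commit builds on. Below is a hypothetical helper, a sketch only and not part of the diff, that mirrors how the cli/filplus.go hunk later in this commit resolves the proposal ID; the function name and parameter list are illustrative, while the API calls themselves appear in the diff.

    // Sketch only: resolve the current RemoveDataCapProposal ID for a verifier/client pair.
    // A found value of false simply means no prior proposal exists; the CLI hunk uses 0 then.
    func lookupRemoveDataCapProposalID(ctx context.Context, api v0api.FullNode, store adt.Store, verifierIdAddr, clientIdAddr address.Address) (uint64, error) {
        act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
        if err != nil {
            return 0, err
        }
        st, err := verifreg.Load(store, act)
        if err != nil {
            return 0, err
        }
        _, id, err := st.RemoveDataCapProposalID(verifierIdAddr, clientIdAddr)
        return id, err
    }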
@@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
     }

     nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
-    pl := vm.PricelistByEpoch(baseTs.Height())
+    pl := vm.PricelistByEpoch(b.Header.Height)
     var sumGasLimit int64
     checkMsg := func(msg types.ChainMsg) error {
         m := msg.VMMessage()
@@ -106,7 +106,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
     curTs := mp.curTs
     mp.curTsLk.Unlock()

-    epoch := curTs.Height()
+    epoch := curTs.Height() + 1

     var baseFee big.Int
     if len(curTs.Blocks()) > 0 {
@@ -628,7 +628,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
 // For non local messages, if the message cannot be included in the next 20 blocks it returns
 // a (soft) validation error.
 func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
-    epoch := curTs.Height()
+    epoch := curTs.Height() + 1
     minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

     if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
@@ -1244,25 +1244,3 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
     bbr, ok := syncer.bad.Has(blk)
     return bbr.String(), ok
 }
-
-func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
-    cur := ts
-    for i := 0; i < 20; i++ {
-        cbe := cur.Blocks()[0].BeaconEntries
-        if len(cbe) > 0 {
-            return &cbe[len(cbe)-1], nil
-        }
-
-        if cur.Height() == 0 {
-            return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
-        }
-
-        next, err := syncer.store.LoadTipSet(ctx, cur.Parents())
-        if err != nil {
-            return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
-        }
-        cur = next
-    }
-
-    return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
-}
@@ -8,6 +8,7 @@ import (

     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/chain/types/mock"
+    "github.com/stretchr/testify/require"
 )

 func init() {
@@ -240,3 +241,34 @@ func TestSyncManager(t *testing.T) {
         op3.done()
     })
 }
+
+func TestSyncManagerBucketSet(t *testing.T) {
+    ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
+    ts2 := mock.TipSet(mock.MkBlock(ts1, 1, 0))
+    bucket1 := newSyncTargetBucket(ts1, ts2)
+    bucketSet := syncBucketSet{buckets: []*syncTargetBucket{bucket1}}
+
+    // inserting a tipset (potential sync target) from an existing chain, should add to an existing bucket
+    //stm: @CHAIN_SYNCER_ADD_SYNC_TARGET_001
+    ts3 := mock.TipSet(mock.MkBlock(ts2, 2, 0))
+    bucketSet.Insert(ts3)
+    require.Equal(t, 1, len(bucketSet.buckets))
+    require.Equal(t, 3, len(bucketSet.buckets[0].tips))
+
+    // inserting a tipset from new chain, should create a new bucket
+    ts4fork := mock.TipSet(mock.MkBlock(nil, 1, 1))
+    bucketSet.Insert(ts4fork)
+    require.Equal(t, 2, len(bucketSet.buckets))
+    require.Equal(t, 3, len(bucketSet.buckets[0].tips))
+    require.Equal(t, 1, len(bucketSet.buckets[1].tips))
+
+    // Pop removes the best bucket (best sync target), e.g. bucket1
+    //stm: @CHAIN_SYNCER_SELECT_SYNC_TARGET_001
+    popped := bucketSet.Pop()
+    require.Equal(t, popped, bucket1)
+    require.Equal(t, 1, len(bucketSet.buckets))
+
+    // PopRelated removes the bucket containing the given tipset, leaving the set empty
+    bucketSet.PopRelated(ts4fork)
+    require.Equal(t, 0, len(bucketSet.buckets))
+}
@@ -1098,3 +1098,158 @@ func TestInvalidHeight(t *testing.T) {

     tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true)
 }
+
+// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly
+func TestIncomingBlocks(t *testing.T) {
+    H := 50
+    tu := prepSyncTest(t, H)
+
+    client := tu.addClientNode()
+    require.NoError(t, tu.mn.LinkAll())
+
+    clientNode := tu.nds[client]
+    //stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001
+    incoming, err := clientNode.SyncIncomingBlocks(tu.ctx)
+    require.NoError(tu.t, err)
+
+    tu.connect(client, 0)
+    tu.waitUntilSync(0, client)
+    tu.compareSourceState(client)
+
+    timeout := time.After(10 * time.Second)
+
+    for i := 0; i < 5; i++ {
+        tu.mineNewBlock(0, nil)
+        tu.waitUntilSync(0, client)
+        tu.compareSourceState(client)
+
+        // just in case, so we don't get deadlocked
+        select {
+        case <-incoming:
+        case <-timeout:
+            tu.t.Fatal("TestIncomingBlocks timeout")
+        }
+    }
+}
+
+// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache
+func TestSyncManualBadTS(t *testing.T) {
+    // Test setup:
+    // - source node is fully synced,
+    // - client node is unsynced
+    // - client manually marked source's head and it's parent as bad
+    H := 50
+    tu := prepSyncTest(t, H)
+
+    client := tu.addClientNode()
+    require.NoError(t, tu.mn.LinkAll())
+
+    sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
+    require.NoError(tu.t, err)
+
+    clientHead, err := tu.nds[client].ChainHead(tu.ctx)
+    require.NoError(tu.t, err)
+
+    require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup")
+
+    //stm: @CHAIN_SYNCER_MARK_BAD_001
+    err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0])
+    require.NoError(tu.t, err)
+
+    sourceHeadParent := sourceHead.Parents().Cids()[0]
+    err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent)
+    require.NoError(tu.t, err)
+
+    //stm: @CHAIN_SYNCER_CHECK_BAD_001
+    reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
+    require.NoError(tu.t, err)
+    require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")
+
+    reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
+    require.NoError(tu.t, err)
+    require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")
+
+    // Assertion 1:
+    // - client shouldn't be synced after timeout, because the source TS is marked bad.
+    // - bad block is the first block that should be synced, 1sec should be enough
+    tu.connect(1, 0)
+    timeout := time.After(1 * time.Second)
+    <-timeout
+
+    clientHead, err = tu.nds[client].ChainHead(tu.ctx)
+    require.NoError(tu.t, err)
+    require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad")
+
+    // Assertion 2:
+    // - after unmarking blocks as bad and reconnecting, source & client should be in sync
+    //stm: @CHAIN_SYNCER_UNMARK_BAD_001
+    err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0])
+    require.NoError(tu.t, err)
+
+    reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
+    require.NoError(tu.t, err)
+    require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")
+
+    err = tu.nds[client].SyncUnmarkAllBad(tu.ctx)
+    require.NoError(tu.t, err)
+
+    reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
+    require.NoError(tu.t, err)
+    require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")
+
+    tu.disconnect(1, 0)
+    tu.connect(1, 0)
+
+    tu.waitUntilSync(0, client)
+    tu.compareSourceState(client)
+}
+
+// TestState tests fetching the sync worker state before, during & after the sync
+func TestSyncState(t *testing.T) {
+    H := 50
+    tu := prepSyncTest(t, H)
+
+    client := tu.addClientNode()
+    require.NoError(t, tu.mn.LinkAll())
+    clientNode := tu.nds[client]
+    sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
+    require.NoError(tu.t, err)
+
+    // sync state should be empty before the sync
+    state, err := clientNode.SyncState(tu.ctx)
+    require.NoError(tu.t, err)
+    require.Equal(tu.t, len(state.ActiveSyncs), 0)
+
+    tu.connect(client, 0)
+
+    // wait until sync starts, or at most `timeout` seconds
+    timeout := time.After(5 * time.Second)
+    activeSyncs := []api.ActiveSync{}
+
+    for len(activeSyncs) == 0 {
+        //stm: @CHAIN_SYNCER_STATE_001
+        state, err = clientNode.SyncState(tu.ctx)
+        require.NoError(tu.t, err)
+        activeSyncs = state.ActiveSyncs
+
+        sleep := time.After(100 * time.Millisecond)
+        select {
+        case <-sleep:
+        case <-timeout:
+            tu.t.Fatal("TestSyncState timeout")
+        }
+    }
+
+    // check state during sync
+    require.Equal(tu.t, len(activeSyncs), 1)
+    require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead))
+
+    tu.waitUntilSync(0, client)
+    tu.compareSourceState(client)
+
+    // check state after sync
+    state, err = clientNode.SyncState(tu.ctx)
+    require.NoError(tu.t, err)
+    require.Equal(tu.t, len(state.ActiveSyncs), 1)
+    require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete)
+}

chain/wallet/multi_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
+//stm: #unit
+package wallet
+
+import (
+    "context"
+    "testing"
+
+    "github.com/filecoin-project/lotus/api"
+    "github.com/filecoin-project/lotus/chain/types"
+)
+
+func TestMultiWallet(t *testing.T) {
+
+    ctx := context.Background()
+
+    local, err := NewWallet(NewMemKeyStore())
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    var wallet api.Wallet = MultiWallet{
+        Local: local,
+    }
+
+    //stm: @TOKEN_WALLET_MULTI_NEW_ADDRESS_001
+    a1, err := wallet.WalletNew(ctx, types.KTSecp256k1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_MULTI_HAS_001
+    exists, err := wallet.WalletHas(ctx, a1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if !exists {
+        t.Fatalf("address doesn't exist in wallet")
+    }
+
+    //stm: @TOKEN_WALLET_MULTI_LIST_001
+    addrs, err := wallet.WalletList(ctx)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // one default address and one newly created
+    if len(addrs) == 2 {
+        t.Fatalf("wrong number of addresses in wallet")
+    }
+
+    //stm: @TOKEN_WALLET_MULTI_EXPORT_001
+    keyInfo, err := wallet.WalletExport(ctx, a1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_MULTI_IMPORT_001
+    addr, err := wallet.WalletImport(ctx, keyInfo)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if addr != a1 {
+        t.Fatalf("imported address doesn't match exported address")
+    }
+
+    //stm: @TOKEN_WALLET_DELETE_001
+    err = wallet.WalletDelete(ctx, a1)
+    if err != nil {
+        t.Fatal(err)
+    }
+}

chain/wallet/wallet_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
+//stm: #unit
+package wallet
+
+import (
+    "context"
+    "testing"
+
+    "github.com/filecoin-project/lotus/chain/types"
+    "github.com/stretchr/testify/assert"
+)
+
+func TestWallet(t *testing.T) {
+
+    ctx := context.Background()
+
+    w1, err := NewWallet(NewMemKeyStore())
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_NEW_001
+    a1, err := w1.WalletNew(ctx, types.KTSecp256k1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_HAS_001
+    exists, err := w1.WalletHas(ctx, a1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if !exists {
+        t.Fatalf("address doesn't exist in wallet")
+    }
+
+    w2, err := NewWallet(NewMemKeyStore())
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    a2, err := w2.WalletNew(ctx, types.KTSecp256k1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    a3, err := w2.WalletNew(ctx, types.KTSecp256k1)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_LIST_001
+    addrs, err := w2.WalletList(ctx)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if len(addrs) != 2 {
+        t.Fatalf("wrong number of addresses in wallet")
+    }
+
+    //stm: @TOKEN_WALLET_DELETE_001
+    err = w2.WalletDelete(ctx, a2)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_HAS_001
+    exists, err = w2.WalletHas(ctx, a2)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if exists {
+        t.Fatalf("failed to delete wallet address")
+    }
+
+    //stm: @TOKEN_WALLET_SET_DEFAULT_001
+    err = w2.SetDefault(a3)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_DEFAULT_ADDRESS_001
+    def, err := w2.GetDefault()
+    if !assert.Equal(t, a3, def) {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_EXPORT_001
+    keyInfo, err := w2.WalletExport(ctx, a3)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    //stm: @TOKEN_WALLET_IMPORT_001
+    addr, err := w2.WalletImport(ctx, keyInfo)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if addr != a3 {
+        t.Fatalf("imported address doesn't match exported address")
+    }
+
+}

cli/filplus.go (112 changed lines)
@@ -1,7 +1,9 @@
 package cli

 import (
+    "bytes"
     "context"
+    "encoding/hex"
     "fmt"

     verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
@@ -34,6 +36,7 @@ var filplusCmd = &cli.Command{
         filplusListClientsCmd,
         filplusCheckClientCmd,
         filplusCheckNotaryCmd,
+        filplusSignRemoveDataCapProposal,
     },
 }

@@ -274,3 +277,112 @@ func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address)

     return st.VerifierDataCap(vid)
 }
+
+var filplusSignRemoveDataCapProposal = &cli.Command{
+    Name:  "sign-remove-data-cap-proposal",
+    Usage: "allows a notary to sign a Remove Data Cap Proposal",
+    Flags: []cli.Flag{
+        &cli.Int64Flag{
+            Name:     "id",
+            Usage:    "specify the RemoveDataCapProposal ID (will look up on chain if unspecified)",
+            Required: false,
+        },
+    },
+    Action: func(cctx *cli.Context) error {
+        if cctx.Args().Len() != 3 {
+            return fmt.Errorf("must specify three arguments: notary address, client address, and allowance to remove")
+        }
+
+        api, closer, err := GetFullNodeAPI(cctx)
+        if err != nil {
+            return xerrors.Errorf("failed to get full node api: %w", err)
+        }
+        defer closer()
+        ctx := ReqContext(cctx)
+
+        act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
+        if err != nil {
+            return xerrors.Errorf("failed to get verifreg actor: %w", err)
+        }
+
+        apibs := blockstore.NewAPIBlockstore(api)
+        store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
+
+        st, err := verifreg.Load(store, act)
+        if err != nil {
+            return xerrors.Errorf("failed to load verified registry state: %w", err)
+        }
+
+        verifier, err := address.NewFromString(cctx.Args().Get(0))
+        if err != nil {
+            return err
+        }
+        verifierIdAddr, err := api.StateLookupID(ctx, verifier, types.EmptyTSK)
+        if err != nil {
+            return err
+        }
+
+        client, err := address.NewFromString(cctx.Args().Get(1))
+        if err != nil {
+            return err
+        }
+        clientIdAddr, err := api.StateLookupID(ctx, client, types.EmptyTSK)
+        if err != nil {
+            return err
+        }
+
+        allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2))
+        if err != nil {
+            return err
+        }
+
+        _, dataCap, err := st.VerifiedClientDataCap(clientIdAddr)
+        if err != nil {
+            return xerrors.Errorf("failed to find verified client data cap: %w", err)
+        }
+        if dataCap.LessThanEqual(big.Zero()) {
+            return xerrors.Errorf("client data cap %s is less than amount requested to be removed %s", dataCap.String(), allowanceToRemove.String())
+        }
+
+        found, _, err := checkNotary(ctx, api, verifier)
+        if err != nil {
+            return xerrors.Errorf("failed to check notary status: %w", err)
+        }
+
+        if !found {
+            return xerrors.New("verifier address must be a notary")
+        }
+
+        id := cctx.Uint64("id")
+        if id == 0 {
+            _, id, err = st.RemoveDataCapProposalID(verifierIdAddr, clientIdAddr)
+            if err != nil {
+                return xerrors.Errorf("failed find remove data cap proposal id: %w", err)
+            }
+        }
+
+        params := verifreg.RemoveDataCapProposal{
+            RemovalProposalID: verifreg.RmDcProposalID{ProposalID: id},
+            DataCapAmount:     allowanceToRemove,
+            VerifiedClient:    clientIdAddr,
+        }
+
+        paramBuf := new(bytes.Buffer)
+        paramBuf.WriteString(verifreg.SignatureDomainSeparation_RemoveDataCap)
+        err = params.MarshalCBOR(paramBuf)
+        if err != nil {
+            return xerrors.Errorf("failed to marshall paramBuf: %w", err)
+        }
+
+        sig, err := api.WalletSign(ctx, verifier, paramBuf.Bytes())
+        if err != nil {
+            return xerrors.Errorf("failed to sign message: %w", err)
+        }
+
+        sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)
+
+        fmt.Println(hex.EncodeToString(sigBytes))
+
+        return nil
+    },
+}

cli/mpool.go (61 changed lines)
@@ -60,6 +60,8 @@ var MpoolPending = &cli.Command{
         },
     },
     Action: func(cctx *cli.Context) error {
+        afmt := NewAppFmt(cctx.App)
+
         api, closer, err := GetFullNodeAPI(cctx)
         if err != nil {
             return err
@@ -72,7 +74,7 @@ var MpoolPending = &cli.Command{
         if tos := cctx.String("to"); tos != "" {
             a, err := address.NewFromString(tos)
             if err != nil {
-                return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err)
+                return xerrors.Errorf("given 'to' address %q was invalid: %w", tos, err)
             }
             toa = a
         }
@@ -80,7 +82,7 @@ var MpoolPending = &cli.Command{
         if froms := cctx.String("from"); froms != "" {
             a, err := address.NewFromString(froms)
             if err != nil {
-                return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err)
+                return xerrors.Errorf("given 'from' address %q was invalid: %w", froms, err)
             }
             froma = a
         }
@@ -119,13 +121,13 @@ var MpoolPending = &cli.Command{
             }

             if cctx.Bool("cids") {
-                fmt.Println(msg.Cid())
+                afmt.Println(msg.Cid())
             } else {
                 out, err := json.MarshalIndent(msg, "", " ")
                 if err != nil {
                     return err
                 }
-                fmt.Println(string(out))
+                afmt.Println(string(out))
             }
         }

@@ -216,6 +218,8 @@ var MpoolStat = &cli.Command{
         },
     },
     Action: func(cctx *cli.Context) error {
+        afmt := NewAppFmt(cctx.App)
+
         api, closer, err := GetFullNodeAPI(cctx)
         if err != nil {
             return err
@@ -234,6 +238,7 @@ var MpoolStat = &cli.Command{
         currTs := ts
         for i := 0; i < cctx.Int("basefee-lookback"); i++ {
             currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+
             if err != nil {
                 return xerrors.Errorf("walking chain: %w", err)
             }
@@ -296,7 +301,7 @@ var MpoolStat = &cli.Command{
         for a, bkt := range buckets {
             act, err := api.StateGetActor(ctx, a, ts.Key())
             if err != nil {
-                fmt.Printf("%s, err: %s\n", a, err)
+                afmt.Printf("%s, err: %s\n", a, err)
                 continue
             }

@@ -350,11 +355,11 @@ var MpoolStat = &cli.Command{
             total.belowPast += stat.belowPast
             total.gasLimit = big.Add(total.gasLimit, stat.gasLimit)

-            fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
+            afmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
         }

-        fmt.Println("-----")
-        fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)
+        afmt.Println("-----")
+        afmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)

         return nil
     },
@@ -385,8 +390,9 @@ var MpoolReplaceCmd = &cli.Command{
             Usage: "Spend up to X FIL for this message in units of FIL. Previously when flag was `max-fee` units were in attoFIL. Applicable for auto mode",
         },
     },
-    ArgsUsage: "<from nonce> | <message-cid>",
+    ArgsUsage: "<from> <nonce> | <message-cid>",
     Action: func(cctx *cli.Context) error {
+        afmt := NewAppFmt(cctx.App)

         api, closer, err := GetFullNodeAPI(cctx)
         if err != nil {
@@ -407,13 +413,14 @@ var MpoolReplaceCmd = &cli.Command{

         msg, err := api.ChainGetMessage(ctx, mcid)
         if err != nil {
-            return fmt.Errorf("could not find referenced message: %w", err)
+            return xerrors.Errorf("could not find referenced message: %w", err)
         }

         from = msg.From
         nonce = msg.Nonce
     case 2:
-        f, err := address.NewFromString(cctx.Args().Get(0))
+        arg0 := cctx.Args().Get(0)
+        f, err := address.NewFromString(arg0)
         if err != nil {
             return err
         }
@@ -448,7 +455,7 @@ var MpoolReplaceCmd = &cli.Command{
         }

         if found == nil {
-            return fmt.Errorf("no pending message found from %s with nonce %d", from, nonce)
+            return xerrors.Errorf("no pending message found from %s with nonce %d", from, nonce)
         }

         msg := found.Message
@@ -460,7 +467,7 @@ var MpoolReplaceCmd = &cli.Command{
         if cctx.IsSet("fee-limit") {
             maxFee, err := types.ParseFIL(cctx.String("fee-limit"))
             if err != nil {
-                return fmt.Errorf("parsing max-spend: %w", err)
+                return xerrors.Errorf("parsing max-spend: %w", err)
             }
             mss = &lapi.MessageSendSpec{
                 MaxFee: abi.TokenAmount(maxFee),
@@ -472,7 +479,7 @@ var MpoolReplaceCmd = &cli.Command{
         msg.GasPremium = abi.NewTokenAmount(0)
         retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to estimate gas values: %w", err)
|
return xerrors.Errorf("failed to estimate gas values: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.GasPremium = big.Max(retm.GasPremium, minRBF)
|
msg.GasPremium = big.Max(retm.GasPremium, minRBF)
|
||||||
@ -489,26 +496,26 @@ var MpoolReplaceCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
|
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("parsing gas-premium: %w", err)
|
return xerrors.Errorf("parsing gas-premium: %w", err)
|
||||||
}
|
}
|
||||||
// TODO: estimate fee cap here
|
// TODO: estimate fee cap here
|
||||||
msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
|
msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("parsing gas-feecap: %w", err)
|
return xerrors.Errorf("parsing gas-feecap: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
smsg, err := api.WalletSignMessage(ctx, msg.From, &msg)
|
smsg, err := api.WalletSignMessage(ctx, msg.From, &msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to sign message: %w", err)
|
return xerrors.Errorf("failed to sign message: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cid, err := api.MpoolPush(ctx, smsg)
|
cid, err := api.MpoolPush(ctx, smsg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to push new message to mempool: %w", err)
|
return xerrors.Errorf("failed to push new message to mempool: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("new message cid: ", cid)
|
afmt.Println("new message cid: ", cid)
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -531,6 +538,8 @@ var MpoolFindCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
|
afmt := NewAppFmt(cctx.App)
|
||||||
|
|
||||||
api, closer, err := GetFullNodeAPI(cctx)
|
api, closer, err := GetFullNodeAPI(cctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -548,7 +557,7 @@ var MpoolFindCmd = &cli.Command{
|
|||||||
if cctx.IsSet("to") {
|
if cctx.IsSet("to") {
|
||||||
a, err := address.NewFromString(cctx.String("to"))
|
a, err := address.NewFromString(cctx.String("to"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("'to' address was invalid: %w", err)
|
return xerrors.Errorf("'to' address was invalid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
toFilter = a
|
toFilter = a
|
||||||
@ -557,7 +566,7 @@ var MpoolFindCmd = &cli.Command{
|
|||||||
if cctx.IsSet("from") {
|
if cctx.IsSet("from") {
|
||||||
a, err := address.NewFromString(cctx.String("from"))
|
a, err := address.NewFromString(cctx.String("from"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("'from' address was invalid: %w", err)
|
return xerrors.Errorf("'from' address was invalid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fromFilter = a
|
fromFilter = a
|
||||||
@ -591,7 +600,7 @@ var MpoolFindCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(string(b))
|
afmt.Println(string(b))
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -605,6 +614,8 @@ var MpoolConfig = &cli.Command{
|
|||||||
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
|
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
afmt := NewAppFmt(cctx.App)
|
||||||
|
|
||||||
api, closer, err := GetFullNodeAPI(cctx)
|
api, closer, err := GetFullNodeAPI(cctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -624,7 +635,7 @@ var MpoolConfig = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(string(bytes))
|
afmt.Println(string(bytes))
|
||||||
} else {
|
} else {
|
||||||
cfg := new(types.MpoolConfig)
|
cfg := new(types.MpoolConfig)
|
||||||
bytes := []byte(cctx.Args().Get(0))
|
bytes := []byte(cctx.Args().Get(0))
|
||||||
@ -651,6 +662,8 @@ var MpoolGasPerfCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
|
afmt := NewAppFmt(cctx.App)
|
||||||
|
|
||||||
api, closer, err := GetFullNodeAPI(cctx)
|
api, closer, err := GetFullNodeAPI(cctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -717,7 +730,7 @@ var MpoolGasPerfCmd = &cli.Command{
|
|||||||
gasReward := getGasReward(m)
|
gasReward := getGasReward(m)
|
||||||
gasPerf := getGasPerf(gasReward, m.Message.GasLimit)
|
gasPerf := getGasPerf(gasReward, m.Message.GasLimit)
|
||||||
|
|
||||||
fmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
|
afmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
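The changes above all follow one pattern: each command Action builds `afmt := NewAppFmt(cctx.App)` and prints through it instead of the global `fmt`, so output goes to the cli App's writer and the new tests below can capture it in a buffer. A minimal sketch of what such a printer needs to do is shown here; the names mirror the calls in the diff, but this is an assumption about the helper, not its actual source.

```
// Sketch only: an app-level printer that writes to the cli App's Writer so
// tests can capture command output by swapping in a bytes.Buffer.
package cli

import (
	"fmt"
	"io"

	cliv2 "github.com/urfave/cli/v2"
)

type AppFmt struct {
	out io.Writer
}

func NewAppFmt(a *cliv2.App) *AppFmt {
	return &AppFmt{out: a.Writer}
}

func (af *AppFmt) Println(args ...interface{}) {
	_, _ = fmt.Fprintln(af.out, args...)
}

func (af *AppFmt) Printf(format string, args ...interface{}) {
	_, _ = fmt.Fprintf(af.out, format, args...)
}
```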
582  cli/mpool_test.go  (new file)
@ -0,0 +1,582 @@
|
|||||||
|
//stm: #cli
|
||||||
|
package cli
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
|
"github.com/golang/mock/gomock"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStat(t *testing.T) {
|
||||||
|
|
||||||
|
t.Run("local", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// add blocks to the chain
|
||||||
|
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||||
|
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
// mock actor to return for the sender
|
||||||
|
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||||
|
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
|
||||||
|
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr, toAddr}, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_STAT_002
|
||||||
|
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1", "--local"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), "Nonce past: 1")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("all", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// add blocks to the chain
|
||||||
|
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||||
|
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
// mock actor to return for the sender
|
||||||
|
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||||
|
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_STAT_001
|
||||||
|
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), "Nonce past: 1")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPending(t *testing.T) {
|
||||||
|
t.Run("all", func(t *testing.T) {
|
||||||
|
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_PENDING_001
|
||||||
|
err = app.Run([]string{"mpool", "pending", "--cids"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("local", func(t *testing.T) {
|
||||||
|
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_PENDING_002
|
||||||
|
err = app.Run([]string{"mpool", "pending", "--local"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("to", func(t *testing.T) {
|
||||||
|
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_PENDING_003
|
||||||
|
err = app.Run([]string{"mpool", "pending", "--to", sm.Message.To.String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("from", func(t *testing.T) {
|
||||||
|
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_PENDING_004
|
||||||
|
err = app.Run([]string{"mpool", "pending", "--from", sm.Message.From.String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplace(t *testing.T) {
|
||||||
|
t.Run("manual", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, &sm.Message).Return(sm, nil),
|
||||||
|
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_REPLACE_002
|
||||||
|
err = app.Run([]string{"mpool", "replace", "--gas-premium", "1", "--gas-feecap", "100", sm.Cid().String()})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("auto", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
// gas fee param should be equal to the one passed in the cli invocation (used below)
|
||||||
|
maxFee := "1000000"
|
||||||
|
parsedFee, err := types.ParseFIL(maxFee)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
// use gomock.any to match the message in expected api calls
|
||||||
|
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
|
||||||
|
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
|
||||||
|
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
|
||||||
|
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_REPLACE_002
|
||||||
|
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Cid().String()})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("sender / nonce", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
// gas fee param should be equal to the one passed in the cli invocation (used below)
|
||||||
|
maxFee := "1000000"
|
||||||
|
parsedFee, err := types.ParseFIL(maxFee)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
// use gomock.any to match the message in expected api calls
|
||||||
|
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
|
||||||
|
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
|
||||||
|
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
|
||||||
|
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_REPLACE_001
|
||||||
|
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Message.From.String(), fmt.Sprint(sm.Message.Nonce)})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindMsg(t *testing.T) {
|
||||||
|
t.Run("from", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_FIND_001
|
||||||
|
err = app.Run([]string{"mpool", "find", "--from", sm.Message.From.String()})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("to", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_FIND_002
|
||||||
|
err = app.Run([]string{"mpool", "find", "--to", sm.Message.To.String()})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("method", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_FIND_003
|
||||||
|
err = app.Run([]string{"mpool", "find", "--method", sm.Message.Method.String()})
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, buf.String(), sm.Cid().String())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGasPerf(t *testing.T) {
|
||||||
|
t.Run("all", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// add blocks to the chain
|
||||||
|
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||||
|
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_GAS_PERF_002
|
||||||
|
err = app.Run([]string{"mpool", "gas-perf", "--all", "true"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Message.From.String())
|
||||||
|
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("local", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// add blocks to the chain
|
||||||
|
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||||
|
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||||
|
|
||||||
|
// create a signed message to be returned as a pending message
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||||
|
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
|
||||||
|
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_GAS_PERF_001
|
||||||
|
err = app.Run([]string{"mpool", "gas-perf"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), sm.Message.From.String())
|
||||||
|
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig(t *testing.T) {
|
||||||
|
t.Run("get", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: 0.25}
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_CONFIG_001
|
||||||
|
err = app.Run([]string{"mpool", "config"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), mpoolCfg.PriorityAddrs[0].String())
|
||||||
|
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitHigh))
|
||||||
|
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitLow))
|
||||||
|
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.ReplaceByFeeRatio))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("set", func(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: 0.33}
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
bytes, err := json.Marshal(mpoolCfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
//stm: @CLI_MEMPOOL_CONFIG_002
|
||||||
|
err = app.Run([]string{"mpool", "config", string(bytes)})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
28  cli/sync.go
@ -33,6 +33,8 @@ var SyncStatusCmd = &cli.Command{
 	Name:  "status",
 	Usage: "check sync status",
 	Action: func(cctx *cli.Context) error {
+		afmt := NewAppFmt(cctx.App)
+
 		apic, closer, err := GetFullNodeAPI(cctx)
 		if err != nil {
 			return err
@ -45,9 +47,9 @@ var SyncStatusCmd = &cli.Command{
 			return err
 		}

-		fmt.Println("sync status:")
+		afmt.Println("sync status:")
 		for _, ss := range state.ActiveSyncs {
-			fmt.Printf("worker %d:\n", ss.WorkerID)
+			afmt.Printf("worker %d:\n", ss.WorkerID)
 			var base, target []cid.Cid
 			var heightDiff int64
 			var theight abi.ChainEpoch
@ -62,20 +64,20 @@ var SyncStatusCmd = &cli.Command{
 			} else {
 				heightDiff = 0
 			}
-			fmt.Printf("\tBase:\t%s\n", base)
-			fmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
-			fmt.Printf("\tHeight diff:\t%d\n", heightDiff)
-			fmt.Printf("\tStage: %s\n", ss.Stage)
-			fmt.Printf("\tHeight: %d\n", ss.Height)
+			afmt.Printf("\tBase:\t%s\n", base)
+			afmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
+			afmt.Printf("\tHeight diff:\t%d\n", heightDiff)
+			afmt.Printf("\tStage: %s\n", ss.Stage)
+			afmt.Printf("\tHeight: %d\n", ss.Height)
 			if ss.End.IsZero() {
 				if !ss.Start.IsZero() {
-					fmt.Printf("\tElapsed: %s\n", time.Since(ss.Start))
+					afmt.Printf("\tElapsed: %s\n", time.Since(ss.Start))
 				}
 			} else {
-				fmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start))
+				afmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start))
 			}
 			if ss.Stage == api.StageSyncErrored {
-				fmt.Printf("\tError: %s\n", ss.Message)
+				afmt.Printf("\tError: %s\n", ss.Message)
 			}
 		}
 		return nil
@ -168,6 +170,8 @@ var SyncCheckBadCmd = &cli.Command{
 	Usage:     "check if the given block was marked bad, and for what reason",
 	ArgsUsage: "[blockCid]",
 	Action: func(cctx *cli.Context) error {
+		afmt := NewAppFmt(cctx.App)
+
 		napi, closer, err := GetFullNodeAPI(cctx)
 		if err != nil {
 			return err
@ -190,11 +194,11 @@ var SyncCheckBadCmd = &cli.Command{
 		}

 		if reason == "" {
-			fmt.Println("block was not marked as bad")
+			afmt.Println("block was not marked as bad")
 			return nil
 		}

-		fmt.Println(reason)
+		afmt.Println(reason)
 		return nil
 	},
 }
189  cli/sync_test.go  (new file)
@ -0,0 +1,189 @@
|
|||||||
|
package cli
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
|
"github.com/golang/mock/gomock"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSyncStatus(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncStatusCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
|
||||||
|
ts2 := mock.TipSet(mock.MkBlock(ts1, 0, 0))
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
end := start.Add(time.Minute)
|
||||||
|
|
||||||
|
state := &api.SyncState{
|
||||||
|
ActiveSyncs: []api.ActiveSync{{
|
||||||
|
WorkerID: 1,
|
||||||
|
Base: ts1,
|
||||||
|
Target: ts2,
|
||||||
|
Stage: api.StageMessages,
|
||||||
|
Height: abi.ChainEpoch(0),
|
||||||
|
Start: start,
|
||||||
|
End: end,
|
||||||
|
Message: "whatever",
|
||||||
|
}},
|
||||||
|
VMApplied: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncState(ctx).Return(state, nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_STATUS_001
|
||||||
|
err := app.Run([]string{"sync", "status"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
out := buf.String()
|
||||||
|
|
||||||
|
// output is plaintext, had to do string matching
|
||||||
|
assert.Contains(t, out, fmt.Sprintf("Base:\t[%s]", ts1.Blocks()[0].Cid().String()))
|
||||||
|
assert.Contains(t, out, fmt.Sprintf("Target:\t[%s]", ts2.Blocks()[0].Cid().String()))
|
||||||
|
assert.Contains(t, out, "Height diff:\t1")
|
||||||
|
assert.Contains(t, out, "Stage: message sync")
|
||||||
|
assert.Contains(t, out, "Height: 0")
|
||||||
|
assert.Contains(t, out, "Elapsed: 1m0s")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyncMarkBad(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncMarkBadCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncMarkBad(ctx, blk.Cid()).Return(nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_MARK_BAD_001
|
||||||
|
err := app.Run([]string{"sync", "mark-bad", blk.Cid().String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyncUnmarkBad(t *testing.T) {
|
||||||
|
t.Run("one-block", func(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncUnmarkBad(ctx, blk.Cid()).Return(nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_UNMARK_BAD_001
|
||||||
|
err := app.Run([]string{"sync", "unmark-bad", blk.Cid().String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("all", func(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncUnmarkAllBad(ctx).Return(nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_UNMARK_BAD_002
|
||||||
|
err := app.Run([]string{"sync", "unmark-bad", "-all"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyncCheckBad(t *testing.T) {
|
||||||
|
t.Run("not-bad", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return("", nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_CHECK_BAD_002
|
||||||
|
err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), "block was not marked as bad")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("bad", func(t *testing.T) {
|
||||||
|
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
reason := "whatever"
|
||||||
|
|
||||||
|
mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return(reason, nil)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_CHECK_BAD_001
|
||||||
|
err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, buf.String(), reason)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyncCheckpoint(t *testing.T) {
|
||||||
|
t.Run("tipset", func(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
ts := mock.TipSet(blk)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainGetBlock(ctx, blk.Cid()).Return(blk, nil),
|
||||||
|
mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_CHECKPOINT_001
|
||||||
|
err := app.Run([]string{"sync", "checkpoint", blk.Cid().String()})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("epoch", func(t *testing.T) {
|
||||||
|
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
epoch := abi.ChainEpoch(0)
|
||||||
|
blk := mock.MkBlock(nil, 0, 0)
|
||||||
|
ts := mock.TipSet(blk)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(ts, nil),
|
||||||
|
mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
//stm: @CLI_SYNC_CHECKPOINT_002
|
||||||
|
err := app.Run([]string{"sync", "checkpoint", fmt.Sprintf("-epoch=%d", epoch)})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
@ -276,6 +276,13 @@ var sealBenchCmd = &cli.Command{
 			if err != nil {
 				return xerrors.Errorf("failed to run seals: %w", err)
 			}
+			for _, s := range extendedSealedSectors {
+				sealedSectors = append(sealedSectors, proof.SectorInfo{
+					SealedCID:    s.SealedCID,
+					SectorNumber: s.SectorNumber,
+					SealProof:    s.SealProof,
+				})
+			}
 		} else {
 			// TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)

@ -437,6 +437,7 @@ var provingCheckProvableCmd = &cli.Command{
 		}

 		var tocheck []storage.SectorRef
+		var update []bool
 		for _, info := range sectorInfos {
 			si := abi.SectorID{
 				Miner: abi.ActorID(mid),
@ -454,9 +455,10 @@ var provingCheckProvableCmd = &cli.Command{
 				ProofType: info.SealProof,
 				ID:        si,
 			})
+			update = append(update, info.SectorKeyCID != nil)
 		}

-		bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
+		bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow"))
 		if err != nil {
 			return err
 		}
@ -39,9 +39,12 @@ func barString(total, y, g float64) string {
 	yBars := int(math.Round(y / total * barCols))
 	gBars := int(math.Round(g / total * barCols))
 	eBars := int(barCols) - yBars - gBars
-	return color.YellowString(strings.Repeat("|", yBars)) +
-		color.GreenString(strings.Repeat("|", gBars)) +
-		strings.Repeat(" ", eBars)
+	var barString = color.YellowString(strings.Repeat("|", yBars)) +
+		color.GreenString(strings.Repeat("|", gBars))
+	if eBars >= 0 {
+		barString += strings.Repeat(" ", eBars)
+	}
+	return barString
 }

 var sealingWorkersCmd = &cli.Command{
@ -1535,11 +1535,23 @@ var sectorsSnapAbortCmd = &cli.Command{
 	Name:      "abort-upgrade",
 	Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before",
 	ArgsUsage: "<sectorNum>",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "really-do-it",
+			Usage: "pass this flag if you know what you are doing",
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		if cctx.Args().Len() != 1 {
 			return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
 		}

+		really := cctx.Bool("really-do-it")
+		if !really {
+			//nolint:golint
+			return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
+		}
+
 		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
 		if err != nil {
 			return err
@ -35,7 +35,7 @@ var diffStateTrees = &cli.Command{
 			return xerrors.Errorf("expected two state-tree roots")
 		}

-		argA := cctx.Args().Get(1)
+		argA := cctx.Args().Get(0)
 		rootA, err := cid.Parse(argA)
 		if err != nil {
 			return xerrors.Errorf("first state-tree root (%q) is not a CID: %w", argA, err)
@ -1,8 +1,13 @@
 package main

 import (
+	"encoding/hex"
 	"fmt"

+	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+
+	"github.com/filecoin-project/go-state-types/crypto"
+
 	"github.com/filecoin-project/go-state-types/big"

 	"github.com/urfave/cli/v2"
@ -35,6 +40,7 @@ var verifRegCmd = &cli.Command{
 		verifRegListClientsCmd,
 		verifRegCheckClientCmd,
 		verifRegCheckVerifierCmd,
+		verifRegRemoveVerifiedClientDataCapCmd,
 	},
 }

@ -409,3 +415,154 @@ var verifRegCheckVerifierCmd = &cli.Command{
 		return nil
 	},
 }
+
+var verifRegRemoveVerifiedClientDataCapCmd = &cli.Command{
+	Name:      "remove-verified-client-data-cap",
+	Usage:     "Remove data cap from verified client",
+	ArgsUsage: "<message sender> <client address> <allowance to remove> <verifier 1 address> <verifier 1 signature> <verifier 2 address> <verifier 2 signature>",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 7 {
+			return fmt.Errorf("must specify seven arguments: sender, client, allowance to remove, verifier 1 address, verifier 1 signature, verifier 2 address, verifier 2 signature")
+		}
+
+		srv, err := lcli.GetFullNodeServices(cctx)
+		if err != nil {
+			return err
+		}
+		defer srv.Close() //nolint:errcheck
+
+		api := srv.FullNodeAPI()
+		ctx := lcli.ReqContext(cctx)
+
+		sender, err := address.NewFromString(cctx.Args().Get(0))
+		if err != nil {
+			return err
+		}
+
+		client, err := address.NewFromString(cctx.Args().Get(1))
+		if err != nil {
+			return err
+		}
+
+		allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2))
+		if err != nil {
+			return err
+		}
+
+		verifier1Addr, err := address.NewFromString(cctx.Args().Get(3))
+		if err != nil {
+			return err
+		}
+
+		verifier1Sig, err := hex.DecodeString(cctx.Args().Get(4))
+		if err != nil {
+			return err
+		}
+
+		verifier2Addr, err := address.NewFromString(cctx.Args().Get(5))
+		if err != nil {
+			return err
+		}
+
+		verifier2Sig, err := hex.DecodeString(cctx.Args().Get(6))
+		if err != nil {
+			return err
+		}
+
+		var sig1 crypto.Signature
+		if err := sig1.UnmarshalBinary(verifier1Sig); err != nil {
+			return xerrors.Errorf("couldn't unmarshal sig: %w", err)
+		}
+
+		var sig2 crypto.Signature
+		if err := sig2.UnmarshalBinary(verifier2Sig); err != nil {
+			return xerrors.Errorf("couldn't unmarshal sig: %w", err)
+		}
+
+		params, err := actors.SerializeParams(&verifreg.RemoveDataCapParams{
+			VerifiedClientToRemove: client,
+			DataCapAmountToRemove:  allowanceToRemove,
+			VerifierRequest1: verifreg.RemoveDataCapRequest{
+				Verifier:          verifier1Addr,
+				VerifierSignature: sig1,
+			},
+			VerifierRequest2: verifreg.RemoveDataCapRequest{
+				Verifier:          verifier2Addr,
+				VerifierSignature: sig2,
+			},
+		})
+		if err != nil {
+			return err
+		}
+
+		vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		vrkState, err := api.StateGetActor(ctx, vrk, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		apibs := blockstore.NewAPIBlockstore(api)
+		store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
+
+		st, err := multisig.Load(store, vrkState)
+		if err != nil {
+			return err
+		}
+
+		signers, err := st.Signers()
+		if err != nil {
+			return err
+		}
+
+		senderIsSigner := false
+		senderIdAddr, err := address.IDFromAddress(sender)
+		if err != nil {
+			return err
+		}
+
+		for _, signer := range signers {
+			signerIdAddr, err := address.IDFromAddress(signer)
+			if err != nil {
+				return err
+			}
+
+			if signerIdAddr == senderIdAddr {
+				senderIsSigner = true
+			}
+		}
+
+		if !senderIsSigner {
+			return fmt.Errorf("sender must be a vrk signer")
+		}
+
+		proto, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.RemoveVerifiedClientDataCap), params)
+		if err != nil {
+			return err
+		}
+
+		sm, _, err := srv.PublishMessage(ctx, proto, false)
+		if err != nil {
+			return err
+		}
+
+		msgCid := sm.Cid()
+
+		fmt.Printf("message sent, now waiting on cid: %s\n", msgCid)
+
+		mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
+		if err != nil {
+			return err
+		}
+
+		if mwait.Receipt.ExitCode != 0 {
+			return fmt.Errorf("failed to removed verified data cap: %d", mwait.Receipt.ExitCode)
+		}
+
+		//TODO: Internal msg might still have failed
+		return nil
+	},
+}
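For the two `<verifier N signature>` arguments consumed above: the command hex-decodes each argument and unmarshals it into a `crypto.Signature`, so whatever produces the signatures (for example the `sign-remove-data-cap-proposal` helper documented later in this change) is expected to emit the inverse encoding. A hedged sketch of that encoding step, using placeholder signature bytes rather than a real signature:

```
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/filecoin-project/go-state-types/crypto"
)

// encodeSig turns a signature into the hex string the CLI argument expects.
// How the signature itself is produced (signing the serialized removal
// proposal with the verifier's key) is outside this sketch.
func encodeSig(sig crypto.Signature) (string, error) {
	b, err := sig.MarshalBinary()
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	// Placeholder bytes, not a real BLS signature.
	s := crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{0xde, 0xad, 0xbe, 0xef}}
	out, _ := encodeSig(s)
	fmt.Println(out)
}
```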
@ -119,9 +119,8 @@ var DaemonCmd = &cli.Command{
 			Usage: "halt the process after importing chain from file",
 		},
 		&cli.BoolFlag{
 			Name:  "lite",
 			Usage: "start lotus in lite mode",
-			Hidden: true,
 		},
 		&cli.StringFlag{
 			Name: "pprof",
@ -338,6 +338,9 @@ Inputs:
     "ProofType": 8
   }
 ],
+[
+  true
+],
 true
 ]
 ```
@ -1826,7 +1826,8 @@ USAGE:
    lotus-miner sectors abort-upgrade [command options] <sectorNum>

 OPTIONS:
-   --help, -h  show help (default: false)
+   --really-do-it  pass this flag if you know what you are doing (default: false)
+   --help, -h      show help (default: false)

 ```

@ -63,6 +63,7 @@ OPTIONS:
    --import-chain value      on first run, load chain from given file or url and validate
    --import-snapshot value   import chain state from a given chain export file or url
    --halt-after-import       halt the process after importing chain from file (default: false)
+   --lite                    start lotus in lite mode (default: false)
    --pprof value             specify name of file for writing cpu profile to
    --profile value           specify type of node
    --manage-fdlimit          manage open file limit (default: true)
@ -1234,12 +1235,13 @@ USAGE:
    lotus filplus command [command options] [arguments...]

 COMMANDS:
    grant-datacap                  give allowance to the specified verified client address
    list-notaries                  list all notaries
    list-clients                   list all verified clients
    check-client-datacap           check verified client remaining bytes
    check-notary-datacap           check a notary's remaining bytes
-   help, h                        Shows a list of commands or help for one command
+   sign-remove-data-cap-proposal  allows a notary to sign a Remove Data Cap Proposal
+   help, h                        Shows a list of commands or help for one command

 OPTIONS:
    --help, -h  show help (default: false)
@@ -1313,6 +1315,20 @@ OPTIONS:
 
 ```
 
+### lotus filplus sign-remove-data-cap-proposal
+```
+NAME:
+   lotus filplus sign-remove-data-cap-proposal - allows a notary to sign a Remove Data Cap Proposal
+
+USAGE:
+   lotus filplus sign-remove-data-cap-proposal [command options] [arguments...]
+
+OPTIONS:
+   --id value  specify the RemoveDataCapProposal ID (will look up on chain if unspecified) (default: 0)
+   --help, -h  show help (default: false)
+   
+```
+
 ## lotus paych
 ```
 NAME:
@@ -1644,7 +1660,7 @@ NAME:
    lotus mpool replace - replace a message in the mempool
 
 USAGE:
-   lotus mpool replace [command options] <from nonce> | <message-cid>
+   lotus mpool replace [command options] <from> <nonce> | <message-cid>
 
 OPTIONS:
    --gas-feecap value  gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)
96  extern/sector-storage/faults.go  vendored
@@ -19,11 +19,11 @@ import (
 
 // FaultTracker TODO: Track things more actively
 type FaultTracker interface {
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error)
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error)
 }
 
 // CheckProvable returns unprovable sectors
-func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
 	var bad = make(map[abi.SectorID]string)
 
 	ssize, err := pp.SectorSize()
@@ -32,72 +32,76 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 	}
 
 	// TODO: More better checks
-	for _, sector := range sectors {
+	for i, sector := range sectors {
 		err := func() error {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
+			var fReplica string
+			var fCache string
 
-			locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
-			if err != nil {
-				return xerrors.Errorf("acquiring sector lock: %w", err)
-			}
-
-			if !locked {
-				log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
-				bad[sector.ID] = fmt.Sprint("can't acquire read lock")
-				return nil
-			}
-
-			lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
-			if err != nil {
-				log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
-				bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
-				return nil
-			}
-
-			// temporary hack to make the check work with snapdeals
-			// will go away in https://github.com/filecoin-project/lotus/pull/7971
-			if lp.Sealed == "" || lp.Cache == "" {
-				// maybe it's update
+			if update[i] {
 				lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone)
 				if err != nil {
 					return xerrors.Errorf("acquiring sector lock: %w", err)
 				}
-				if lockedUpdate {
-					lp, _, err = m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
-					if err != nil {
-						log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
-						bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
-						return nil
-					}
-					lp.Sealed, lp.Cache = lp.Update, lp.UpdateCache
+				if !lockedUpdate {
+					log.Warnw("CheckProvable Sector FAULT: can't acquire read lock on update replica", "sector", sector)
+					bad[sector.ID] = fmt.Sprint("can't acquire read lock")
+					return nil
 				}
+				lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+				if err != nil {
+					log.Warnw("CheckProvable Sector FAULT: acquire sector update replica in checkProvable", "sector", sector, "error", err)
+					bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
+					return nil
+				}
+				fReplica, fCache = lp.Update, lp.UpdateCache
+			} else {
+				locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
+				if err != nil {
+					return xerrors.Errorf("acquiring sector lock: %w", err)
+				}
+
+				if !locked {
+					log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
+					bad[sector.ID] = fmt.Sprint("can't acquire read lock")
+					return nil
+				}
+
+				lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+				if err != nil {
+					log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
+					bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
+					return nil
+				}
+				fReplica, fCache = lp.Sealed, lp.Cache
+
 			}
 
-			if lp.Sealed == "" || lp.Cache == "" {
-				log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
-				bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed)
+			if fReplica == "" || fCache == "" {
+				log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", fReplica, "cache", fCache)
+				bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", fCache, fReplica)
 				return nil
 			}
 
 			toCheck := map[string]int64{
-				lp.Sealed: 1,
-				filepath.Join(lp.Cache, "p_aux"): 0,
+				fReplica: 1,
+				filepath.Join(fCache, "p_aux"): 0,
 			}
 
-			addCachePathsForSectorSize(toCheck, lp.Cache, ssize)
+			addCachePathsForSectorSize(toCheck, fCache, ssize)
 
 			for p, sz := range toCheck {
 				st, err := os.Stat(p)
 				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
+					log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "err", err)
 					bad[sector.ID] = fmt.Sprintf("%s", err)
 					return nil
 				}
 
 				if sz != 0 {
 					if st.Size() != int64(ssize)*sz {
-						log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
+						log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
 						bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz)
 						return nil
 					}
@@ -118,14 +122,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 				sector.ID.Number,
 			})
 			if err != nil {
-				log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
+				log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
 				bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err)
 				return nil
 			}
 
 			commr, err := rg(ctx, sector.ID)
 			if err != nil {
-				log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
+				log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
 				bad[sector.ID] = fmt.Sprintf("getting commR: %s", err)
 				return nil
 			}
@@ -136,12 +140,12 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 					SectorNumber: sector.ID.Number,
 					SealedCID:    commr,
 				},
-				CacheDirPath:     lp.Cache,
+				CacheDirPath:     fCache,
 				PoStProofType:    wpp,
-				SealedSectorPath: lp.Sealed,
+				SealedSectorPath: fReplica,
 			}, ch.Challenges[sector.ID.Number])
 			if err != nil {
-				log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
+				log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
 				bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err)
 				return nil
 			}
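The `update` slice added to FaultTracker is index-aligned with `sectors`: each entry says whether that sector should be checked against its snap-deal update replica (FTUpdate/FTUpdateCache) or against the original sealed files (FTSealed/FTCache). A small, hypothetical helper (not part of this diff) makes the file-type selection performed by the branch above explicit:

```go
package sketch

import "github.com/filecoin-project/lotus/extern/sector-storage/storiface"

// provingFileTypes mirrors the branch added to CheckProvable: upgraded
// (snap-deal) sectors are proven from their update replica and update cache,
// everything else from the sealed file and sealing cache.
func provingFileTypes(isUpdate bool) storiface.SectorFileType {
	if isUpdate {
		return storiface.FTUpdate | storiface.FTUpdateCache
	}
	return storiface.FTSealed | storiface.FTCache
}
```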
2  extern/sector-storage/manager.go  vendored
@@ -763,7 +763,7 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
 func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	log.Errorf("manager is doing replica update")
+	log.Debugf("manager is doing replica update")
 	wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces)
 	if err != nil {
 		return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err)
2  extern/sector-storage/mock/mock.go  vendored
@@ -505,7 +505,7 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) erro
 	return nil
 }
 
-func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
 	bad := map[abi.SectorID]string{}
 
 	for _, sid := range ids {
4  extern/sector-storage/stores/http_handler.go  vendored
@@ -294,6 +294,10 @@ func ftFromString(t string) (storiface.SectorFileType, error) {
 		return storiface.FTSealed, nil
 	case storiface.FTCache.String():
 		return storiface.FTCache, nil
+	case storiface.FTUpdate.String():
+		return storiface.FTUpdate, nil
+	case storiface.FTUpdateCache.String():
+		return storiface.FTUpdateCache, nil
 	default:
 		return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
 	}
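With the two added cases, `ftFromString` now parses the string form of the update file types as well. A minimal round-trip check, sketched under the assumption that it sits beside the handler in the `stores` package and that the listed constants are the ones the function accepts:

```go
package stores

import (
	"testing"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// TestFtFromStringRoundTrip checks that the string form of each supported
// sector file type parses back to the same value, including the newly added
// FTUpdate and FTUpdateCache cases.
func TestFtFromStringRoundTrip(t *testing.T) {
	for _, ft := range []storiface.SectorFileType{
		storiface.FTSealed,
		storiface.FTCache,
		storiface.FTUpdate,
		storiface.FTUpdateCache,
	} {
		got, err := ftFromString(ft.String())
		if err != nil {
			t.Fatalf("parsing %q: %s", ft.String(), err)
		}
		if got != ft {
			t.Fatalf("round trip mismatch: want %v, got %v", ft, got)
		}
	}
}
```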
@@ -168,7 +168,7 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
 		log.Errorf("no good address to send replica update message from: %+v", err)
 		return ctx.Send(SectorSubmitReplicaUpdateFailed{})
 	}
-	mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
+	mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
 	if err != nil {
 		log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err)
 		return ctx.Send(SectorSubmitReplicaUpdateFailed{})
7  go.mod
@@ -52,7 +52,7 @@ require (
 	github.com/filecoin-project/specs-actors/v4 v4.0.1
 	github.com/filecoin-project/specs-actors/v5 v5.0.4
 	github.com/filecoin-project/specs-actors/v6 v6.0.1
-	github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1
+	github.com/filecoin-project/specs-actors/v7 v7.0.0
 	github.com/filecoin-project/specs-storage v0.2.0
 	github.com/filecoin-project/test-vectors/schema v0.0.5
 	github.com/gbrlsnchs/jwt/v3 v3.0.1
@@ -100,6 +100,7 @@ require (
 	github.com/ipfs/go-metrics-prometheus v0.0.2
 	github.com/ipfs/go-path v0.0.7
 	github.com/ipfs/go-unixfs v0.3.1
+	github.com/ipfs/go-unixfsnode v1.2.0
 	github.com/ipfs/interface-go-ipfs-core v0.4.0
 	github.com/ipld/go-car v0.3.3
 	github.com/ipld/go-car/v2 v2.1.1
@@ -110,7 +111,7 @@ require (
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/libp2p/go-buffer-pool v0.0.2
 	github.com/libp2p/go-eventbus v0.2.1
-	github.com/libp2p/go-libp2p v0.18.0-rc4
+	github.com/libp2p/go-libp2p v0.18.0-rc5
 	github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect
 	github.com/libp2p/go-libp2p-core v0.14.0
 	github.com/libp2p/go-libp2p-discovery v0.6.0
@@ -122,7 +123,7 @@ require (
 	github.com/libp2p/go-libp2p-record v0.1.3
 	github.com/libp2p/go-libp2p-resource-manager v0.1.4
 	github.com/libp2p/go-libp2p-routing-helpers v0.2.3
-	github.com/libp2p/go-libp2p-swarm v0.10.1
+	github.com/libp2p/go-libp2p-swarm v0.10.2
 	github.com/libp2p/go-libp2p-tls v0.3.1
 	github.com/libp2p/go-libp2p-yamux v0.8.2
 	github.com/libp2p/go-maddr-filter v0.1.0
15  go.sum
@@ -380,8 +380,8 @@ github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3
 github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
 github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
 github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
-github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4=
-github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
+github.com/filecoin-project/specs-actors/v7 v7.0.0 h1:FQN7tjt3o68hfb3qLFSJBoLMuOFY0REkFVLO/zXj8RU=
+github.com/filecoin-project/specs-actors/v7 v7.0.0/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
 github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg=
 github.com/filecoin-project/specs-storage v0.2.0/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
 github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
@@ -995,8 +995,8 @@ github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m
 github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
 github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
 github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
-github.com/libp2p/go-libp2p v0.18.0-rc4 h1:OUsSbeu7q+Ck/bV9wHDxFzb08ORqBupHhpCmRBhWrJ8=
-github.com/libp2p/go-libp2p v0.18.0-rc4/go.mod h1:wzmsk1ioOq9FGQys2BN5BIw4nugP6+R+CyW3JbPEbbs=
+github.com/libp2p/go-libp2p v0.18.0-rc5 h1:88wWDHb9nNo0vBNCupLde3OTnFAkugOCNkrDfl3ivK4=
+github.com/libp2p/go-libp2p v0.18.0-rc5/go.mod h1:aZPS5l84bDvCvP4jkyEUT/J6YOpUq33Fgqrs3K59mpI=
 github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
 github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
 github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@@ -1182,8 +1182,8 @@ github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkR
 github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
 github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
 github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA=
-github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM=
-github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs=
+github.com/libp2p/go-libp2p-swarm v0.10.2 h1:UaXf+CTq6Ns1N2V1EgqJ9Q3xaRsiN7ImVlDMpirMAWw=
+github.com/libp2p/go-libp2p-swarm v0.10.2/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs=
 github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
 github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
 github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@@ -1299,8 +1299,9 @@ github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyP
 github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
 github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM=
 github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI=
-github.com/libp2p/go-tcp-transport v0.5.0 h1:3ZPW8HAuyRAuFzyabE0hSrCXKKSWzROnZZX7DtcIatY=
 github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y=
+github.com/libp2p/go-tcp-transport v0.5.1 h1:edOOs688VLZAozWC7Kj5/6HHXKNwi9M6wgRmmLa8M6Q=
+github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y=
 github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
 github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
 github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU=
2  lotuspond/front/package-lock.json  generated
@@ -6630,7 +6630,7 @@
       "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz",
       "integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=",
       "requires": {
-        "node-fetch": "^1.0.1",
+        "node-fetch": "^2.6.7",
         "whatwg-fetch": ">=0.10.0"
       }
     },
@@ -11,6 +11,7 @@ import (
 
 	"github.com/ipfs/go-cid"
 	mdagipld "github.com/ipfs/go-ipld-format"
+	"github.com/ipfs/go-unixfsnode"
 	dagpb "github.com/ipld/go-codec-dagpb"
 	"github.com/ipld/go-ipld-prime"
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
@@ -62,6 +63,7 @@ func TraverseDag(
 
 		return bytes.NewBuffer(node.RawData()), nil
 	}
+	unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem)
 
 	// this is how we pull the start node out of the DS
 	startLink := cidlink.Link{Cid: startFrom}
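The new `go-unixfsnode` import registers UnixFS reification on the ipld-prime LinkSystem used by `TraverseDag`, so selector traversal can interpret UnixFS data rather than only raw dag-pb nodes. A standalone sketch of that wiring, using a default LinkSystem purely for illustration (storage read/write functions would still have to be attached by the caller):

```go
package sketch

import (
	"github.com/ipfs/go-unixfsnode"
	"github.com/ipld/go-ipld-prime"
	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
)

// newUnixFSLinkSystem returns a default LinkSystem with UnixFS reification
// registered, mirroring the call added to TraverseDag above.
func newUnixFSLinkSystem() ipld.LinkSystem {
	ls := cidlink.DefaultLinkSystem()
	unixfsnode.AddUnixFSReificationToLinkSystem(&ls)
	return ls
}
```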
@@ -1272,7 +1272,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
 	//
 	// IF/WHEN this changes in the future we will have to be able to calculate
 	// "old style" commP, and thus will need to introduce a version switch or similar
-	arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1
+	arbitraryProofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1
 
 	rdr, err := os.Open(inpath)
 	if err != nil {
@@ -1127,7 +1127,7 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error
 	return backup(ctx, sm.DS, fpath)
 }
 
-func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) {
+func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) {
 	var rg storiface.RGetter
 	if expensive {
 		rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) {
@@ -1143,7 +1143,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP
 		}
 	}
 
-	bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg)
+	bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, update, rg)
 	if err != nil {
 		return nil, err
 	}
@@ -206,6 +206,7 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 
 	sectors := make(map[abi.SectorNumber]struct{})
 	var tocheck []storage.SectorRef
+	var update []bool
 	for _, info := range sectorInfos {
 		sectors[info.SectorNumber] = struct{}{}
 		tocheck = append(tocheck, storage.SectorRef{
@@ -215,9 +216,10 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 				Number: info.SectorNumber,
 			},
 		})
+		update = append(update, info.SectorKeyCID != nil)
 	}
 
-	bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, nil)
+	bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, update, nil)
 	if err != nil {
 		return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
 	}
@@ -168,7 +168,7 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.Regis
 type mockFaultTracker struct {
 }
 
-func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
 	// Returns "bad" sectors so just return empty map meaning all sectors are good
 	return map[abi.SectorID]string{}, nil
 }