Merge pull request #8457 from filecoin-project/release/v1.15.1

build: release: v1.15.1
Jiaying Wang 2022-04-07 18:14:15 -04:00 committed by GitHub
commit 731da455d4
GPG Key ID: 4AEE18F83AFDEB23
220 changed files with 13198 additions and 1571 deletions


@@ -2,7 +2,6 @@ version: 2.1
orbs:
  go: gotest/tools@0.0.13
  aws-cli: circleci/aws-cli@1.3.2
-  packer: salaxander/packer@0.0.3

executors:
  golang:
@@ -12,6 +11,16 @@ executors:
  ubuntu:
    docker:
      - image: ubuntu:20.04
+  packer:
+    description: |
+      The HashiCorp provided Packer container
+    parameters:
+      packer-version:
+        type: string
+        default: "1.8"
+    docker:
+      - image: hashicorp/packer:<< parameters.packer-version >>

commands:
  install-deps:
@@ -75,6 +84,26 @@ commands:
        command: |
          git fetch --all
+  packer_build:
+    description: "Run a packer build"
+    parameters:
+      template:
+        description: |
+          The name of the packer template file
+        type: string
+        default: packer.json
+      args:
+        description: |
+          Arguments to pass to the packer build command
+        type: string
+        default: ""
+    steps:
+      - run:
+          name: "Run a packer build"
+          command: packer build << parameters.args >> << parameters.template >>
+          no_output_timeout: 30m

jobs:
  mod-tidy-check:
    executor: golang
@@ -390,7 +419,7 @@ jobs:
  build-appimage:
    machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
    steps:
      - checkout
      - attach_workspace:
@@ -398,6 +427,16 @@ jobs:
      - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -618,6 +657,11 @@ jobs:
        default: "latest"
        description: A comma-separated string containing docker image tags to build and push (default = latest)
+      target:
+        type: string
+        default: "lotus-all-in-one"
+        description: Docker target to build
    steps:
      - run:
          name: Confirm that environment variables are set
@@ -657,6 +701,7 @@ jobs:
            docker build \
              <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
+              --target <<parameters.target>> \
              -f <<parameters.path>>/<<parameters.dockerfile>> \
              $docker_tag_args \
              <<parameters.path>>
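For illustration only: with the new `target` parameter left at its default and the values used later in this workflow (path `.`, dockerfile `Dockerfile.lotus`), the templated step above expands to roughly the following sketch; `$docker_tag_args` is assembled earlier in the same job.

```shell
# Hypothetical expansion of the templated docker build step (illustration, not part of the diff)
docker build \
  --target lotus-all-in-one \
  -f ./Dockerfile.lotus \
  $docker_tag_args \
  .
```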
@@ -669,42 +714,16 @@ jobs:
            docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
          done
-  publish-packer-mainnet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
-  publish-packer-calibrationnet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
-  publish-packer-butterflynet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+  publish-packer-snap:
+    description: build packer image with snap. mainnet only.
+    executor:
+      name: packer
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer_build:
+          template: tools/packer/lotus-snap.pkr.hcl
  publish-dockerhub:
    description: publish to dockerhub
    machine:
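As a rough sketch, assuming the `args` parameter keeps its empty default from the `packer_build` command defined earlier, the new `publish-packer-snap` job effectively runs:

```shell
# Hypothetical expansion of the packer_build step for publish-packer-snap
packer build tools/packer/lotus-snap.pkr.hcl
```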
@@ -785,6 +804,11 @@ workflows:
        suite: itest-deals_512mb
        target: "./itests/deals_512mb_test.go"
+    - test:
+        name: test-itest-deals_anycid
+        suite: itest-deals_anycid
+        target: "./itests/deals_anycid_test.go"
    - test:
        name: test-itest-deals_concurrent
        suite: itest-deals_concurrent
@@ -850,6 +874,11 @@ workflows:
        suite: itest-get_messages_in_ts
        target: "./itests/get_messages_in_ts_test.go"
+    - test:
+        name: test-itest-mempool
+        suite: itest-mempool
+        target: "./itests/mempool_test.go"
    - test:
        name: test-itest-multisig
        suite: itest-multisig
@@ -981,10 +1010,19 @@ workflows:
          tags:
            only:
              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+    - build-appimage:
+        filters:
+          branches:
+            ignore:
+              - /.*/
+          tags:
+            only:
+              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
    - publish:
        requires:
          - build-all
          - build-macos
+          - build-appimage
        filters:
          branches:
            ignore:
@@ -997,36 +1035,13 @@ workflows:
        path: .
        repo: lotus-dev
        tag: '${CIRCLE_SHA1:0:8}'
+        target: lotus-all-in-one
+    - build-and-push-image:
+        dockerfile: Dockerfile.lotus
+        path: .
+        repo: lotus-test
+        tag: '${CIRCLE_SHA1:0:8}'
+        target: lotus-test
-    - publish-packer-mainnet:
-        requires:
-          - build-all
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-    - publish-packer-calibrationnet:
-        requires:
-          - build-ntwk-calibration
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-    - publish-packer-butterflynet:
-        requires:
-          - build-ntwk-butterfly
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
    - publish-snapcraft:
        name: publish-snapcraft-stable
        channel: stable
@@ -1063,3 +1078,13 @@ workflows:
    - publish-dockerhub:
        name: publish-dockerhub-nightly
        tag: nightly
+  monthly:
+    triggers:
+      - schedule:
+          cron: "0 0 1 * *"
+          filters:
+            branches:
+              only:
+                - master
+    jobs:
+      - publish-packer-snap
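For reference, the cron expression in the new `monthly` trigger is a standard five-field cron, evaluated in UTC by CircleCI:

```shell
# minute hour day-of-month month day-of-week
# "0 0 1 * *"  ->  00:00 UTC on the 1st day of every month
```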


@@ -2,7 +2,6 @@ version: 2.1
orbs:
  go: gotest/tools@0.0.13
  aws-cli: circleci/aws-cli@1.3.2
-  packer: salaxander/packer@0.0.3

executors:
  golang:
@@ -12,6 +11,16 @@ executors:
  ubuntu:
    docker:
      - image: ubuntu:20.04
+  packer:
+    description: |
+      The HashiCorp provided Packer container
+    parameters:
+      packer-version:
+        type: string
+        default: "1.8"
+    docker:
+      - image: hashicorp/packer:<< parameters.packer-version >>

commands:
  install-deps:
@@ -75,6 +84,26 @@ commands:
        command: |
          git fetch --all
+  packer_build:
+    description: "Run a packer build"
+    parameters:
+      template:
+        description: |
+          The name of the packer template file
+        type: string
+        default: packer.json
+      args:
+        description: |
+          Arguments to pass to the packer build command
+        type: string
+        default: ""
+    steps:
+      - run:
+          name: "Run a packer build"
+          command: packer build << parameters.args >> << parameters.template >>
+          no_output_timeout: 30m

jobs:
  mod-tidy-check:
    executor: golang
@@ -390,7 +419,7 @@ jobs:
  build-appimage:
    machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
    steps:
      - checkout
      - attach_workspace:
@@ -398,6 +427,16 @@ jobs:
      - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -618,6 +657,11 @@ jobs:
        default: "latest"
        description: A comma-separated string containing docker image tags to build and push (default = latest)
+      target:
+        type: string
+        default: "lotus-all-in-one"
+        description: Docker target to build
    steps:
      - run:
          name: Confirm that environment variables are set
@@ -657,6 +701,7 @@ jobs:
            docker build \
              <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
+              --target <<parameters.target>> \
              -f <<parameters.path>>/<<parameters.dockerfile>> \
              $docker_tag_args \
              <<parameters.path>>
@@ -669,42 +714,16 @@ jobs:
            docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
          done
-  publish-packer-mainnet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
-  publish-packer-calibrationnet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
-  publish-packer-butterflynet:
-    description: build and push AWS IAM and DigitalOcean droplet.
-    executor:
-      name: packer/default
-      packer-version: 1.6.6
-    steps:
-      - checkout
-      - attach_workspace:
-          at: "."
-      - packer/build:
-          template: tools/packer/lotus.pkr.hcl
-          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+  publish-packer-snap:
+    description: build packer image with snap. mainnet only.
+    executor:
+      name: packer
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer_build:
+          template: tools/packer/lotus-snap.pkr.hcl
  publish-dockerhub:
    description: publish to dockerhub
    machine:
@@ -816,10 +835,19 @@ workflows:
          tags:
            only:
              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+    - build-appimage:
+        filters:
+          branches:
+            ignore:
+              - /.*/
+          tags:
+            only:
+              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
    - publish:
        requires:
          - build-all
          - build-macos
+          - build-appimage
        filters:
          branches:
            ignore:
@@ -832,36 +860,13 @@ workflows:
        path: .
        repo: lotus-dev
        tag: '${CIRCLE_SHA1:0:8}'
+        target: lotus-all-in-one
+    - build-and-push-image:
+        dockerfile: Dockerfile.lotus
+        path: .
+        repo: lotus-test
+        tag: '${CIRCLE_SHA1:0:8}'
+        target: lotus-test
-    - publish-packer-mainnet:
-        requires:
-          - build-all
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-    - publish-packer-calibrationnet:
-        requires:
-          - build-ntwk-calibration
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-    - publish-packer-butterflynet:
-        requires:
-          - build-ntwk-butterfly
-        filters:
-          branches:
-            ignore:
-              - /.*/
-          tags:
-            only:
-              - /^v\d+\.\d+\.\d+(-rc\d+)?$/
    - publish-snapcraft:
        name: publish-snapcraft-stable
        channel: stable
@@ -898,3 +903,13 @@ workflows:
    - publish-dockerhub:
        name: publish-dockerhub-nightly
        tag: nightly
+  monthly:
+    triggers:
+      - schedule:
+          cron: "0 0 1 * *"
+          filters:
+            branches:
+              only:
+                - master
+    jobs:
+      - publish-packer-snap

.gitignore

@@ -40,6 +40,7 @@ build/paramfetch.sh
/bundle
/darwin
/linux
+*.snap
*-fuzz.zip
/chain/types/work_msg/


@@ -49,23 +49,23 @@ AppDir:
    fedora:
      image: appimagecrafters/tests-env:fedora-30
      command: ./AppRun
-      use_host_x: true
+      use_host_x: false
    debian:
      image: appimagecrafters/tests-env:debian-stable
      command: ./AppRun
-      use_host_x: true
+      use_host_x: false
    arch:
      image: appimagecrafters/tests-env:archlinux-latest
      command: ./AppRun
-      use_host_x: true
+      use_host_x: false
    centos:
      image: appimagecrafters/tests-env:centos-7
      command: ./AppRun
-      use_host_x: true
+      use_host_x: false
    ubuntu:
      image: appimagecrafters/tests-env:ubuntu-xenial
      command: ./AppRun
-      use_host_x: true
+      use_host_x: false
  AppImage:
    arch: x86_64
    update-information: guess


@@ -1,5 +1,129 @@
# Lotus changelog
# 1.15.1 / 2022-04-07
This is a *HIGHLY recommended* feature release, v1.15.1, especially for node operators and storage providers who want to be part of the content-addressing network of Filecoin and IPFS.
This feature release introduces the Index Provider, GraphSync v2, and many other new features, improvements and bug fixes. More importantly, node operators can now enable the experimental FVM to sync mainnet!
## Highlights
### 🔥🔥🔥 FVM (Experimental) 🔥🔥🔥
- feat: fvm: FVM integration ([filecoin-project/lotus#8332](https://github.com/filecoin-project/lotus/pull/8332))
The lotus team is excited to announce the launch of the experimental, non-programmable FVM on mainnet. By setting the `LOTUS_USE_FVM_EXPERIMENTAL=1` environment variable, the lotus daemon will run the [WASM-compiled built-in actors](https://github.com/filecoin-project/builtin-actors), which are compatible with the existing chain (network v15, OhSnap); a minimal sketch of enabling it follows after this list. If you are trying it out and have any questions or feedback, please leave a comment [here](https://github.com/filecoin-project/lotus/discussions/8334)!
- chore: FVM: log when fvm is used([filecoin-project/lotus#8363](https://github.com/filecoin-project/lotus/pull/8363))
- chore: ffi: the latest fvm release([filecoin-project/lotus#8382](https://github.com/filecoin-project/lotus/pull/8382))
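A minimal sketch of opting in, assuming a locally built lotus binary; the variable name comes from the note above, while the exact restart procedure depends on your setup:

```shell
# Opt in to the experimental FVM and (re)start the daemon
export LOTUS_USE_FVM_EXPERIMENTAL=1
lotus daemon
```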
### 🌟🌟🌟 Index Provider (Production Ready!) 🌟🌟🌟
- feat: markets: Integrate index ingest protocol and retrieve by any CID ([filecoin-project/lotus#7313](https://github.com/filecoin-project/lotus/pull/7313))
More and more useful data is being stored on Filecoin via deals made by clients to Storage Providers. The goal is for this content to be discoverable when people need it. To achieve that goal, one of the projects [the Bedrock team](https://www.notion.so/pl-strflt/Bedrock-2e956d5d8143432080a1d84435ccf0ff) is working on is building an Indexer Ecosystem, a project focused on content addressing on Filecoin, with potential interoperability with IPFS in the future, eventually serving the retrieval market. The Indexer Ecosystem high-level architecture overview diagram can be found [here](https://github.com/filecoin-project/storetheindex/blob/main/doc/indexer_ecosys.png) and a detailed write-up can be found [here](https://www.notion.so/pl-strflt/Introducing-Indexer-to-SP-90bf296794174a8281c121d4ce6747a0).
With this release, lotus Storage Providers can easily become Index Providers and serve the Indexer Content Addressing System. Index Providers generate advertisements from the deals made by a storage provider and announce the data to the indexer nodes for further processing:
- To learn more about *what an Index Provider is and how to become one*, read [this page](https://lotus.filecoin.io/storage-providers/operate/index-provider/) in the lotus documentation.
- A [one-off migration](https://lotus.filecoin.io/storage-providers/operate/index-provider/#first-time-migration) is needed in order for a Storage Provider to become an Index Provider and announce properly formatted indices. It is *highly recommended* for all Index Providers to do a [forced bulk initialization](https://lotus.filecoin.io/storage-providers/configure/dagstore/#forcing-bulk-initialization) to enable index announcements for all existing deals.
- Note that the initialization places an IO workload on your storage system. SPs should set a `concurrency` appropriate for their hardware, and can stop/start initialization at their convenience as proving deadlines approach and elapse, to avoid IOPS starvation or competition with Window PoSt.
- After the first one-time migration, being an Index Provider adds barely any extra load to an SP's market system.
- You can find the testing results from SPX fellows [here](https://github.com/filecoin-project/lotus/discussions/8087).
We recommend that all Storage Providers serving deals on the Filecoin network become Index Providers and make the data they store discoverable to the retrieval market and retrieval clients!
- If you have any questions about becoming an index provider, or the indexer system in general, leave a comment [here](https://github.com/filecoin-project/lotus/discussions/8341).
- Follow the indexer project at https://github.com/filecoin-project/go-indexer-core.
- If you have any feature request or bug reports of running an index provider, create an issue in https://github.com/filecoin-project/index-provider.
- You may also join the #storetheindex channel in the Filecoin Slack to engage with the team & the community!
### ❗️❗️❗️ Dag Migration For New CAR index format in DagStore ❗️❗️❗️
The index provider leverages the latest CARv2 indexing format, `MultihashIndexSorted`, which stores the multihash code as well as the digest of all CIDs in a CAR file. Thus, all Storage Providers SHOULD perform a one-off DAG migration to regenerate DagStore CARv2 indices. This is required to become an index provider, and failing to do so may also impact your future deal making.
Follow the instructions [here](https://lotus.filecoin.io/storage-providers/operate/index-provider/) to perform the migration.
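A sketch of the one-off index regeneration, assuming the `lotus-miner dagstore initialize-all` command described in the linked dagstore documentation; pick a concurrency your storage system can sustain alongside Window PoSt:

```shell
# Force bulk (re)initialization of DagStore CARv2 indices (assumed CLI per the linked docs)
lotus-miner dagstore initialize-all --concurrency=4
```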
## New Features
- feat: sealing: Sector upgrade queue ([filecoin-project/lotus#8333](https://github.com/filecoin-project/lotus/pull/8333))
- see more details in docs: [here](https://lotus.filecoin.io/storage-providers/operate/snap-deals/#snap-deal-queue)
- feat: market utils: Support unixfsnode in TraverseDag ([filecoin-project/lotus#8168](https://github.com/filecoin-project/lotus/pull/8168))
- feat: config: enable indexer providing by default ([filecoin-project/lotus#8314](https://github.com/filecoin-project/lotus/pull/8314))
- feat: api: Make ClientCalcCommP multithreaded ([filecoin-project/lotus#8276](https://github.com/filecoin-project/lotus/pull/8276))
- feat: config: Persistent subsystem log level config ([filecoin-project/lotus#8283](https://github.com/filecoin-project/lotus/pull/8283))
- feat: shed: blockstore/vlog to car export cmds ([filecoin-project/lotus#8265](https://github.com/filecoin-project/lotus/pull/8265))
- feat: shed: ItestD ([filecoin-project/lotus#8290](https://github.com/filecoin-project/lotus/pull/8290))
- feat: Make add piece idempotent ([filecoin-project/lotus#8160](https://github.com/filecoin-project/lotus/pull/8160))
- feat: paychmgr: Support paych funding (a.k.a. fast paid retrieval) ([filecoin-project/lotus#7883](https://github.com/filecoin-project/lotus/pull/7883))
- feat: ci: packer snap ([filecoin-project/lotus#7819](https://github.com/filecoin-project/lotus/pull/7819))
- feat: #6147: Include worker name in sealing errors ([filecoin-project/lotus#7844](https://github.com/filecoin-project/lotus/pull/7844))
- Feat: cli: Remove verified data cap ([filecoin-project/lotus#8175](https://github.com/filecoin-project/lotus/pull/8175))
- feat: gateway: add MsigGetVestingSchedule to gateway api ([filecoin-project/lotus#8104](https://github.com/filecoin-project/lotus/pull/8104))
- feat: itests: add itests ensemble mocknet getter ([filecoin-project/lotus#8157](https://github.com/filecoin-project/lotus/pull/8157))
- feat: lotus-miner sectors list --initial-pledge ([filecoin-project/lotus#8098](https://github.com/filecoin-project/lotus/pull/8098))
- Resource Manager Metrics ([filecoin-project/lotus#8089](https://github.com/filecoin-project/lotus/pull/8089))
- feat: cli: set current network version from params ([filecoin-project/lotus#8111](https://github.com/filecoin-project/lotus/pull/8111))
- feat: Snapdeals support in `storage find` CLI ([filecoin-project/lotus#8130](https://github.com/filecoin-project/lotus/pull/8130))
## Improvements
- improve resource manager integration ([filecoin-project/lotus#8318](https://github.com/filecoin-project/lotus/pull/8318))
- add check manual-stateless-deal with interactive deal making ([filecoin-project/lotus#7560](https://github.com/filecoin-project/lotus/pull/7560))
- test: cli: adding wallet tests ([filecoin-project/lotus#8079](https://github.com/filecoin-project/lotus/pull/8079))
- test: chain: unit tests for the syncer & sync manager ([filecoin-project/lotus#8072](https://github.com/filecoin-project/lotus/pull/8072))
- test: cli: unit tests for sync related commands ([filecoin-project/lotus#8080](https://github.com/filecoin-project/lotus/pull/8080))
- misc: wallet: wallet tests with annotations for system test matrix ([filecoin-project/lotus#7928](https://github.com/filecoin-project/lotus/pull/7928))
- test: Cli: add mempool tests ([filecoin-project/lotus#8162](https://github.com/filecoin-project/lotus/pull/8162))
- add a state-tree diff command to lotus shed ([filecoin-project/lotus#8081](https://github.com/filecoin-project/lotus/pull/8081))
- test: mempool: Add unit and integration tests ([filecoin-project/lotus#8017](https://github.com/filecoin-project/lotus/pull/8017))
- splistore cold object reification redux ([filecoin-project/lotus#8029](https://github.com/filecoin-project/lotus/pull/8029))
- test: cli: chain category unit tests ([filecoin-project/lotus#8048](https://github.com/filecoin-project/lotus/pull/8048))
- feat: config: Move MakeNewSectorForDeals config into the Sealing section([filecoin-project/lotus#8382](https://github.com/filecoin-project/lotus/pull/8382))
## Bug Fixes
- fix: FVM: add finality check for consensus faults #8452
- fix: market: Reuse the market PubSub in index provider #8451
- fix: market: set all index provider options based on lotus config #8444
- fix: sealing: Fix PR1 worker selection (#8421)
- fix: miner: dead loop on removing sector (#8421)
- fix: sealing: Remove sector copies from workers after snapdeals ([filecoin-project/lotus#8331](https://github.com/filecoin-project/lotus/pull/8331))
- fix: storagefsm: Fix error loop on bad event ([filecoin-project/lotus#8339](https://github.com/filecoin-project/lotus/pull/8339))
- fix: sealing: FinalizeSector doesn't need sealed replica access ([filecoin-project/lotus#8339](https://github.com/filecoin-project/lotus/pull/8339))
- fix: sealing: always do cooldown in handleSubmitReplicaUpdateFailed ([filecoin-project/lotus#8353](https://github.com/filecoin-project/lotus/pull/8353))
- fix: storage cli: Output primary sector status correctly ([filecoin-project/lotus#8320](https://github.com/filecoin-project/lotus/pull/8320))
- fix: sealing fsm: Handle inputLk correctly ([filecoin-project/lotus#8291](https://github.com/filecoin-project/lotus/pull/8291))
- fix: piece provider: Don't log CIDs as binary ([filecoin-project/lotus#8287](https://github.com/filecoin-project/lotus/pull/8287))
- fix:sealing:Log instead of error normal shutdown of state machine ([filecoin-project/lotus#8232](https://github.com/filecoin-project/lotus/pull/8232))
- fix:sealing:Handle finalize replica update failures in fsm ([filecoin-project/lotus#8229](https://github.com/filecoin-project/lotus/pull/8229))
- ci: appimage: re-install appimage CI ([filecoin-project/lotus#7943](https://github.com/filecoin-project/lotus/pull/7943))
- fix: sealing: PRU insufficient collateral ([filecoin-project/lotus#8219](https://github.com/filecoin-project/lotus/pull/8219))
- fix: shed: diff command ([filecoin-project/lotus#8202](https://github.com/filecoin-project/lotus/pull/8202))
- Make `--lite` option visible in the lotus daemon help text ([filecoin-project/lotus#8207](https://github.com/filecoin-project/lotus/pull/8207))
- fix:sealing:Less verbose sector manager logging ([filecoin-project/lotus#8213](https://github.com/filecoin-project/lotus/pull/8213))
- avoid panic ([filecoin-project/lotus#8205](https://github.com/filecoin-project/lotus/pull/8205))
- A package is vulnerable to Exposure of Sensitive Information ([filecoin-project/lotus#8204](https://github.com/filecoin-project/lotus/pull/8204))
- fix: sealing: add flag usage ([filecoin-project/lotus#8190](https://github.com/filecoin-project/lotus/pull/8190))
- Fix the epoch used for gas in the message pool & validation ([filecoin-project/lotus#8163](https://github.com/filecoin-project/lotus/pull/8163))
- fix:sealing:really-do-it flag for abort upgrade ([filecoin-project/lotus#8181](https://github.com/filecoin-project/lotus/pull/8181))
- fix:proving:post check sector handles snap deals replica faults ([filecoin-project/lotus#8177](https://github.com/filecoin-project/lotus/pull/8177))
- fix: client: calculate commps for pieces bigger than 32GB ([filecoin-project/lotus#8179](https://github.com/filecoin-project/lotus/pull/8179))
- fix:cli:Continue instead of return error if no valid value is filled ([filecoin-project/lotus#8131](https://github.com/filecoin-project/lotus/pull/8131))
- fix: limit reification sizes ([filecoin-project/lotus#8149](https://github.com/filecoin-project/lotus/pull/8149))
- fix: state: Allow lotus-miner info to complete without admin permission ([filecoin-project/lotus#8057](https://github.com/filecoin-project/lotus/pull/8057))
- fix:paychan:deflake integration test ([filecoin-project/lotus#8088](https://github.com/filecoin-project/lotus/pull/8088))
- fix: worker: allow enable/disabling ReplicaUpdate tasks ([filecoin-project/lotus#8090](https://github.com/filecoin-project/lotus/pull/8090))
- don't fail reification on missing references ([filecoin-project/lotus#8128](https://github.com/filecoin-project/lotus/pull/8128))
- sealer: fix error message ([filecoin-project/lotus#8121](https://github.com/filecoin-project/lotus/pull/8121))
- don't track peer ids in rcmgr metrics ([filecoin-project/lotus#8099](https://github.com/filecoin-project/lotus/pull/8099))
- temporarily disable reification ([filecoin-project/lotus#8132](https://github.com/filecoin-project/lotus/pull/8132))
- [Describe]: when excute cmd "lotus-bench sealing" without "benchmark-… ([filecoin-project/lotus#8173](https://github.com/filecoin-project/lotus/pull/8173))
## Dependency Updates
- deps: update go-libp2p and go-libp2p-resource-manager ([filecoin-project/lotus#8289](https://github.com/filecoin-project/lotus/pull/8289))
- feat(deps): update to graphsync v0.13.0 with 2.0 protocol ([filecoin-project/lotus#8273](https://github.com/filecoin-project/lotus/pull/8273))
- dep: actor: get v7 ([filecoin-project/lotus#8194](https://github.com/filecoin-project/lotus/pull/8194))
- deps: update go-libp2p to v0.18 release ([filecoin-project/lotus#8355](https://github.com/filecoin-project/lotus/pull/8355))
- github.com/filecoin-project/go-data-transfer (v1.14.1 -> v1.15.0):
- github.com/filecoin-project/go-fil-markets (v1.19.2 -> v1.20.1):
- deps: update go-libp2p to v0.18.0-rc5 ([filecoin-project/lotus#8169](https://github.com/filecoin-project/lotus/pull/8169))
## Others
- chore: build: backport releases ([filecoin-project/lotus#8192](https://github.com/filecoin-project/lotus/pull/8192))
- feat: build: bump the version to v1.15.1-dev ([filecoin-project/lotus#8073](https://github.com/filecoin-project/lotus/pull/8073))
- makefile: add make jen ([filecoin-project/lotus#8122](https://github.com/filecoin-project/lotus/pull/8122))
- chore: Merge releases into master ([filecoin-project/lotus#8156](https://github.com/filecoin-project/lotus/pull/8156))
- chore: ci: disable publish-packer #8451
# 1.15.0 / 2022-03-09
This is an optional release with retrieval improvements (client side), SP UX improvements with unsealing, snap deals and regular deal making, and many other new features, improvements and bug fixes.
@@ -423,22 +547,22 @@ This feature release includes the latest functionalities and improvements, like
## Highlights
- Enable separate storage and retrieval transfer limits ([filecoin-project/lotus#7405](https://github.com/filecoin-project/lotus/pull/7405))
- `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, where users may set the amount of ongoing data transfer for storage and retrieval deals in parallel separately. The default value for both is set to 20.
- If you are using the lotus client, these two configuration variables are under the `Client` section in `./lotus/config.toml`.
- If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `/.lotusminer/config.toml`.
- Update proofs to v10.0.0 ([filecoin-project/lotus#7420](https://github.com/filecoin-project/lotus/pull/7420))
- This version supports CUDA. To enable CUDA instead of openCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`.
- You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and perf improvement results on PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443); most people observe a 30-50% decrease in computation time.
## New Features
- Feat/datamodel selector retrieval ([filecoin-project/lotus#6393](https://github.com/filecoin-project/lotus/pull/6393))
- This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal, provided the user knows the exact low-level shape of the deal contents.
- For example, to retrieve the first entry of a UnixFS directory, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output`
- Expose storage stats on the metrics endpoint ([filecoin-project/lotus#7418](https://github.com/filecoin-project/lotus/pull/7418))
- feat: Catch panic to generate report and reraise ([filecoin-project/lotus#7341](https://github.com/filecoin-project/lotus/pull/7341))
- Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon miner or workers.
- Add envconfig docs to the config ([filecoin-project/lotus#7412](https://github.com/filecoin-project/lotus/pull/7412))
- You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml).
- lotus shed: fr32 utils ([filecoin-project/lotus#7355](https://github.com/filecoin-project/lotus/pull/7355))
- Miner CLI: Allow trying to change owners of any miner actor ([filecoin-project/lotus#7328](https://github.com/filecoin-project/lotus/pull/7328))
- Add --unproven flag to the sectors list command ([filecoin-project/lotus#7308](https://github.com/filecoin-project/lotus/pull/7308))
@@ -455,7 +579,7 @@ This feature release includes the latest functionalities and improvements, like
- Prep retrieval for selectors: no functional changes ([filecoin-project/lotus#7306](https://github.com/filecoin-project/lotus/pull/7306))
- Seed: improve helptext ([filecoin-project/lotus#7304](https://github.com/filecoin-project/lotus/pull/7304))
- Mempool: reduce size of sigValCache ([filecoin-project/lotus#7305](https://github.com/filecoin-project/lotus/pull/7305))
- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474))
## Bug Fixes
- StateSearchMsg: Correct usage of the allowReplaced flag ([filecoin-project/lotus#7450](https://github.com/filecoin-project/lotus/pull/7450))
@@ -541,17 +665,17 @@ Note that this release is built on top of lotus v1.11.3. Enterprising users like
## Dependency Updates
- Add [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0)
- **Protocol changes**
- Multisig Approve only hashes when hash in params
- FIP 0020 WithdrawBalance methods return withdrawn value
- FIP 0021 Fix bug in power calculation when extending verified deals sectors
- FIP 0022 PublishStorageDeals drops errors in batch
- FIP 0024 BatchBalancer update and burn added to PreCommitBatch
- FIP 0026 Add FaultMaxAge extension
- Reduce calls to power and reward actors by passing values from power cron
- Defensive programming hardening power cron against programmer error
- **Implementation changes**
- Move to xerrors
- Improved logging: burn events are now logged with reasons and burned value.
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210810190654-139e0e79e69e -> v0.1.1-0.20210915140513-d354ccf10379):
## Others
@@ -583,13 +707,13 @@ improvements in different area, like deal making, sealing and so on.
## Highlights
- 🌟🌟Introduce `MaxStagingDealsBytes` - reject new deals if our staging deals area is full ([filecoin-project/lotus#7276](https://github.com/filecoin-project/lotus/pull/7276))
- Set `MaxStagingDealsBytes` under the [Dealmaking] section of the markets subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of the market subsystem's repo gets too large.
- 🌟🌟miner: Command to list/remove expired sectors locally ([filecoin-project/lotus#7140](https://github.com/filecoin-project/lotus/pull/7140))
- run `./lotus-miner sectors expired -h` for more details.
- 🚀update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
- MinerX fellows (early testers of lotus releases) have reported faster WindowPoSt computation!
- 🌟dealpublisher: Fully validate deals before publishing ([filecoin-project/lotus#7234](https://github.com/filecoin-project/lotus/pull/7234))
- This excludes expired deals before sending out a PSD message, which reduces the chances of PSD message failure due to invalid deals.
- 🌟Simple alert system; FD limit alerts ([filecoin-project/lotus#7108](https://github.com/filecoin-project/lotus/pull/7108))
## New Features
@@ -714,24 +838,24 @@ storage providers and clients.
- It is highly recommended to **wait for all ongoing data transfers to finish or cancel inbound storage deals that
are still transferring**, using the `lotus-miner data-transfers cancel` command, before upgrading your market nodes. The reason is that the new dagstore changes attributes in the internal deal state objects, and the paths to the staging CARs where the deal data was being placed will be lost.
- ‼Having your dags initialized will become important in the near future for you to provide a better storage
and retrieval service. We'd suggest you start [forced bulk initialization] soon if possible, as this process
places a relatively high IO workload on your storage system and is better carried out gradually and over a
longer timeframe. Read how to properly perform a forced bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization).
- ⏮ Rollback Alert (from v1.11.2-rcX to any lower version): If a storage deal is initiated with the M1/v1.11.2(-rcX)
release, it needs to get to the `StorageDealAwaitingPrecommit` state before you can do a version rollback, or the markets process may panic.
- 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedback](https://github.com/filecoin-project/lotus/discussions/6852) for Dagstore in the past month!**
- 🌟🌟 rpcenc: Support reader redirect ([filecoin-project/lotus#6952](https://github.com/filecoin-project/lotus/pull/6952))
- This allows market processes to send piece bytes directly to workers involved in `AddPiece`.
- Extending sectors: more practical and flexible tools ([filecoin-project/lotus#6097](https://github.com/filecoin-project/lotus/pull/6097))
- `lotus-miner sectors check-expire` to inspect expiring sectors.
- `lotus-miner sectors renew` for renewing expiring sectors; see the command help menu for customizable options
like `extension`, `new-expiration` and so on.
- ‼️ MpoolReplaceCmd (`lotus mpool replace`) now takes FIL for fee-limit ([filecoin-project/lotus#6927](https://github.com/filecoin-project/lotus/pull/6927))
- Drop townhall/chainwatch ([filecoin-project/lotus#6912](https://github.com/filecoin-project/lotus/pull/6912))
- ChainWatch is no longer supported by lotus.
- Configurable CC Sector Expiration ([filecoin-project/lotus#6803](https://github.com/filecoin-project/lotus/pull/6803))
- Set `CommittedCapacitySectorLifetime` in lotus-miner/config.toml to specify the default expiration for a new CC
sector; the value must be between 180 and 540 days inclusive.
## New Features
- api/command for encoding actor params ([filecoin-project/lotus#7150](https://github.com/filecoin-project/lotus/pull/7150))
@ -862,110 +986,110 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd
- You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated` - You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated`
## New Features ## New Features
- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356)) - ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))
- **⭐️⭐️ Experimental** [Splitstore]((https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md)) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!) : - **⭐️⭐️ Experimental** [Splitstore]((https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md)) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!) :
- Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867)) - Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867))
- Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854)) - Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854))
- splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811)) - splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811))
- fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841)) - fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841))
- Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833)) - Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833))
- cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760)) - cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760))
- Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775)) - Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775))
- Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817)) - Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817))
- Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777)) - Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777))
- Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762)) - Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762))
- Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756)) - Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756))
- Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754)) - Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754))
- Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474)) - Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474))
- lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840)) - lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840))
- add a very verbose -vv flag to lotus and lotus-miner. ([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888)) - add a very verbose -vv flag to lotus and lotus-miner. ([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888))
- Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638)) - Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638))
- add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640)) - add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640))
- Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield. - Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield.
- Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642)) - Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642))
- Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743)) - Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743))
- Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696)) - Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696))
- Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694)) - Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694))
- Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620)) - Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620))
- commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650)) - commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650))
- shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631)) - shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631))
## Bug Fixes
- Fix padding of deals, which only partially shipped in #5988 ([filecoin-project/lotus#6683](https://github.com/filecoin-project/lotus/pull/6683))
- fix deal concurrency test failures by upgrading graphsync and others ([filecoin-project/lotus#6724](https://github.com/filecoin-project/lotus/pull/6724))
- fix: on randomness change, use new rand ([filecoin-project/lotus#6805](https://github.com/filecoin-project/lotus/pull/6805))
- fix: always check if StateSearchMessage returns nil ([filecoin-project/lotus#6802](https://github.com/filecoin-project/lotus/pull/6802))
- test: fix flaky window post tests ([filecoin-project/lotus#6804](https://github.com/filecoin-project/lotus/pull/6804))
- wrap close(wait) with sync.Once to avoid panic ([filecoin-project/lotus#6800](https://github.com/filecoin-project/lotus/pull/6800))
- fixes #6786 segfault ([filecoin-project/lotus#6787](https://github.com/filecoin-project/lotus/pull/6787))
- ClientRetrieve stops on cancel ([filecoin-project/lotus#6739](https://github.com/filecoin-project/lotus/pull/6739))
- Fix bugs in sectors extend --v1-sectors ([filecoin-project/lotus#6066](https://github.com/filecoin-project/lotus/pull/6066))
- fix "lotus-seed genesis car" error "merkledag: not found" ([filecoin-project/lotus#6688](https://github.com/filecoin-project/lotus/pull/6688))
- Get retrieval pricing input should not error out on a deal state fetch ([filecoin-project/lotus#6679](https://github.com/filecoin-project/lotus/pull/6679))
- Fix more CID double-encoding as hex ([filecoin-project/lotus#6680](https://github.com/filecoin-project/lotus/pull/6680))
- storage: Fix FinalizeSector with sectors in storage paths ([filecoin-project/lotus#6653](https://github.com/filecoin-project/lotus/pull/6653))
- Fix tiny error in check-client-datacap ([filecoin-project/lotus#6664](https://github.com/filecoin-project/lotus/pull/6664))
- Fix: precommit_batch method used the wrong cfg.CommitBatchWait ([filecoin-project/lotus#6658](https://github.com/filecoin-project/lotus/pull/6658))
- fix ticket expiration check ([filecoin-project/lotus#6635](https://github.com/filecoin-project/lotus/pull/6635))
- remove precommit check in handleCommitFailed ([filecoin-project/lotus#6634](https://github.com/filecoin-project/lotus/pull/6634))
- fix prove commit aggregate send token amount ([filecoin-project/lotus#6625](https://github.com/filecoin-project/lotus/pull/6625))
## Improvements
- Eliminate inefficiency in markets logging ([filecoin-project/lotus#6895](https://github.com/filecoin-project/lotus/pull/6895))
- rename `cmd/lotus{-storage=>}-miner` to match binary. ([filecoin-project/lotus#6886](https://github.com/filecoin-project/lotus/pull/6886))
- fix racy TestSimultanenousTransferLimit. ([filecoin-project/lotus#6862](https://github.com/filecoin-project/lotus/pull/6862))
- ValidateBlock: Assert that block header heights are greater than their parents' ([filecoin-project/lotus#6872](https://github.com/filecoin-project/lotus/pull/6872))
- feat: Don't panic when api impl is nil ([filecoin-project/lotus#6857](https://github.com/filecoin-project/lotus/pull/6857))
- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544))
- easy way to make install app ([filecoin-project/lotus#5183](https://github.com/filecoin-project/lotus/pull/5183))
- api: Separate the Net interface from Common ([filecoin-project/lotus#6627](https://github.com/filecoin-project/lotus/pull/6627))
- add StateReadState to gateway api ([filecoin-project/lotus#6818](https://github.com/filecoin-project/lotus/pull/6818))
- add SealProof in SectorBuilder ([filecoin-project/lotus#6815](https://github.com/filecoin-project/lotus/pull/6815))
- sealing: Handle preCommitParams errors more correctly ([filecoin-project/lotus#6763](https://github.com/filecoin-project/lotus/pull/6763))
- ClientFindData: always fetch peer id from chain ([filecoin-project/lotus#6807](https://github.com/filecoin-project/lotus/pull/6807))
- test: handle null blocks in TestForkRefuseCall ([filecoin-project/lotus#6758](https://github.com/filecoin-project/lotus/pull/6758))
- Add more deal details to lotus-miner info ([filecoin-project/lotus#6708](https://github.com/filecoin-project/lotus/pull/6708))
- add election backtest ([filecoin-project/lotus#5950](https://github.com/filecoin-project/lotus/pull/5950))
- add dollar sign ([filecoin-project/lotus#6690](https://github.com/filecoin-project/lotus/pull/6690))
- get-actor cli spelling fix ([filecoin-project/lotus#6681](https://github.com/filecoin-project/lotus/pull/6681))
- polish(statetree): accept a context in statetree diff for timeouts ([filecoin-project/lotus#6639](https://github.com/filecoin-project/lotus/pull/6639))
- Add helptext to lotus chain export ([filecoin-project/lotus#6672](https://github.com/filecoin-project/lotus/pull/6672))
- add an incremental nonce itest. ([filecoin-project/lotus#6663](https://github.com/filecoin-project/lotus/pull/6663))
- commit batch: Initialize the FailedSectors map ([filecoin-project/lotus#6647](https://github.com/filecoin-project/lotus/pull/6647))
- Fast-path retry submitting commit aggregate if commit is still valid ([filecoin-project/lotus#6638](https://github.com/filecoin-project/lotus/pull/6638))
- Reuse timers in sealing batch logic ([filecoin-project/lotus#6636](https://github.com/filecoin-project/lotus/pull/6636))
## Dependency Updates
- Update to proof v8.0.3 ([filecoin-project/lotus#6890](https://github.com/filecoin-project/lotus/pull/6890))
- update to go-fil-market v1.6.0 ([filecoin-project/lotus#6885](https://github.com/filecoin-project/lotus/pull/6885))
- Bump go-multihash, adjust test for supported version ([filecoin-project/lotus#6674](https://github.com/filecoin-project/lotus/pull/6674))
- github.com/filecoin-project/go-data-transfer (v1.6.0 -> v1.7.2):
- github.com/filecoin-project/go-fil-markets (v1.5.0 -> v1.6.2):
- github.com/filecoin-project/go-padreader (v0.0.0-20200903213702-ed5fae088b20 -> v0.0.0-20210723183308-812a16dc01b1)
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210506134452-99b279731c48 -> v0.1.1-0.20210810190654-139e0e79e69e)
- github.com/filecoin-project/go-statemachine (v0.0.0-20200925024713-05bd7c71fbfe -> v1.0.1)
- update go-libp2p-pubsub to v0.5.0 ([filecoin-project/lotus#6764](https://github.com/filecoin-project/lotus/pull/6764))
## Others
- Master->v1.11.1 ([filecoin-project/lotus#7051](https://github.com/filecoin-project/lotus/pull/7051))
- v1.11.1-rc2 ([filecoin-project/lotus#6966](https://github.com/filecoin-project/lotus/pull/6966))
- Backport master -> v1.11.1 ([filecoin-project/lotus#6965](https://github.com/filecoin-project/lotus/pull/6965))
- Fixes in master -> release ([filecoin-project/lotus#6933](https://github.com/filecoin-project/lotus/pull/6933))
- Add changelog for v1.11.1-rc1 and bump the version ([filecoin-project/lotus#6900](https://github.com/filecoin-project/lotus/pull/6900))
- Fix merge release -> v1.11.1 ([filecoin-project/lotus#6897](https://github.com/filecoin-project/lotus/pull/6897))
- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6880](https://github.com/filecoin-project/lotus/pull/6880))
- Add github actions for staled pr ([filecoin-project/lotus#6879](https://github.com/filecoin-project/lotus/pull/6879))
- Update issue templates and add templates for M1 ([filecoin-project/lotus#6856](https://github.com/filecoin-project/lotus/pull/6856))
- Fix links in issue templates
- Update issue templates to forms ([filecoin-project/lotus#6798](https://github.com/filecoin-project/lotus/pull/6798))
- Nerpa v13 upgrade ([filecoin-project/lotus#6837](https://github.com/filecoin-project/lotus/pull/6837))
- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544))
- release -> master ([filecoin-project/lotus#6828](https://github.com/filecoin-project/lotus/pull/6828))
- Resurrect CODEOWNERS, but for maintainers group ([filecoin-project/lotus#6773](https://github.com/filecoin-project/lotus/pull/6773))
- Master disclaimer ([filecoin-project/lotus#6757](https://github.com/filecoin-project/lotus/pull/6757))
- Create stale.yml ([filecoin-project/lotus#6747](https://github.com/filecoin-project/lotus/pull/6747))
- Release template: Update all testnet infra at once ([filecoin-project/lotus#6710](https://github.com/filecoin-project/lotus/pull/6710))
- Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709))
- Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689))
- Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621))
## Contributors
@ -1289,10 +1413,10 @@ FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.m
**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.**
Note:
- We recommend keeping `PreCommitSectorsBatch` at 1.
- We recommend miners set `PreCommitBatchWait` to less than 30 hours.
- We recommend miners set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures due to expirations (see the sketch below).
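To make the slack recommendation concrete, here is a small, hypothetical Go sketch (not lotus code) of the decision a batcher has to make: a pending batch must be flushed early enough that, even with the configured slack, nothing in it expires before landing on chain. The helper name and the example durations are illustrative assumptions.

```go
package main

import (
	"fmt"
	"time"
)

// shouldFlushBatch reports whether a pending batch should be submitted now so that the
// earliest expiring item still lands on chain with `slack` time to spare. A longer slack
// (CommitBatchSlack / PreCommitBatchSlack) therefore flushes earlier and avoids message
// failures due to expirations.
func shouldFlushBatch(now, earliestExpiry time.Time, slack time.Duration) bool {
	return !now.Add(slack).Before(earliestExpiry)
}

func main() {
	now := time.Now()
	expiry := now.Add(20 * time.Hour) // earliest ticket/precommit expiry in the batch

	fmt.Println(shouldFlushBatch(now, expiry, 6*time.Hour))  // false: still safe to keep batching
	fmt.Println(shouldFlushBatch(now, expiry, 24*time.Hour)) // true: expiry falls inside the slack window, flush now
}
```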
### Projected state tree growth
@ -1303,9 +1427,9 @@ Given these assumptions:
- We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅
- We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day.
- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day.
  - Nearly all of the state-tree growth is expected to come from new sector metadata.
- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day).
  - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree.
### Future improvements
View File
@ -28,6 +28,14 @@ WORKDIR /opt/filecoin
RUN make clean deps RUN make clean deps
FROM builder-local AS builder-test
MAINTAINER Lotus Development Team
WORKDIR /opt/filecoin
RUN make debug
FROM builder-local AS builder FROM builder-local AS builder
MAINTAINER Lotus Development Team MAINTAINER Lotus Development Team
@ -197,6 +205,43 @@ RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
EXPOSE 1234
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777
###
from base as lotus-test
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV WALLET_PATH /var/lib/lotus-wallet
COPY --from=builder-test /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-seed /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner VOLUME /var/lib/lotus-miner
View File
@ -345,6 +345,8 @@ gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli" @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
.PHONY: gen .PHONY: gen
jen: gen
snap: lotus lotus-miner lotus-worker snap: lotus lotus-miner lotus-worker
snapcraft snapcraft
# snapcraft upload ./lotus_*.snap # snapcraft upload ./lotus_*.snap
View File
@ -689,7 +689,17 @@ type FullNode interface {
// MethodGroup: Paych // MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels // The Paych methods are for interacting with and managing payment channels
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign // PaychGet gets or creates a payment channel between address pair
// The specified amount will be reserved for use. If there aren't enough non-reserved funds
// available, funds will be added through an on-chain message.
// - When opts.OffChain is true, this call will not cause any messages to be sent to the chain (no automatic
// channel creation/funds adding). If the operation can't be performed without sending a message an error will be
// returned. Note that even when this option is specified, this call can be blocked by previous operations on the
// channel waiting for on-chain operations.
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt, opts PaychGetOpts) (*ChannelInfo, error) //perm:sign
// PaychFund gets or creates a payment channel between address pair.
// The specified amount will be added to the channel through on-chain send for future use
PaychFund(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
@ -828,6 +838,10 @@ const (
PCHOutbound PCHOutbound
) )
type PaychGetOpts struct {
OffChain bool
}
type PaychStatus struct { type PaychStatus struct {
ControlAddr address.Address ControlAddr address.Address
Direction PCHDir Direction PCHDir
@ -845,16 +859,23 @@ type ChannelAvailableFunds struct {
From address.Address From address.Address
// To is the to address of the channel // To is the to address of the channel
To address.Address To address.Address
// ConfirmedAmt is the amount of funds that have been confirmed on-chain
// for the channel // ConfirmedAmt is the total amount of funds that have been confirmed on-chain for the channel
ConfirmedAmt types.BigInt ConfirmedAmt types.BigInt
// PendingAmt is the amount of funds that are pending confirmation on-chain // PendingAmt is the amount of funds that are pending confirmation on-chain
PendingAmt types.BigInt PendingAmt types.BigInt
// NonReservedAmt is part of ConfirmedAmt that is available for use (e.g. when the payment channel was pre-funded)
NonReservedAmt types.BigInt
// PendingAvailableAmt is the amount of funds that are pending confirmation on-chain that will become available once confirmed
PendingAvailableAmt types.BigInt
// PendingWaitSentinel can be used with PaychGetWaitReady to wait for // PendingWaitSentinel can be used with PaychGetWaitReady to wait for
// confirmation of pending funds // confirmation of pending funds
PendingWaitSentinel *cid.Cid PendingWaitSentinel *cid.Cid
// QueuedAmt is the amount that is queued up behind a pending request // QueuedAmt is the amount that is queued up behind a pending request
QueuedAmt types.BigInt QueuedAmt types.BigInt
// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain // VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
// and in the local datastore // and in the local datastore
VoucherReedeemedAmt types.BigInt VoucherReedeemedAmt types.BigInt
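A hedged usage sketch of the split between `PaychGet` (reserve from existing funds, optionally off-chain only) and `PaychFund` (explicit on-chain top-up), using the signatures shown above. `node` is assumed to be an `api.FullNode` client obtained elsewhere, and the `ChannelInfo` field names (`Channel`, `WaitSentinel`) are taken as given; error handling is minimal.

```go
package paychsketch

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// reserveOrFund tries to reserve amt in a from->to payment channel without touching the
// chain; if there aren't enough non-reserved confirmed funds it falls back to PaychFund,
// which adds funds through an on-chain message, then waits for that message to land
// before returning the channel address.
func reserveOrFund(ctx context.Context, node api.FullNode, from, to address.Address, amt types.BigInt) (address.Address, error) {
	ci, err := node.PaychGet(ctx, from, to, amt, api.PaychGetOpts{OffChain: true})
	if err == nil {
		return ci.Channel, nil
	}

	ci, err = node.PaychFund(ctx, from, to, amt)
	if err != nil {
		return address.Undef, err
	}

	return node.PaychGetWaitReady(ctx, ci.WaitSentinel)
}
```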
View File
@ -45,8 +45,9 @@ type Gateway interface {
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
View File
@ -51,6 +51,15 @@ type Net interface {
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
NetProtectAdd(ctx context.Context, acl []peer.ID) error //perm:admin
NetProtectRemove(ctx context.Context, acl []peer.ID) error //perm:admin
NetProtectList(ctx context.Context) ([]peer.ID, error) //perm:read
// ResourceManager API
NetStat(ctx context.Context, scope string) (NetStat, error) //perm:read
NetLimit(ctx context.Context, scope string) (NetLimit, error) //perm:read
NetSetLimit(ctx context.Context, scope string, limit NetLimit) error //perm:admin
// ID returns peerID of libp2p node backing this API // ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read ID(context.Context) (peer.ID, error) //perm:read
} }
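A sketch of how a client might drive the new resource-manager and peer-protection endpoints on the `Net` interface above. The `"system"` scope name and the limit-doubling policy are illustrative assumptions, not recommendations.

```go
package netsketch

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/api"
)

// inspectAndRaiseSystemLimit prints the current usage of the "system" scope, doubles its
// connection limits, and protects a set of peers from connection-manager pruning.
func inspectAndRaiseSystemLimit(ctx context.Context, node api.Net, protect []peer.ID) error {
	stat, err := node.NetStat(ctx, "system")
	if err != nil {
		return err
	}
	fmt.Printf("system scope usage: %+v\n", stat.System)

	limit, err := node.NetLimit(ctx, "system")
	if err != nil {
		return err
	}
	limit.Conns *= 2
	limit.ConnsInbound *= 2
	limit.ConnsOutbound *= 2
	if err := node.NetSetLimit(ctx, "system", limit); err != nil {
		return err
	}

	// Keep important peers attached regardless of connection-manager pressure.
	return node.NetProtectAdd(ctx, protect)
}
```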
View File
@ -222,6 +222,16 @@ type StorageMiner interface {
// DagstoreGC runs garbage collection on the DAG store. // DagstoreGC runs garbage collection on the DAG store.
DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
// so they can download its index
IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin
// IndexerAnnounceAllDeals informs the indexer nodes about all active deals.
IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin
// DagstoreLookupPieces returns information about shards that contain the given CID.
DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin
// RuntimeSubsystems returns the subsystems that are enabled // RuntimeSubsystems returns the subsystems that are enabled
// in this instance. // in this instance.
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
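A sketch of the new indexer/dagstore endpoints on the miner API: re-announce all active deals and then inspect which dagstore shards hold a payload CID. `miner` is assumed to be an `api.StorageMiner` client obtained elsewhere (e.g. via the lotus-miner RPC helpers).

```go
package minersketch

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// reannounceAndInspect re-announces all active deals to indexer nodes, then looks up
// which shards contain the given payload CID and prints what it finds.
func reannounceAndInspect(ctx context.Context, miner api.StorageMiner, payload cid.Cid) error {
	if err := miner.IndexerAnnounceAllDeals(ctx); err != nil {
		return err
	}

	shards, err := miner.DagstoreLookupPieces(ctx, payload)
	if err != nil {
		return err
	}
	for _, s := range shards {
		fmt.Printf("shard: %+v\n", s)
	}
	return nil
}
```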
View File
@ -122,7 +122,7 @@ func init() {
addExample(api.FullAPIVersion1) addExample(api.FullAPIVersion1)
addExample(api.PCHInbound) addExample(api.PCHInbound)
addExample(time.Minute) addExample(time.Minute)
addExample(graphsync.RequestID(4)) addExample(graphsync.NewRequestID())
addExample(datatransfer.TransferID(3)) addExample(datatransfer.TransferID(3))
addExample(datatransfer.Ongoing) addExample(datatransfer.Ongoing)
addExample(storeIDExample) addExample(storeIDExample)
@ -300,6 +300,34 @@ func init() {
Error: "<error>", Error: "<error>",
}) })
addExample(storiface.ResourceTable) addExample(storiface.ResourceTable)
addExample(network.ScopeStat{
Memory: 123,
NumStreamsInbound: 1,
NumStreamsOutbound: 2,
NumConnsInbound: 3,
NumConnsOutbound: 4,
NumFD: 5,
})
addExample(map[string]network.ScopeStat{
"abc": {
Memory: 123,
NumStreamsInbound: 1,
NumStreamsOutbound: 2,
NumConnsInbound: 3,
NumConnsOutbound: 4,
NumFD: 5,
}})
addExample(api.NetLimit{
Memory: 123,
StreamsInbound: 1,
StreamsOutbound: 2,
Streams: 3,
ConnsInbound: 3,
ConnsOutbound: 4,
Conns: 4,
FD: 5,
})
} }
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
View File
@ -1811,6 +1811,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
} }
// NetLimit mocks base method.
func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetLimit", arg0, arg1)
ret0, _ := ret[0].(api.NetLimit)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetLimit indicates an expected call of NetLimit.
func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1)
}
// NetPeerInfo mocks base method. // NetPeerInfo mocks base method.
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -1841,6 +1856,49 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
} }
// NetProtectAdd mocks base method.
func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// NetProtectAdd indicates an expected call of NetProtectAdd.
func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1)
}
// NetProtectList mocks base method.
func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectList", arg0)
ret0, _ := ret[0].([]peer.ID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetProtectList indicates an expected call of NetProtectList.
func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0)
}
// NetProtectRemove mocks base method.
func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// NetProtectRemove indicates an expected call of NetProtectRemove.
func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1)
}
// NetPubsubScores mocks base method. // NetPubsubScores mocks base method.
func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -1856,6 +1914,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
} }
// NetSetLimit mocks base method.
func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// NetSetLimit indicates an expected call of NetSetLimit.
func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2)
}
// NetStat mocks base method.
func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetStat", arg0, arg1)
ret0, _ := ret[0].(api.NetStat)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetStat indicates an expected call of NetStat.
func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1)
}
// NodeStatus mocks base method. // NodeStatus mocks base method.
func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) { func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -1931,19 +2018,34 @@ func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1)
} }
// PaychGet mocks base method. // PaychFund mocks base method.
func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) { func (m *MockFullNode) PaychFund(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "PaychFund", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*api.ChannelInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PaychFund indicates an expected call of PaychFund.
func (mr *MockFullNodeMockRecorder) PaychFund(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychFund", reflect.TypeOf((*MockFullNode)(nil).PaychFund), arg0, arg1, arg2, arg3)
}
// PaychGet mocks base method.
func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 api.PaychGetOpts) (*api.ChannelInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*api.ChannelInfo) ret0, _ := ret[0].(*api.ChannelInfo)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// PaychGet indicates an expected call of PaychGet. // PaychGet indicates an expected call of PaychGet.
func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3, arg4)
} }
// PaychGetWaitReady mocks base method. // PaychGetWaitReady mocks base method.
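Because the mock is regenerated with the extra `PaychGetOpts` argument, existing gomock expectations need a fifth matcher. A minimal test sketch, assuming the generated constructor `NewMockFullNode` lives in the usual `api/mocks` package:

```go
package mocksketch

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/mocks"
	"github.com/filecoin-project/lotus/chain/types"
)

// TestPaychGetExpectation shows the fifth matcher that existing expectations now need.
func TestPaychGetExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := mocks.NewMockFullNode(ctrl)
	node.EXPECT().
		PaychGet(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), api.PaychGetOpts{OffChain: true}).
		Return(&api.ChannelInfo{}, nil)

	if _, err := node.PaychGet(context.Background(), address.Undef, address.Undef, types.NewInt(0), api.PaychGetOpts{OffChain: true}); err != nil {
		t.Fatal(err)
	}
}
```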
View File
@ -306,7 +306,9 @@ type FullNodeStruct struct {
PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"` PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"` PaychFund func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"`
PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) `perm:"sign"`
PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"` PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
@ -516,6 +518,8 @@ type GatewayStruct struct {
MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `` MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``
MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) ``
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) `` StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``
@ -587,11 +591,23 @@ type NetStruct struct {
NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"` NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetLimit func(p0 context.Context, p1 string) (NetLimit, error) `perm:"read"`
NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"` NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"` NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetProtectAdd func(p0 context.Context, p1 []peer.ID) error `perm:"admin"`
NetProtectList func(p0 context.Context) ([]peer.ID, error) `perm:"read"`
NetProtectRemove func(p0 context.Context, p1 []peer.ID) error `perm:"admin"`
NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"` NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
NetSetLimit func(p0 context.Context, p1 string, p2 NetLimit) error `perm:"admin"`
NetStat func(p0 context.Context, p1 string) (NetStat, error) `perm:"read"`
} }
} }
@ -633,6 +649,8 @@ type StorageMinerStruct struct {
DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"` DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"`
DagstoreLookupPieces func(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) `perm:"admin"`
DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"` DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"`
DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
@ -667,6 +685,10 @@ type StorageMinerStruct struct {
DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"` DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"`
IndexerAnnounceAllDeals func(p0 context.Context) error `perm:"admin"`
IndexerAnnounceDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"` MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"`
@ -2179,14 +2201,25 @@ func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid
return *new(cid.Cid), ErrNotSupported return *new(cid.Cid), ErrNotSupported
} }
func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { func (s *FullNodeStruct) PaychFund(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
if s.Internal.PaychFund == nil {
return nil, ErrNotSupported
}
return s.Internal.PaychFund(p0, p1, p2, p3)
}
func (s *FullNodeStub) PaychFund(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) {
if s.Internal.PaychGet == nil { if s.Internal.PaychGet == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
} }
return s.Internal.PaychGet(p0, p1, p2, p3) return s.Internal.PaychGet(p0, p1, p2, p3, p4)
} }
func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) {
return nil, ErrNotSupported return nil, ErrNotSupported
} }
@ -3279,6 +3312,17 @@ func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 t
return *new(types.BigInt), ErrNotSupported return *new(types.BigInt), ErrNotSupported
} }
func (s *GatewayStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
if s.Internal.MsigGetVestingSchedule == nil {
return *new(MsigVesting), ErrNotSupported
}
return s.Internal.MsigGetVestingSchedule(p0, p1, p2)
}
func (s *GatewayStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
return *new(MsigVesting), ErrNotSupported
}
func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
if s.Internal.StateAccountKey == nil { if s.Internal.StateAccountKey == nil {
return *new(address.Address), ErrNotSupported return *new(address.Address), ErrNotSupported
@ -3631,6 +3675,17 @@ func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, er
return *new(peer.AddrInfo), ErrNotSupported return *new(peer.AddrInfo), ErrNotSupported
} }
func (s *NetStruct) NetLimit(p0 context.Context, p1 string) (NetLimit, error) {
if s.Internal.NetLimit == nil {
return *new(NetLimit), ErrNotSupported
}
return s.Internal.NetLimit(p0, p1)
}
func (s *NetStub) NetLimit(p0 context.Context, p1 string) (NetLimit, error) {
return *new(NetLimit), ErrNotSupported
}
func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
if s.Internal.NetPeerInfo == nil { if s.Internal.NetPeerInfo == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
@ -3653,6 +3708,39 @@ func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return *new([]peer.AddrInfo), ErrNotSupported return *new([]peer.AddrInfo), ErrNotSupported
} }
func (s *NetStruct) NetProtectAdd(p0 context.Context, p1 []peer.ID) error {
if s.Internal.NetProtectAdd == nil {
return ErrNotSupported
}
return s.Internal.NetProtectAdd(p0, p1)
}
func (s *NetStub) NetProtectAdd(p0 context.Context, p1 []peer.ID) error {
return ErrNotSupported
}
func (s *NetStruct) NetProtectList(p0 context.Context) ([]peer.ID, error) {
if s.Internal.NetProtectList == nil {
return *new([]peer.ID), ErrNotSupported
}
return s.Internal.NetProtectList(p0)
}
func (s *NetStub) NetProtectList(p0 context.Context) ([]peer.ID, error) {
return *new([]peer.ID), ErrNotSupported
}
func (s *NetStruct) NetProtectRemove(p0 context.Context, p1 []peer.ID) error {
if s.Internal.NetProtectRemove == nil {
return ErrNotSupported
}
return s.Internal.NetProtectRemove(p0, p1)
}
func (s *NetStub) NetProtectRemove(p0 context.Context, p1 []peer.ID) error {
return ErrNotSupported
}
func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
if s.Internal.NetPubsubScores == nil { if s.Internal.NetPubsubScores == nil {
return *new([]PubsubScore), ErrNotSupported return *new([]PubsubScore), ErrNotSupported
@ -3664,6 +3752,28 @@ func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return *new([]PubsubScore), ErrNotSupported return *new([]PubsubScore), ErrNotSupported
} }
func (s *NetStruct) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error {
if s.Internal.NetSetLimit == nil {
return ErrNotSupported
}
return s.Internal.NetSetLimit(p0, p1, p2)
}
func (s *NetStub) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error {
return ErrNotSupported
}
func (s *NetStruct) NetStat(p0 context.Context, p1 string) (NetStat, error) {
if s.Internal.NetStat == nil {
return *new(NetStat), ErrNotSupported
}
return s.Internal.NetStat(p0, p1)
}
func (s *NetStub) NetStat(p0 context.Context, p1 string) (NetStat, error) {
return *new(NetStat), ErrNotSupported
}
func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error { func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error {
if s.Internal.Sign == nil { if s.Internal.Sign == nil {
return ErrNotSupported return ErrNotSupported
@ -3785,6 +3895,17 @@ func (s *StorageMinerStub) DagstoreListShards(p0 context.Context) ([]DagstoreSha
return *new([]DagstoreShardInfo), ErrNotSupported return *new([]DagstoreShardInfo), ErrNotSupported
} }
func (s *StorageMinerStruct) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
if s.Internal.DagstoreLookupPieces == nil {
return *new([]DagstoreShardInfo), ErrNotSupported
}
return s.Internal.DagstoreLookupPieces(p0, p1)
}
func (s *StorageMinerStub) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
return *new([]DagstoreShardInfo), ErrNotSupported
}
func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error { func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error {
if s.Internal.DagstoreRecoverShard == nil { if s.Internal.DagstoreRecoverShard == nil {
return ErrNotSupported return ErrNotSupported
@ -3972,6 +4093,28 @@ func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []ci
return ErrNotSupported return ErrNotSupported
} }
func (s *StorageMinerStruct) IndexerAnnounceAllDeals(p0 context.Context) error {
if s.Internal.IndexerAnnounceAllDeals == nil {
return ErrNotSupported
}
return s.Internal.IndexerAnnounceAllDeals(p0)
}
func (s *StorageMinerStub) IndexerAnnounceAllDeals(p0 context.Context) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
if s.Internal.IndexerAnnounceDeal == nil {
return ErrNotSupported
}
return s.Internal.IndexerAnnounceDeal(p0, p1)
}
func (s *StorageMinerStub) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
if s.Internal.MarketCancelDataTransfer == nil { if s.Internal.MarketCancelDataTransfer == nil {
return ErrNotSupported return ErrNotSupported

View File
"fmt" "fmt"
"time" "time"
"github.com/libp2p/go-libp2p-core/network"
datatransfer "github.com/filecoin-project/go-data-transfer" datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -57,7 +59,7 @@ type MessageSendSpec struct {
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
type GraphSyncDataTransfer struct { type GraphSyncDataTransfer struct {
// GraphSync request id for this transfer // GraphSync request id for this transfer
RequestID graphsync.RequestID RequestID *graphsync.RequestID
// Graphsync state for this transfer // Graphsync state for this transfer
RequestState string RequestState string
// If a channel ID is present, indicates whether this is the current graphsync request for this channel // If a channel ID is present, indicates whether this is the current graphsync request for this channel
@ -123,6 +125,28 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
return channel return channel
} }
type NetStat struct {
System *network.ScopeStat `json:",omitempty"`
Transient *network.ScopeStat `json:",omitempty"`
Services map[string]network.ScopeStat `json:",omitempty"`
Protocols map[string]network.ScopeStat `json:",omitempty"`
Peers map[string]network.ScopeStat `json:",omitempty"`
}
type NetLimit struct {
Dynamic bool `json:",omitempty"`
// set if Dynamic is false
Memory int64 `json:",omitempty"`
// set if Dynamic is true
MemoryFraction float64 `json:",omitempty"`
MinMemory int64 `json:",omitempty"`
MaxMemory int64 `json:",omitempty"`
Streams, StreamsInbound, StreamsOutbound int
Conns, ConnsInbound, ConnsOutbound int
FD int
}
type NetBlockList struct { type NetBlockList struct {
Peers []peer.ID Peers []peer.ID
IPAddrs []string IPAddrs []string
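An illustrative sketch of the two `NetLimit` modes described by the comments above: a static limit sets `Memory` directly, while a dynamic one uses `MemoryFraction` bounded by `MinMemory`/`MaxMemory`. The numbers are placeholders; fields tagged `omitempty` (`Dynamic`, `Memory`, `MemoryFraction`, `MinMemory`, `MaxMemory`) simply drop out of the JSON for whichever mode is unused.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	static := api.NetLimit{
		Memory:        1 << 30, // fixed 1 GiB cap (Dynamic is false)
		Streams:       1024,
		Conns:         256,
		ConnsInbound:  128,
		ConnsOutbound: 128,
		FD:            512,
	}

	dynamic := api.NetLimit{
		Dynamic:        true,
		MemoryFraction: 0.125,     // fraction of total memory...
		MinMemory:      128 << 20, // ...clamped to at least 128 MiB
		MaxMemory:      4 << 30,   // ...and at most 4 GiB
	}

	for _, l := range []api.NetLimit{static, dynamic} {
		b, _ := json.Marshal(l) // the omitempty fields of the unused mode are not emitted
		fmt.Println(string(b))
	}
}
```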
View File
@ -1724,6 +1724,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
} }
// NetLimit mocks base method.
func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetLimit", arg0, arg1)
ret0, _ := ret[0].(api.NetLimit)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetLimit indicates an expected call of NetLimit.
func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1)
}
// NetPeerInfo mocks base method. // NetPeerInfo mocks base method.
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -1754,6 +1769,49 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
} }
// NetProtectAdd mocks base method.
func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// NetProtectAdd indicates an expected call of NetProtectAdd.
func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1)
}
// NetProtectList mocks base method.
func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectList", arg0)
ret0, _ := ret[0].([]peer.ID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetProtectList indicates an expected call of NetProtectList.
func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0)
}
// NetProtectRemove mocks base method.
func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// NetProtectRemove indicates an expected call of NetProtectRemove.
func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1)
}
// NetPubsubScores mocks base method. // NetPubsubScores mocks base method.
func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -1769,6 +1827,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
} }
// NetSetLimit mocks base method.
func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// NetSetLimit indicates an expected call of NetSetLimit.
func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2)
}
// NetStat mocks base method.
func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetStat", arg0, arg1)
ret0, _ := ret[0].(api.NetStat)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetStat indicates an expected call of NetStat.
func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1)
}
// PaychAllocateLane mocks base method. // PaychAllocateLane mocks base method.
func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()

View File

@ -337,4 +337,8 @@ func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder
finish(w.ClientExport(ctx, eref, *ref)) finish(w.ClientExport(ctx, eref, *ref))
} }
func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
return w.FullNode.PaychFund(ctx, from, to, amt)
}
var _ FullNode = &WrapperV1Full{} var _ FullNode = &WrapperV1Full{}

View File

@ -57,8 +57,8 @@ var (
FullAPIVersion0 = newVer(1, 5, 0) FullAPIVersion0 = newVer(1, 5, 0)
FullAPIVersion1 = newVer(2, 2, 0) FullAPIVersion1 = newVer(2, 2, 0)
MinerAPIVersion0 = newVer(1, 4, 0) MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 5, 0) WorkerAPIVersion0 = newVer(1, 6, 0)
) )
//nolint:varcheck,deadcode //nolint:varcheck,deadcode

21 blockstore/context.go Normal file
View File

@ -0,0 +1,21 @@
package blockstore
import (
"context"
)
type hotViewKey struct{}
var hotView = hotViewKey{}
// WithHotView constructs a new context with an option that provides a hint to the blockstore
// (e.g. the splitstore) that the object (and its ipld references) should be kept hot.
func WithHotView(ctx context.Context) context.Context {
return context.WithValue(ctx, hotView, struct{}{})
}
// IsHotView returns true if the hot view option is set in the context
func IsHotView(ctx context.Context) bool {
v := ctx.Value(hotView)
return v != nil
}
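A minimal usage sketch for the two helpers above, assuming bs is any lotus blockstore.Blockstore that may be backed by the splitstore (other implementations simply ignore the extra context value); the wrapper name fetchHot is illustrative, not part of this change:

    import (
        "context"

        blocks "github.com/ipfs/go-block-format"
        cid "github.com/ipfs/go-cid"

        "github.com/filecoin-project/lotus/blockstore"
    )

    // fetchHot reads a block while hinting that it (and the ipld graph below it)
    // should be kept in the hot store if it currently lives only in the cold store.
    func fetchHot(ctx context.Context, bs blockstore.Blockstore, c cid.Cid) (blocks.Block, error) {
        return bs.Get(blockstore.WithHotView(ctx), c)
    }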

View File

@ -161,6 +161,13 @@ type SplitStore struct {
txnSyncCond sync.Cond txnSyncCond sync.Cond
txnSync bool txnSync bool
// background cold object reification
reifyWorkers sync.WaitGroup
reifyMx sync.Mutex
reifyCond sync.Cond
reifyPend map[cid.Cid]struct{}
reifyInProgress map[cid.Cid]struct{}
// registered protectors // registered protectors
protectors []func(func(cid.Cid) error) error protectors []func(func(cid.Cid) error) error
} }
@ -202,6 +209,10 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
ss.txnSyncCond.L = &ss.txnSyncMx ss.txnSyncCond.L = &ss.txnSyncMx
ss.ctx, ss.cancel = context.WithCancel(context.Background()) ss.ctx, ss.cancel = context.WithCancel(context.Background())
ss.reifyCond.L = &ss.reifyMx
ss.reifyPend = make(map[cid.Cid]struct{})
ss.reifyInProgress = make(map[cid.Cid]struct{})
if enableDebugLog { if enableDebugLog {
ss.debug, err = openDebugLog(path) ss.debug, err = openDebugLog(path)
if err != nil { if err != nil {
@ -264,7 +275,13 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
return true, nil return true, nil
} }
return s.cold.Has(ctx, cid) has, err = s.cold.Has(ctx, cid)
if has && bstore.IsHotView(ctx) {
s.reifyColdObject(cid)
}
return has, err
} }
func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
@ -308,8 +325,11 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
blk, err = s.cold.Get(ctx, cid) blk, err = s.cold.Get(ctx, cid)
if err == nil { if err == nil {
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) if bstore.IsHotView(ctx) {
s.reifyColdObject(cid)
}
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
} }
return blk, err return blk, err
@ -359,6 +379,10 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
size, err = s.cold.GetSize(ctx, cid) size, err = s.cold.GetSize(ctx, cid)
if err == nil { if err == nil {
if bstore.IsHotView(ctx) {
s.reifyColdObject(cid)
}
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
} }
return size, err return size, err
@ -536,6 +560,10 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
err = s.cold.View(ctx, cid, cb) err = s.cold.View(ctx, cid, cb)
if err == nil { if err == nil {
if bstore.IsHotView(ctx) {
s.reifyColdObject(cid)
}
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
} }
return err return err
@ -645,6 +673,9 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
} }
} }
// spawn the reifier
go s.reifyOrchestrator()
// watch the chain // watch the chain
chain.SubscribeHeadChanges(s.HeadChange) chain.SubscribeHeadChanges(s.HeadChange)
@ -676,6 +707,8 @@ func (s *SplitStore) Close() error {
} }
} }
s.reifyCond.Broadcast()
s.reifyWorkers.Wait()
s.cancel() s.cancel()
return multierr.Combine(s.markSetEnv.Close(), s.debug.Close()) return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
} }

View File

@ -0,0 +1,214 @@
package splitstore
import (
"errors"
"runtime"
"sync/atomic"
"golang.org/x/xerrors"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
)
var (
errReifyLimit = errors.New("reification limit reached")
ReifyLimit = 16384
)
func (s *SplitStore) reifyColdObject(c cid.Cid) {
if !s.isWarm() {
return
}
if isUnitaryObject(c) {
return
}
s.reifyMx.Lock()
defer s.reifyMx.Unlock()
_, ok := s.reifyInProgress[c]
if ok {
return
}
s.reifyPend[c] = struct{}{}
s.reifyCond.Broadcast()
}
func (s *SplitStore) reifyOrchestrator() {
workers := runtime.NumCPU() / 4
if workers < 2 {
workers = 2
}
workch := make(chan cid.Cid, workers)
defer close(workch)
for i := 0; i < workers; i++ {
s.reifyWorkers.Add(1)
go s.reifyWorker(workch)
}
for {
s.reifyMx.Lock()
for len(s.reifyPend) == 0 && atomic.LoadInt32(&s.closing) == 0 {
s.reifyCond.Wait()
}
if atomic.LoadInt32(&s.closing) != 0 {
s.reifyMx.Unlock()
return
}
reifyPend := s.reifyPend
s.reifyPend = make(map[cid.Cid]struct{})
s.reifyMx.Unlock()
for c := range reifyPend {
select {
case workch <- c:
case <-s.ctx.Done():
return
}
}
}
}
func (s *SplitStore) reifyWorker(workch chan cid.Cid) {
defer s.reifyWorkers.Done()
for c := range workch {
s.doReify(c)
}
}
func (s *SplitStore) doReify(c cid.Cid) {
var toreify, totrack, toforget []cid.Cid
defer func() {
s.reifyMx.Lock()
defer s.reifyMx.Unlock()
for _, c := range toreify {
delete(s.reifyInProgress, c)
}
for _, c := range totrack {
delete(s.reifyInProgress, c)
}
for _, c := range toforget {
delete(s.reifyInProgress, c)
}
}()
s.txnLk.RLock()
defer s.txnLk.RUnlock()
count := 0
err := s.walkObjectIncomplete(c, newTmpVisitor(),
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
}
count++
if count > ReifyLimit {
return errReifyLimit
}
s.reifyMx.Lock()
_, inProgress := s.reifyInProgress[c]
if !inProgress {
s.reifyInProgress[c] = struct{}{}
}
s.reifyMx.Unlock()
if inProgress {
return errStopWalk
}
has, err := s.hot.Has(s.ctx, c)
if err != nil {
return xerrors.Errorf("error checking hotstore: %w", err)
}
if has {
if s.txnMarkSet != nil {
hasMark, err := s.txnMarkSet.Has(c)
if err != nil {
log.Warnf("error checking markset: %s", err)
} else if hasMark {
toforget = append(toforget, c)
return errStopWalk
}
} else {
totrack = append(totrack, c)
return errStopWalk
}
}
toreify = append(toreify, c)
return nil
},
func(missing cid.Cid) error {
log.Warnf("missing reference while reifying %s: %s", c, missing)
return errStopWalk
})
if err != nil {
if xerrors.Is(err, errReifyLimit) {
log.Debug("reification aborted; reify limit reached")
return
}
log.Warnf("error walking cold object for reification (cid: %s): %s", c, err)
return
}
log.Debugf("reifying %d objects rooted at %s", len(toreify), c)
// this should not get too big, maybe some 100s of objects.
batch := make([]blocks.Block, 0, len(toreify))
for _, c := range toreify {
blk, err := s.cold.Get(s.ctx, c)
if err != nil {
log.Warnf("error retrieving cold object for reification (cid: %s): %s", c, err)
continue
}
if err := s.checkClosing(); err != nil {
return
}
batch = append(batch, blk)
}
if len(batch) > 0 {
err = s.hot.PutMany(s.ctx, batch)
if err != nil {
log.Warnf("error reifying cold object (cid: %s): %s", c, err)
return
}
}
if s.txnMarkSet != nil {
if len(toreify) > 0 {
if err := s.txnMarkSet.MarkMany(toreify); err != nil {
log.Warnf("error marking reified objects: %s", err)
}
}
if len(totrack) > 0 {
if err := s.txnMarkSet.MarkMany(totrack); err != nil {
log.Warnf("error marking tracked objects: %s", err)
}
}
} else {
// if txnActive is false these are noops
if len(toreify) > 0 {
s.trackTxnRefMany(toreify)
}
if len(totrack) > 0 {
s.trackTxnRefMany(totrack)
}
}
}
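The flow above, in short: hot-view reads that are served from the cold store call reifyColdObject, which records the root CID in reifyPend; reifyOrchestrator drains that set and fans the CIDs out to a small worker pool; each worker walks the object graph (up to ReifyLimit nodes) and copies the cold blocks into the hot store in one batch. Below is a self-contained sketch of just the coordination pattern (pending set + sync.Cond + bounded workers); the names and types are illustrative, not Lotus APIs.

    package main

    import (
        "fmt"
        "sync"
    )

    type queue struct {
        mx      sync.Mutex
        cond    *sync.Cond
        pending map[string]struct{}
        closed  bool
    }

    func newQueue() *queue {
        q := &queue{pending: make(map[string]struct{})}
        q.cond = sync.NewCond(&q.mx)
        return q
    }

    // enqueue is the analogue of reifyColdObject: record the key and wake the orchestrator.
    func (q *queue) enqueue(k string) {
        q.mx.Lock()
        q.pending[k] = struct{}{}
        q.mx.Unlock()
        q.cond.Broadcast()
    }

    // close signals shutdown; run drains anything still pending before returning.
    func (q *queue) close() {
        q.mx.Lock()
        q.closed = true
        q.mx.Unlock()
        q.cond.Broadcast()
    }

    // run is the analogue of reifyOrchestrator plus reifyWorker, with a fixed pool size.
    func (q *queue) run(workers int, process func(string)) {
        workch := make(chan string, workers)
        var wg sync.WaitGroup
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for k := range workch {
                    process(k)
                }
            }()
        }
        for {
            q.mx.Lock()
            for len(q.pending) == 0 && !q.closed {
                q.cond.Wait()
            }
            if q.closed && len(q.pending) == 0 {
                q.mx.Unlock()
                close(workch)
                wg.Wait()
                return
            }
            batch := q.pending
            q.pending = make(map[string]struct{})
            q.mx.Unlock()
            for k := range batch {
                workch <- k // hand each key to an idle worker
            }
        }
    }

    func main() {
        q := newQueue()
        done := make(chan struct{})
        go func() {
            q.run(2, func(k string) { fmt.Println("reified", k) })
            close(done)
        }()
        q.enqueue("block-1")
        q.enqueue("block-2")
        q.close()
        <-done
    }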

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/rand"
"os" "os"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -387,6 +388,235 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
} }
} }
func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
ds := dssync.MutexWrap(datastore.NewMapDatastore())
hot := newMockStore()
cold := newMockStore()
mkRandomBlock := func() blocks.Block {
data := make([]byte, 128)
_, err := rand.Read(data)
if err != nil {
t.Fatal(err)
}
return blocks.NewBlock(data)
}
block1 := mkRandomBlock()
block2 := mkRandomBlock()
block3 := mkRandomBlock()
hdr := mock.MkBlock(nil, 0, 0)
hdr.Messages = block1.Cid()
hdr.ParentMessageReceipts = block2.Cid()
hdr.ParentStateRoot = block3.Cid()
block4, err := hdr.ToStorageBlock()
if err != nil {
t.Fatal(err)
}
allBlocks := []blocks.Block{block1, block2, block3, block4}
for _, blk := range allBlocks {
err := cold.Put(context.Background(), blk)
if err != nil {
t.Fatal(err)
}
}
path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil {
t.Fatal(err)
}
defer ss.Close() //nolint
ss.warmupEpoch = 1
go ss.reifyOrchestrator()
waitForReification := func() {
for {
ss.reifyMx.Lock()
ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
ss.reifyMx.Unlock()
if ready {
return
}
time.Sleep(time.Millisecond)
}
}
// first access using the standard view
err = f(context.Background(), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}
// nothing should be reified
waitForReification()
for _, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("block unexpectedly reified")
}
}
// now make the hot/reifying view and ensure access reifies
err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}
// everything should be reified
waitForReification()
for i, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatalf("block%d was not reified", i+1)
}
}
}
func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
ds := dssync.MutexWrap(datastore.NewMapDatastore())
hot := newMockStore()
cold := newMockStore()
mkRandomBlock := func() blocks.Block {
data := make([]byte, 128)
_, err := rand.Read(data)
if err != nil {
t.Fatal(err)
}
return blocks.NewBlock(data)
}
block1 := mkRandomBlock()
block2 := mkRandomBlock()
block3 := mkRandomBlock()
hdr := mock.MkBlock(nil, 0, 0)
hdr.Messages = block1.Cid()
hdr.ParentMessageReceipts = block2.Cid()
hdr.ParentStateRoot = block3.Cid()
block4, err := hdr.ToStorageBlock()
if err != nil {
t.Fatal(err)
}
allBlocks := []blocks.Block{block1, block2, block3, block4}
for _, blk := range allBlocks {
err := cold.Put(context.Background(), blk)
if err != nil {
t.Fatal(err)
}
}
path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil {
t.Fatal(err)
}
defer ss.Close() //nolint
ss.warmupEpoch = 1
go ss.reifyOrchestrator()
waitForReification := func() {
for {
ss.reifyMx.Lock()
ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
ss.reifyMx.Unlock()
if ready {
return
}
time.Sleep(time.Millisecond)
}
}
// do a hot access -- nothing should be reified because the walk exceeds the reify limit
oldReifyLimit := ReifyLimit
ReifyLimit = 2
t.Cleanup(func() {
ReifyLimit = oldReifyLimit
})
err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}
waitForReification()
for _, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("block unexpectedly reified")
}
}
}
func TestSplitStoreReification(t *testing.T) {
t.Log("test reification with Has")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Has(ctx, c)
return err
})
t.Log("test reification with Get")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Get(ctx, c)
return err
})
t.Log("test reification with GetSize")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.GetSize(ctx, c)
return err
})
t.Log("test reification with View")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
return s.View(ctx, c, func(_ []byte) error { return nil })
})
t.Log("test reification limit")
testSplitStoreReificationLimit(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Has(ctx, c)
return err
})
}
type mockChain struct { type mockChain struct {
t testing.TB t testing.TB

View File

@ -26,6 +26,10 @@ type tmpVisitor struct {
var _ ObjectVisitor = (*tmpVisitor)(nil) var _ ObjectVisitor = (*tmpVisitor)(nil)
func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) { func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
if isUnitaryObject(c) {
return false, nil
}
return v.set.Visit(c), nil return v.set.Visit(c), nil
} }
@ -45,6 +49,10 @@ func newConcurrentVisitor() *concurrentVisitor {
} }
func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) { func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
if isUnitaryObject(c) {
return false, nil
}
v.mx.Lock() v.mx.Lock()
defer v.mx.Unlock() defer v.mx.Unlock()

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -13,6 +13,17 @@ import (
func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) } func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) }
func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) } func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
func IndexerIngestTopic(netName dtypes.NetworkName) string {
nn := string(netName)
// The network name testnetnet is here for historical reasons.
// Going forward we aim to use the name `mainnet` where possible.
if nn == "testnetnet" {
nn = "mainnet"
}
return "/indexer/ingest/" + nn
}
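For example, the mapping above yields the following topic names (network names other than testnetnet pass through unchanged; "calibrationnet" is used here only as an illustrative value):

    IndexerIngestTopic("testnetnet")     // "/indexer/ingest/mainnet"
    IndexerIngestTopic("calibrationnet") // "/indexer/ingest/calibrationnet"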
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
return protocol.ID("/fil/kad/" + string(netName)) return protocol.ID("/fil/kad/" + string(netName))
} }

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
} }
// BuildVersion is the local build version // BuildVersion is the local build version
const BuildVersion = "1.15.0" const BuildVersion = "1.15.1"
func UserVersion() string { func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
) )
func init() { func init() {
@ -62,6 +63,11 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
} }
type RemoveDataCapProposal = verifreg{{.latestVersion}}.RemoveDataCapProposal
type RemoveDataCapRequest = verifreg{{.latestVersion}}.RemoveDataCapRequest
type RemoveDataCapParams = verifreg{{.latestVersion}}.RemoveDataCapParams
type RmDcProposalID = verifreg{{.latestVersion}}.RmDcProposalID
const SignatureDomainSeparation_RemoveDataCap = verifreg{{.latestVersion}}.SignatureDomainSeparation_RemoveDataCap
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -69,6 +75,7 @@ type State interface {
RootKey() (address.Address, error) RootKey() (address.Address, error)
VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
VerifierDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
GetState() interface{} GetState() interface{}

View File

@ -61,6 +61,10 @@ func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePo
return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr) return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr)
} }
func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb) return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb)
} }
@ -77,6 +81,11 @@ func (s *state{{.v}}) verifiers() (adt.Map, error) {
return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
} }
func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) {
{{if le .v 6}}return nil, nil
{{else}}return adt{{.v}}.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}}
}
func (s *state{{.v}}) GetState() interface{} { func (s *state{{.v}}) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
@ -50,3 +51,28 @@ func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr
return cb(a, dcap) return cb(a, dcap)
}) })
} }
func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) {
if verifier.Protocol() != address.ID {
return false, 0, xerrors.Errorf("can only look up ID addresses")
}
if client.Protocol() != address.ID {
return false, 0, xerrors.Errorf("can only look up ID addresses")
}
vh, err := root()
if err != nil {
return false, 0, xerrors.Errorf("loading verifreg: %w", err)
}
if vh == nil {
return false, 0, xerrors.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors")
}
var id verifreg.RmDcProposalID
if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil {
return false, 0, xerrors.Errorf("looking up addr pair: %w", err)
} else if !found {
return false, 0, nil
}
return true, id.ProposalID, nil
}
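A minimal caller-side sketch, assuming st is a loaded verifreg.State and both addresses have already been resolved to ID form (the helper above rejects anything else); the wrapper name and the "treat not-found as zero" convention are illustrative assumptions:

    import (
        "github.com/filecoin-project/go-address"
        "golang.org/x/xerrors"

        "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
    )

    // removeDataCapProposalID returns the removal proposal ID currently recorded for a
    // (verifier, client) pair, treating "not found" as zero (assumed starting value).
    func removeDataCapProposalID(st verifreg.State, verifierID, clientID address.Address) (uint64, error) {
        found, id, err := st.RemoveDataCapProposalID(verifierID, clientID)
        if err != nil {
            return 0, xerrors.Errorf("looking up proposal id: %w", err)
        }
        if !found {
            return 0, nil
        }
        return id, nil
    }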

View File

@ -53,6 +53,10 @@ func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version0, s.verifiers, addr) return getDataCap(s.store, actors.Version0, s.verifiers, addr)
} }
func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version0, s.verifiers, cb) return forEachCap(s.store, actors.Version0, s.verifiers, cb)
} }
@ -69,6 +73,11 @@ func (s *state0) verifiers() (adt.Map, error) {
return adt0.AsMap(s.store, s.Verifiers) return adt0.AsMap(s.store, s.Verifiers)
} }
func (s *state0) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state0) GetState() interface{} { func (s *state0) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -53,6 +53,10 @@ func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version2, s.verifiers, addr) return getDataCap(s.store, actors.Version2, s.verifiers, addr)
} }
func (s *state2) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version2, s.verifiers, cb) return forEachCap(s.store, actors.Version2, s.verifiers, cb)
} }
@ -69,6 +73,11 @@ func (s *state2) verifiers() (adt.Map, error) {
return adt2.AsMap(s.store, s.Verifiers) return adt2.AsMap(s.store, s.Verifiers)
} }
func (s *state2) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state2) GetState() interface{} { func (s *state2) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -54,6 +54,10 @@ func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version3, s.verifiers, addr) return getDataCap(s.store, actors.Version3, s.verifiers, addr)
} }
func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version3, s.verifiers, cb) return forEachCap(s.store, actors.Version3, s.verifiers, cb)
} }
@ -70,6 +74,11 @@ func (s *state3) verifiers() (adt.Map, error) {
return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
} }
func (s *state3) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state3) GetState() interface{} { func (s *state3) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -54,6 +54,10 @@ func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version4, s.verifiers, addr) return getDataCap(s.store, actors.Version4, s.verifiers, addr)
} }
func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version4, s.verifiers, cb) return forEachCap(s.store, actors.Version4, s.verifiers, cb)
} }
@ -70,6 +74,11 @@ func (s *state4) verifiers() (adt.Map, error) {
return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth)
} }
func (s *state4) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state4) GetState() interface{} { func (s *state4) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -54,6 +54,10 @@ func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version5, s.verifiers, addr) return getDataCap(s.store, actors.Version5, s.verifiers, addr)
} }
func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version5, s.verifiers, cb) return forEachCap(s.store, actors.Version5, s.verifiers, cb)
} }
@ -70,6 +74,11 @@ func (s *state5) verifiers() (adt.Map, error) {
return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth)
} }
func (s *state5) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state5) GetState() interface{} { func (s *state5) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -54,6 +54,10 @@ func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version6, s.verifiers, addr) return getDataCap(s.store, actors.Version6, s.verifiers, addr)
} }
func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version6, s.verifiers, cb) return forEachCap(s.store, actors.Version6, s.verifiers, cb)
} }
@ -70,6 +74,11 @@ func (s *state6) verifiers() (adt.Map, error) {
return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth) return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth)
} }
func (s *state6) removeDataCapProposalIDs() (adt.Map, error) {
return nil, nil
}
func (s *state6) GetState() interface{} { func (s *state6) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -54,6 +54,10 @@ func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
return getDataCap(s.store, actors.Version7, s.verifiers, addr) return getDataCap(s.store, actors.Version7, s.verifiers, addr)
} }
func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version7, s.verifiers, cb) return forEachCap(s.store, actors.Version7, s.verifiers, cb)
} }
@ -70,6 +74,10 @@ func (s *state7) verifiers() (adt.Map, error) {
return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth)
} }
func (s *state7) removeDataCapProposalIDs() (adt.Map, error) {
return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth)
}
func (s *state7) GetState() interface{} { func (s *state7) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -27,6 +27,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
) )
func init() { func init() {
@ -151,12 +152,20 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
} }
type RemoveDataCapProposal = verifreg7.RemoveDataCapProposal
type RemoveDataCapRequest = verifreg7.RemoveDataCapRequest
type RemoveDataCapParams = verifreg7.RemoveDataCapParams
type RmDcProposalID = verifreg7.RmDcProposalID
const SignatureDomainSeparation_RemoveDataCap = verifreg7.SignatureDomainSeparation_RemoveDataCap
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
RootKey() (address.Address, error) RootKey() (address.Address, error)
VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
VerifierDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
GetState() interface{} GetState() interface{}

View File

@ -2,6 +2,7 @@ package filcns
import ( import (
"context" "context"
"os"
"sync/atomic" "sync/atomic"
"github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/rand"
@ -32,6 +33,7 @@ import (
/* inline-gen end */ /* inline-gen end */
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -92,7 +94,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
partDone() partDone()
}() }()
makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) { ctx = blockstore.WithHotView(ctx)
makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (vm.Interface, error) {
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: base, StateBase: base,
Epoch: e, Epoch: e,
@ -106,10 +109,23 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts),
} }
if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" {
// This is needed so that the FVM does not have to duplicate the genesis vesting schedule, one
// of the components of the circ supply calc.
// This field is NOT needed by the LegacyVM, and also NOT needed by the FVM from v15 onwards.
filVested, err := sm.GetFilVested(ctx, e)
if err != nil {
return nil, err
}
vmopt.FilVested = filVested
return vm.NewFVM(ctx, vmopt)
}
return sm.VMConstructor()(ctx, vmopt) return sm.VMConstructor()(ctx, vmopt)
} }
runCron := func(vmCron *vm.VM, epoch abi.ChainEpoch) error { runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error {
cronMsg := &types.Message{ cronMsg := &types.Message{
To: cron.Address, To: cron.Address,
From: builtin.SystemActorAddr, From: builtin.SystemActorAddr,

View File

@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
} }
nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(baseTs.Height()) pl := vm.PricelistByEpoch(b.Header.Height)
var sumGasLimit int64 var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error { checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage() m := msg.VMMessage()

View File

@ -491,12 +491,13 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
Actors: filcns.NewActorRegistry(), Actors: filcns.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys), Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc, CircSupplyCalc: csc,
FilVested: big.Zero(),
NetworkVersion: nv, NetworkVersion: nv,
BaseFee: types.NewInt(0), BaseFee: big.Zero(),
} }
vm, err := vm.NewVM(ctx, &vmopt) vm, err := vm.NewLegacyVM(ctx, &vmopt)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err)
} }
for mi, m := range template.Miners { for mi, m := range template.Miners {

View File

@ -95,12 +95,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
Syscalls: mkFakedSigSyscalls(sys), Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc, CircSupplyCalc: csc,
NetworkVersion: nv, NetworkVersion: nv,
BaseFee: types.NewInt(0), BaseFee: big.Zero(),
FilVested: big.Zero(),
} }
vm, err := vm.NewVM(ctx, vmopt) vm, err := vm.NewLegacyVM(ctx, vmopt)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err)
} }
if len(miners) == 0 { if len(miners) == 0 {
@ -520,7 +521,7 @@ func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization cry
return out, nil return out, nil
} }
func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) { func currentTotalPower(ctx context.Context, vm *vm.LegacyVM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil) pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -533,7 +534,7 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*
return &pwr, nil return &pwr, nil
} }
func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) { func dealWeight(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) {
// TODO: This hack should move to market actor wrapper // TODO: This hack should move to market actor wrapper
if av <= actors.Version2 { if av <= actors.Version2 {
params := &market0.VerifyDealsForActivationParams{ params := &market0.VerifyDealsForActivationParams{
@ -593,7 +594,7 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [
return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil
} }
func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) { func currentEpochBlockReward(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) {
rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil) rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil)
if err != nil { if err != nil {
return big.Zero(), builtin.FilterEstimate{}, err return big.Zero(), builtin.FilterEstimate{}, err
@ -628,7 +629,7 @@ func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Addre
return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil
} }
func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount { func circSupply(ctx context.Context, vmi *vm.LegacyVM, maddr address.Address) abi.TokenAmount {
unsafeVM := &vm.UnsafeVM{VM: vmi} unsafeVM := &vm.UnsafeVM{VM: vmi}
rt := unsafeVM.MakeRuntime(ctx, &types.Message{ rt := unsafeVM.MakeRuntime(ctx, &types.Message{
GasLimit: 1_000_000_000, GasLimit: 1_000_000_000,

View File

@ -21,7 +21,7 @@ func mustEnc(i cbg.CBORMarshaler) []byte {
return enc return enc
} }
func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { func doExecValue(ctx context.Context, vm *vm.LegacyVM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
act, err := vm.StateTree().GetActor(from) act, err := vm.StateTree().GetActor(from)
if err != nil { if err != nil {
return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err) return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err)

View File

@ -106,7 +106,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
curTs := mp.curTs curTs := mp.curTs
mp.curTsLk.Unlock() mp.curTsLk.Unlock()
epoch := curTs.Height() epoch := curTs.Height() + 1
var baseFee big.Int var baseFee big.Int
if len(curTs.Blocks()) > 0 { if len(curTs.Blocks()) > 0 {

View File

@ -0,0 +1,224 @@
//stm: #unit
package messagepool
import (
"context"
"fmt"
"testing"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/assert"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
)
func init() {
_ = logging.SetLogLevel("*", "INFO")
}
func getCheckMessageStatus(statusCode api.CheckStatusCode, msgStatuses []api.MessageCheckStatus) (*api.MessageCheckStatus, error) {
for i := 0; i < len(msgStatuses); i++ {
iMsgStatuses := msgStatuses[i]
if iMsgStatuses.CheckStatus.Code == statusCode {
return &iMsgStatuses, nil
}
}
return nil, fmt.Errorf("Could not find CheckStatusCode %s", statusCode)
}
func TestCheckMessages(t *testing.T) {
//stm: @CHAIN_MEMPOOL_CHECK_MESSAGES_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
tma.setBalance(sender, 1000e15)
target := mock.Address(1001)
var protos []*api.MessagePrototype
for i := 0; i < 5; i++ {
msg := &types.Message{
To: target,
From: sender,
Value: types.NewInt(1),
Nonce: uint64(i),
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 2<<10),
}
proto := &api.MessagePrototype{
Message: *msg,
ValidNonce: true,
}
protos = append(protos, proto)
}
messageStatuses, err := mp.CheckMessages(context.TODO(), protos)
assert.NoError(t, err)
for i := 0; i < len(messageStatuses); i++ {
iMsgStatuses := messageStatuses[i]
for j := 0; j < len(iMsgStatuses); j++ {
jStatus := iMsgStatuses[j]
assert.True(t, jStatus.OK)
}
}
}
func TestCheckPendingMessages(t *testing.T) {
//stm: @CHAIN_MEMPOOL_CHECK_PENDING_MESSAGES_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
tma.setBalance(sender, 1000e15)
target := mock.Address(1001)
// add a valid message to the pool
msg := &types.Message{
To: target,
From: sender,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 2<<10),
}
sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
mustAdd(t, mp, sm)
messageStatuses, err := mp.CheckPendingMessages(context.TODO(), sender)
assert.NoError(t, err)
for i := 0; i < len(messageStatuses); i++ {
iMsgStatuses := messageStatuses[i]
for j := 0; j < len(iMsgStatuses); j++ {
jStatus := iMsgStatuses[j]
assert.True(t, jStatus.OK)
}
}
}
func TestCheckReplaceMessages(t *testing.T) {
//stm: @CHAIN_MEMPOOL_CHECK_REPLACE_MESSAGES_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
tma.setBalance(sender, 1000e15)
target := mock.Address(1001)
// add a valid message to the pool
msg := &types.Message{
To: target,
From: sender,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 2<<10),
}
sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
mustAdd(t, mp, sm)
// create a new message with the same data, except that it is too big
var msgs []*types.Message
invalidmsg := &types.Message{
To: target,
From: sender,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 128<<10),
}
msgs = append(msgs, invalidmsg)
{
messageStatuses, err := mp.CheckReplaceMessages(context.TODO(), msgs)
if err != nil {
t.Fatal(err)
}
for i := 0; i < len(messageStatuses); i++ {
iMsgStatuses := messageStatuses[i]
status, err := getCheckMessageStatus(api.CheckStatusMessageSize, iMsgStatuses)
if err != nil {
t.Fatal(err)
}
// the replacement message should cause a status error
assert.False(t, status.OK)
}
}
}

View File

@ -628,7 +628,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// For non local messages, if the message cannot be included in the next 20 blocks it returns // For non local messages, if the message cannot be included in the next 20 blocks it returns
// a (soft) validation error. // a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) { func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height() epoch := curTs.Height() + 1
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil { if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {

View File

@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
@ -226,6 +227,8 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
} }
func TestMessagePool(t *testing.T) { func TestMessagePool(t *testing.T) {
//stm: @CHAIN_MEMPOOL_GET_NONCE_001
tma := newTestMpoolAPI() tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore()) w, err := wallet.NewWallet(wallet.NewMemKeyStore())
@ -327,6 +330,7 @@ func TestCheckMessageBig(t *testing.T) {
Message: *msg, Message: *msg,
Signature: *sig, Signature: *sig,
} }
//stm: @CHAIN_MEMPOOL_PUSH_001
err = mp.Add(context.TODO(), sm) err = mp.Add(context.TODO(), sm)
assert.ErrorIs(t, err, ErrMessageTooBig) assert.ErrorIs(t, err, ErrMessageTooBig)
} }
@ -760,3 +764,302 @@ func TestUpdates(t *testing.T) {
t.Fatal("expected closed channel, but got an update instead") t.Fatal("expected closed channel, but got an update instead")
} }
} }
func TestMessageBelowMinGasFee(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
// fee is just below minimum gas fee
fee := minimumBaseFee.Uint64() - 1
{
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(fee),
GasPremium: types.NewInt(1),
Params: make([]byte, 32<<10),
}
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
err = mp.Add(context.TODO(), sm)
assert.ErrorIs(t, err, ErrGasFeeCapTooLow)
}
}
func TestMessageValueTooHigh(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
totalFil := types.TotalFilecoinInt
extra := types.NewInt(1)
value := types.BigAdd(totalFil, extra)
{
msg := &types.Message{
To: to,
From: from,
Value: value,
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 32<<10),
}
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
}
}
func TestMessageSignatureInvalid(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 32<<10),
}
badSig := &crypto.Signature{
Type: crypto.SigTypeSecp256k1,
Data: make([]byte, 0),
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *badSig,
}
err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
// assert.Contains(t, err.Error(), "invalid signature length")
}
}
func TestAddMessageTwice(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
// create a valid message
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
mustAdd(t, mp, sm)
// try to add it twice
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
assert.Error(t, err)
}
}
func TestAddMessageTwiceNonceGap(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
// create message with invalid nonce (1)
sm := makeTestMessage(w, from, to, 1, 50_000_000, minimumBaseFee.Uint64())
mustAdd(t, mp, sm)
// then try to add message again
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "unfulfilled nonce gap")
assert.Error(t, err)
}
}
func TestAddMessageTwiceCidDiff(t *testing.T) {
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
mustAdd(t, mp, sm)
// Create message with different data, so CID is different
sm2 := makeTestMessage(w, from, to, 0, 50_000_001, minimumBaseFee.Uint64())
//stm: @CHAIN_MEMPOOL_PUSH_001
// then try to add message again
err = mp.Add(context.TODO(), sm2)
// assert.Contains(t, err.Error(), "replace by fee has too low GasPremium")
assert.Error(t, err)
}
}
func TestAddMessageTwiceCidDiffReplaced(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
mustAdd(t, mp, sm)
// Create message with different data, so CID is different
sm2 := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()*2)
mustAdd(t, mp, sm2)
}
}
func TestRemoveMessage(t *testing.T) {
//stm: @CHAIN_MEMPOOL_PUSH_001
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
mustAdd(t, mp, sm)
//stm: @CHAIN_MEMPOOL_REMOVE_001
// remove message for sender
mp.Remove(context.TODO(), from, sm.Message.Nonce, true)
//stm: @CHAIN_MEMPOOL_PENDING_FOR_001
// check messages in pool: should be none present
msgs := mp.pendingFor(context.TODO(), from)
assert.Len(t, msgs, 0)
}
}

View File

@ -1,3 +1,4 @@
//stm: #unit
package messagepool package messagepool
import ( import (
@ -16,6 +17,7 @@ import (
) )
func TestRepubMessages(t *testing.T) { func TestRepubMessages(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001
oldRepublishBatchDelay := RepublishBatchDelay oldRepublishBatchDelay := RepublishBatchDelay
RepublishBatchDelay = time.Microsecond RepublishBatchDelay = time.Microsecond
defer func() { defer func() {
@ -57,6 +59,7 @@ func TestRepubMessages(t *testing.T) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
//stm: @CHAIN_MEMPOOL_PUSH_001
_, err := mp.Push(context.TODO(), m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -1,3 +1,4 @@
//stm: #unit
package messagepool package messagepool
import ( import (
@ -74,6 +75,8 @@ func makeTestMpool() (*MessagePool, *testMpoolAPI) {
} }
func TestMessageChains(t *testing.T) { func TestMessageChains(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001
//stm: @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -310,6 +313,8 @@ func TestMessageChains(t *testing.T) {
} }
func TestMessageChainSkipping(t *testing.T) { func TestMessageChainSkipping(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001
// regression test for chain skip bug // regression test for chain skip bug
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
@ -382,6 +387,7 @@ func TestMessageChainSkipping(t *testing.T) {
} }
func TestBasicMessageSelection(t *testing.T) { func TestBasicMessageSelection(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
oldMaxNonceGap := MaxNonceGap oldMaxNonceGap := MaxNonceGap
MaxNonceGap = 1000 MaxNonceGap = 1000
defer func() { defer func() {
@ -532,6 +538,7 @@ func TestBasicMessageSelection(t *testing.T) {
} }
func TestMessageSelectionTrimmingGas(t *testing.T) { func TestMessageSelectionTrimmingGas(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -595,6 +602,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) {
} }
func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -641,6 +649,7 @@ func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
} }
func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -707,6 +716,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
} }
func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -788,6 +798,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
} }
func TestPriorityMessageSelection(t *testing.T) { func TestPriorityMessageSelection(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -867,6 +878,7 @@ func TestPriorityMessageSelection(t *testing.T) {
} }
func TestPriorityMessageSelection2(t *testing.T) { func TestPriorityMessageSelection2(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -934,6 +946,7 @@ func TestPriorityMessageSelection2(t *testing.T) {
} }
func TestPriorityMessageSelection3(t *testing.T) { func TestPriorityMessageSelection3(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
mp, tma := makeTestMpool() mp, tma := makeTestMpool()
// the actors // the actors
@ -1028,6 +1041,8 @@ func TestPriorityMessageSelection3(t *testing.T) {
} }
func TestOptimalMessageSelection1(t *testing.T) { func TestOptimalMessageSelection1(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
// this test uses just a single actor sending messages with a low tq // this test uses just a single actor sending messages with a low tq
// the chain depenent merging algorithm should pick messages from the actor // the chain depenent merging algorithm should pick messages from the actor
// from the start // from the start
@ -1094,6 +1109,8 @@ func TestOptimalMessageSelection1(t *testing.T) {
} }
func TestOptimalMessageSelection2(t *testing.T) { func TestOptimalMessageSelection2(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
// this test uses two actors sending messages to each other, with the first // this test uses two actors sending messages to each other, with the first
// actor paying (much) higher gas premium than the second. // actor paying (much) higher gas premium than the second.
// We select with a low ticket quality; the chain depenent merging algorithm should pick // We select with a low ticket quality; the chain depenent merging algorithm should pick
@ -1173,6 +1190,8 @@ func TestOptimalMessageSelection2(t *testing.T) {
} }
func TestOptimalMessageSelection3(t *testing.T) { func TestOptimalMessageSelection3(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
// this test uses 10 actors sending a block of messages to each other, with the the first // this test uses 10 actors sending a block of messages to each other, with the the first
// actors paying higher gas premium than the subsequent actors. // actors paying higher gas premium than the subsequent actors.
// We select with a low ticket quality; the chain dependent merging algorithm should pick // We select with a low ticket quality; the chain dependent merging algorithm should pick
@ -1416,6 +1435,8 @@ func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 {
} }
func TestCompetitiveMessageSelectionExp(t *testing.T) { func TestCompetitiveMessageSelectionExp(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
if testing.Short() { if testing.Short() {
t.Skip("skipping in short mode") t.Skip("skipping in short mode")
} }
@ -1439,6 +1460,8 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) {
} }
func TestCompetitiveMessageSelectionZipf(t *testing.T) { func TestCompetitiveMessageSelectionZipf(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
if testing.Short() { if testing.Short() {
t.Skip("skipping in short mode") t.Skip("skipping in short mode")
} }
@ -1462,6 +1485,7 @@ func TestCompetitiveMessageSelectionZipf(t *testing.T) {
} }
func TestGasReward(t *testing.T) { func TestGasReward(t *testing.T) {
//stm: @CHAIN_MEMPOOL_GET_GAS_REWARD_001
tests := []struct { tests := []struct {
Premium uint64 Premium uint64
FeeCap uint64 FeeCap uint64
@ -1494,6 +1518,8 @@ func TestGasReward(t *testing.T) {
} }
func TestRealWorldSelection(t *testing.T) { func TestRealWorldSelection(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001
// load test-messages.json.gz and rewrite the messages so that // load test-messages.json.gz and rewrite the messages so that
// 1) we map each real actor to a test actor so that we can sign the messages // 1) we map each real actor to a test actor so that we can sign the messages
// 2) adjust the nonces so that they start from 0 // 2) adjust the nonces so that they start from 0
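TestRealWorldSelection's preamble above describes rewriting captured mainnet messages: each real sender is mapped to a locally generated test actor so the messages can be re-signed, and nonces are shifted so every sender starts from 0. A standalone sketch of that nonce re-basing step, using simplified stand-in types (nothing here is taken verbatim from the test):

package example

// rawMsg is a cut-down message for this sketch: only the fields the rewrite touches.
type rawMsg struct {
    From  string
    Nonce uint64
}

// rebase maps each original sender to a replacement address and shifts that
// sender's nonces so they start at zero, preserving their relative order.
func rebase(msgs []rawMsg, replace map[string]string) []rawMsg {
    minNonce := map[string]uint64{}
    for _, m := range msgs {
        if n, ok := minNonce[m.From]; !ok || m.Nonce < n {
            minNonce[m.From] = m.Nonce
        }
    }
    out := make([]rawMsg, 0, len(msgs))
    for _, m := range msgs {
        out = append(out, rawMsg{
            From:  replace[m.From],           // test actor standing in for the real sender
            Nonce: m.Nonce - minNonce[m.From], // nonces now start from 0 per sender
        })
    }
    return out
}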

View File

@ -5,6 +5,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/filecoin-project/lotus/blockstore"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -64,6 +70,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
pheight = ts.Height() - 1 pheight = ts.Height() - 1
} }
// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := pheight + 1
bstate := ts.ParentState() bstate := ts.ParentState()
// Run the (not expensive) migration. // Run the (not expensive) migration.
@ -72,9 +80,14 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
return nil, fmt.Errorf("failed to handle fork: %w", err) return nil, fmt.Errorf("failed to handle fork: %w", err)
} }
filVested, err := sm.GetFilVested(ctx, vmHeight)
if err != nil {
return nil, err
}
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: bstate, StateBase: bstate,
Epoch: pheight + 1, Epoch: vmHeight,
Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion), Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion),
Bstore: sm.cs.StateBlockstore(), Bstore: sm.cs.StateBlockstore(),
Actors: sm.tsExec.NewActorRegistry(), Actors: sm.tsExec.NewActorRegistry(),
@ -82,6 +95,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
CircSupplyCalc: sm.GetVMCirculatingSupply, CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1), NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1),
BaseFee: types.NewInt(0), BaseFee: types.NewInt(0),
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts), LookbackState: LookbackStateGetterForTipset(sm, ts),
} }
@ -112,7 +126,12 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
) )
} }
fromActor, err := vmi.StateTree().GetActor(msg.From) stTree, err := sm.StateTree(bstate)
if err != nil {
return nil, xerrors.Errorf("failed to load state tree: %w", err)
}
fromActor, err := stTree.GetActor(msg.From)
if err != nil { if err != nil {
return nil, xerrors.Errorf("call raw get actor: %s", err) return nil, xerrors.Errorf("call raw get actor: %s", err)
} }
@ -175,13 +194,16 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
} }
} }
state, _, err := sm.TipSetState(ctx, ts) // Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := ts.Height() + 1
stateCid, _, err := sm.TipSetState(ctx, ts)
if err != nil { if err != nil {
return nil, xerrors.Errorf("computing tipset state: %w", err) return nil, xerrors.Errorf("computing tipset state: %w", err)
} }
// Technically, the tipset we're passing in here should be ts+1, but that may not exist. // Technically, the tipset we're passing in here should be ts+1, but that may not exist.
state, err = sm.HandleStateForks(ctx, state, ts.Height(), nil, ts) stateCid, err = sm.HandleStateForks(ctx, stateCid, ts.Height(), nil, ts)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err) return nil, fmt.Errorf("failed to handle fork: %w", err)
} }
@ -196,16 +218,23 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
) )
} }
filVested, err := sm.GetFilVested(ctx, vmHeight)
if err != nil {
return nil, err
}
buffStore := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: state, StateBase: stateCid,
Epoch: ts.Height() + 1, Epoch: vmHeight,
Rand: r, Rand: r,
Bstore: sm.cs.StateBlockstore(), Bstore: buffStore,
Actors: sm.tsExec.NewActorRegistry(), Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls, Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply, CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1), NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1),
BaseFee: ts.Blocks()[0].ParentBaseFee, BaseFee: ts.Blocks()[0].ParentBaseFee,
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts), LookbackState: LookbackStateGetterForTipset(sm, ts),
} }
vmi, err := sm.newVM(ctx, vmopt) vmi, err := sm.newVM(ctx, vmopt)
@ -219,7 +248,19 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
} }
} }
fromActor, err := vmi.StateTree().GetActor(msg.From) // We flush to get the VM's view of the state tree after applying the above messages
// This is needed to get the correct nonce from the actor state to match the VM
stateCid, err = vmi.Flush(ctx)
if err != nil {
return nil, xerrors.Errorf("flushing vm: %w", err)
}
stTree, err := state.LoadStateTree(cbor.NewCborStore(buffStore), stateCid)
if err != nil {
return nil, xerrors.Errorf("loading state tree: %w", err)
}
fromActor, err := stTree.GetActor(msg.From)
if err != nil { if err != nil {
return nil, xerrors.Errorf("call raw get actor: %s", err) return nil, xerrors.Errorf("call raw get actor: %s", err)
} }
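The hunks above replace direct access to the concrete VM's state tree with an explicit flush followed by loading the state tree from the buffered store, so the sender's nonce reflects the priority messages already applied. A condensed helper distilling that pattern from the diff; the name actorAfterFlush and the narrow flusher interface are illustrative only:

package example // illustrative package name

import (
    "context"

    "github.com/filecoin-project/go-address"
    "github.com/ipfs/go-cid"
    cbor "github.com/ipfs/go-ipld-cbor"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/blockstore"
    "github.com/filecoin-project/lotus/chain/state"
    "github.com/filecoin-project/lotus/chain/types"
)

// flusher is the minimal slice of the VM interface this helper needs.
type flusher interface {
    Flush(context.Context) (cid.Cid, error)
}

// actorAfterFlush flushes the VM's pending writes into the buffered store and
// loads the resulting state tree, so the caller sees nonces that already
// account for the messages applied above.
func actorAfterFlush(ctx context.Context, vmi flusher, buffStore blockstore.Blockstore, from address.Address) (*types.Actor, error) {
    stateCid, err := vmi.Flush(ctx)
    if err != nil {
        return nil, xerrors.Errorf("flushing vm: %w", err)
    }
    stTree, err := state.LoadStateTree(cbor.NewCborStore(buffStore), stateCid)
    if err != nil {
        return nil, xerrors.Errorf("loading state tree: %w", err)
    }
    return stTree.GetActor(from)
}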

View File

@ -166,8 +166,8 @@ func TestForkHeightTriggers(t *testing.T) {
inv := filcns.NewActorRegistry() inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{}) inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewVM(ctx, vmopt) nvm, err := vm.NewLegacyVM(ctx, vmopt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -281,8 +281,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
inv := filcns.NewActorRegistry() inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{}) inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewVM(ctx, vmopt) nvm, err := vm.NewLegacyVM(ctx, vmopt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -500,8 +500,8 @@ func TestForkPreMigration(t *testing.T) {
inv := filcns.NewActorRegistry() inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{}) inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewVM(ctx, vmopt) nvm, err := vm.NewLegacyVM(ctx, vmopt)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -84,7 +84,7 @@ type StateManager struct {
compWait map[string]chan struct{} compWait map[string]chan struct{}
stlk sync.Mutex stlk sync.Mutex
genesisMsigLk sync.Mutex genesisMsigLk sync.Mutex
newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) newVM func(context.Context, *vm.VMOpts) (vm.Interface, error)
Syscalls vm.SyscallBuilder Syscalls vm.SyscallBuilder
preIgnitionVesting []msig0.State preIgnitionVesting []msig0.State
postIgnitionVesting []msig0.State postIgnitionVesting []msig0.State
@ -347,12 +347,12 @@ func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) err
return nil return nil
} }
func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (*vm.VM, error)) { func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (vm.Interface, error)) {
sm.newVM = nvm sm.newVM = nvm
} }
func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.VM, error) { func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (vm.Interface, error) {
return func(ctx context.Context, opts *vm.VMOpts) (*vm.VM, error) { return func(ctx context.Context, opts *vm.VMOpts) (vm.Interface, error) {
return sm.newVM(ctx, opts) return sm.newVM(ctx, opts)
} }
} }
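SetVMConstructor and VMConstructor now traffic in vm.Interface rather than the concrete *vm.VM. A reconstruction of that interface's shape, inferred from the methods both the LegacyVM and the FVM expose later in this diff (a sketch, not a verbatim copy of the lotus source):

package example

import (
    "context"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/vm"
)

// vmInterface reconstructs the shape of vm.Interface from the methods the two
// implementations in this change share.
type vmInterface interface {
    ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*vm.ApplyRet, error)
    ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*vm.ApplyRet, error)
    Flush(ctx context.Context) (cid.Cid, error)
}

// Both concrete VMs satisfy this reconstructed interface.
var _ vmInterface = (*vm.LegacyVM)(nil)
var _ vmInterface = (*vm.FVM)(nil)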

View File

@ -196,8 +196,32 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
// GetVestedFunds returns all funds that have "left" actors that are in the genesis state: // GetVestedFunds returns all funds that have "left" actors that are in the genesis state:
// - For Multisigs, it counts the actual amounts that have vested at the given epoch // - For Multisigs, it counts the actual amounts that have vested at the given epoch
// - For Accounts, it counts max(currentBalance - genesisBalance, 0). // - For Accounts, it counts max(currentBalance - genesisBalance, 0).
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
vf := big.Zero() vf := big.Zero()
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
// TODO: combine all this?
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}
if height <= build.UpgradeIgnitionHeight { if height <= build.UpgradeIgnitionHeight {
for _, v := range sm.preIgnitionVesting { for _, v := range sm.preIgnitionVesting {
au := big.Sub(v.InitialBalance, v.AmountLocked(height)) au := big.Sub(v.InitialBalance, v.AmountLocked(height))
@ -282,7 +306,7 @@ func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmoun
return pst.TotalLocked() return pst.TotalLocked()
} }
func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { func GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
filMarketLocked, err := getFilMarketLocked(ctx, st) filMarketLocked, err := getFilMarketLocked(ctx, st)
if err != nil { if err != nil {
@ -316,28 +340,7 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C
} }
func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock() filVested, err := sm.GetFilVested(ctx, height)
defer sm.genesisMsigLk.Unlock()
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}
filVested, err := sm.GetFilVested(ctx, height, st)
if err != nil { if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err) return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err)
} }
@ -360,7 +363,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err) return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err)
} }
filLocked, err := sm.GetFilLocked(ctx, st) filLocked, err := GetFilLocked(ctx, st)
if err != nil { if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err) return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err)
} }
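GetFilVested now performs the lazy, mutex-guarded setup of the pre-ignition, post-ignition and post-calico vesting schedules itself, so every caller (Call, CallWithGas, ComputeState, the FVM) goes through one entry point instead of relying on GetVMCirculatingSupplyDetailed to have run first. A minimal standalone sketch of that initialize-on-first-use pattern, with hypothetical names and data:

package example

import "sync"

// lazySchedules mirrors the shape of the vesting-schedule setup: each table is
// built at most once, under a single lock, the first time a caller needs it.
type lazySchedules struct {
    mu        sync.Mutex
    schedules map[string][]int64 // hypothetical stand-in for the vesting tables
}

func (l *lazySchedules) get(name string, build func() []int64) []int64 {
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.schedules == nil {
        l.schedules = make(map[string][]int64)
    }
    if _, ok := l.schedules[name]; !ok {
        l.schedules[name] = build() // built once; later callers reuse it
    }
    return l.schedules[name]
}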

View File

@ -79,6 +79,11 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
// future. It's not guaranteed to be accurate... but that's fine. // future. It's not guaranteed to be accurate... but that's fine.
} }
filVested, err := sm.GetFilVested(ctx, height)
if err != nil {
return cid.Undef, nil, err
}
r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion)
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: base, StateBase: base,
@ -90,6 +95,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
CircSupplyCalc: sm.GetVMCirculatingSupply, CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, height), NetworkVersion: sm.GetNetworkVersion(ctx, height),
BaseFee: ts.Blocks()[0].ParentBaseFee, BaseFee: ts.Blocks()[0].ParentBaseFee,
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts), LookbackState: LookbackStateGetterForTipset(sm, ts),
} }
vmi, err := sm.newVM(ctx, vmopt) vmi, err := sm.newVM(ctx, vmopt)

View File

@ -18,6 +18,10 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
func (cs *ChainStore) UnionStore() bstore.Blockstore {
return bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
}
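UnionStore simply exposes the union of the state and chain blockstores that Export previously built inline. Assuming bstore.Union keeps its usual first-hit-wins read semantics, the behaviour the export path relies on looks roughly like this simplified sketch (the getter interface is a stand-in, not the real blockstore API):

package example

import (
    "context"
    "errors"
)

// getter is a cut-down view of a blockstore for this sketch.
type getter interface {
    Get(ctx context.Context, key string) ([]byte, error)
}

var errNotFound = errors.New("not found")

// unionGet tries each store in order and returns the first hit, which is what
// a snapshot walk needs when blocks may live in either the state or chain store.
func unionGet(ctx context.Context, key string, stores ...getter) ([]byte, error) {
    for _, s := range stores {
        if b, err := s.Get(ctx, key); err == nil {
            return b, nil
        }
    }
    return nil, errNotFound
}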
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error { func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
h := &car.CarHeader{ h := &car.CarHeader{
Roots: ts.Cids(), Roots: ts.Cids(),
@ -28,7 +32,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return xerrors.Errorf("failed to write car header: %s", err) return xerrors.Errorf("failed to write car header: %s", err)
} }
unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) unionBs := cs.UnionStore()
return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error {
blk, err := unionBs.Get(ctx, c) blk, err := unionBs.Get(ctx, c)
if err != nil { if err != nil {

View File

@ -1,19 +1,24 @@
package sub package sub
import ( import (
"bytes"
"context" "context"
"fmt" "encoding/binary"
"sync"
"time" "time"
address "github.com/filecoin-project/go-address" address "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/sub/ratelimit"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/client" "github.com/filecoin-project/lotus/node/impl/client"
"github.com/filecoin-project/lotus/node/impl/full"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
blocks "github.com/ipfs/go-block-format" blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice" bserv "github.com/ipfs/go-blockservice"
@ -168,12 +173,12 @@ func fetchCids(
cidIndex := make(map[cid.Cid]int) cidIndex := make(map[cid.Cid]int)
for i, c := range cids { for i, c := range cids {
if c.Prefix() != msgCidPrefix { if c.Prefix() != msgCidPrefix {
return fmt.Errorf("invalid msg CID: %s", c) return xerrors.Errorf("invalid msg CID: %s", c)
} }
cidIndex[c] = i cidIndex[c] = i
} }
if len(cids) != len(cidIndex) { if len(cids) != len(cidIndex) {
return fmt.Errorf("duplicate CIDs in fetchCids input") return xerrors.Errorf("duplicate CIDs in fetchCids input")
} }
for block := range bserv.GetBlocks(ctx, cids) { for block := range bserv.GetBlocks(ctx, cids) {
@ -196,7 +201,7 @@ func fetchCids(
if len(cidIndex) > 0 { if len(cidIndex) > 0 {
err := ctx.Err() err := ctx.Err()
if err == nil { if err == nil {
err = fmt.Errorf("failed to fetch %d messages for unknown reasons", len(cidIndex)) err = xerrors.Errorf("failed to fetch %d messages for unknown reasons", len(cidIndex))
} }
return err return err
} }
@ -444,3 +449,168 @@ func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType
) )
stats.Record(ctx, metric.M(1)) stats.Record(ctx, metric.M(1))
} }
type peerMsgInfo struct {
peerID peer.ID
lastCid cid.Cid
lastSeqno uint64
rateLimit *ratelimit.Window
mutex sync.Mutex
}
type IndexerMessageValidator struct {
self peer.ID
peerCache *lru.TwoQueueCache
chainApi full.ChainModuleAPI
stateApi full.StateModuleAPI
}
func NewIndexerMessageValidator(self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) *IndexerMessageValidator {
peerCache, _ := lru.New2Q(8192)
return &IndexerMessageValidator{
self: self,
peerCache: peerCache,
chainApi: chainApi,
stateApi: stateApi,
}
}
func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
// This chain-node should not be publishing its own messages. These are
// relayed from market-nodes. If a node appears to be local, reject it.
if pid == v.self {
log.Debug("ignoring indexer message from self")
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
originPeer := msg.GetFrom()
if originPeer == v.self {
log.Debug("ignoring indexer message originating from self")
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
idxrMsg := dtsync.Message{}
err := idxrMsg.UnmarshalCBOR(bytes.NewBuffer(msg.Data))
if err != nil {
log.Errorw("Could not decode indexer pubsub message", "err", err)
return pubsub.ValidationReject
}
if len(idxrMsg.ExtraData) == 0 {
log.Debugw("ignoring messsage missing miner id", "peer", originPeer)
return pubsub.ValidationIgnore
}
// Get miner info from lotus
minerAddr, err := address.NewFromBytes(idxrMsg.ExtraData)
if err != nil {
log.Warnw("cannot parse extra data as miner address", "err", err, "extraData", idxrMsg.ExtraData)
return pubsub.ValidationReject
}
minerID := minerAddr.String()
msgCid := idxrMsg.Cid
var msgInfo *peerMsgInfo
val, ok := v.peerCache.Get(minerID)
if !ok {
msgInfo = &peerMsgInfo{}
} else {
msgInfo = val.(*peerMsgInfo)
}
// Lock this peer's message info.
msgInfo.mutex.Lock()
defer msgInfo.mutex.Unlock()
if ok {
// Reject replayed messages.
seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
if seqno <= msgInfo.lastSeqno {
log.Debugf("ignoring replayed indexer message")
return pubsub.ValidationIgnore
}
msgInfo.lastSeqno = seqno
}
if !ok || originPeer != msgInfo.peerID {
// Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil {
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerID)
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationReject
}
msgInfo.peerID = originPeer
if !ok {
// Add msgInfo to cache only after being authenticated. If two
// messages from the same peer are handled concurrently, there is a
// small chance that one msgInfo could replace the other here when
// the info is first cached. This is OK, so no need to prevent it.
v.peerCache.Add(minerID, msgInfo)
}
}
// See if message needs to be ignored due to rate limiting.
if v.rateLimitPeer(msgInfo, msgCid) {
return pubsub.ValidationIgnore
}
stats.Record(ctx, metrics.IndexerMessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
func (v *IndexerMessageValidator) rateLimitPeer(msgInfo *peerMsgInfo, msgCid cid.Cid) bool {
const (
msgLimit = 5
msgTimeLimit = 10 * time.Second
repeatTimeLimit = 2 * time.Hour
)
timeWindow := msgInfo.rateLimit
// Check overall message rate.
if timeWindow == nil {
timeWindow = ratelimit.NewWindow(msgLimit, msgTimeLimit)
msgInfo.rateLimit = timeWindow
} else if msgInfo.lastCid == msgCid {
// Check if this is a repeat of the previous message data.
if time.Since(timeWindow.Newest()) < repeatTimeLimit {
log.Warnw("ignoring repeated indexer message", "sender", msgInfo.peerID)
return true
}
}
err := timeWindow.Add()
if err != nil {
log.Warnw("ignoring indexer message", "sender", msgInfo.peerID, "err", err)
return true
}
msgInfo.lastCid = msgCid
return false
}
func (v *IndexerMessageValidator) authenticateMessage(ctx context.Context, minerAddress address.Address, peerID peer.ID) error {
ts, err := v.chainApi.ChainHead(ctx)
if err != nil {
return err
}
minerInfo, err := v.stateApi.StateMinerInfo(ctx, minerAddress, ts.Key())
if err != nil {
return err
}
if minerInfo.PeerId == nil {
return xerrors.New("no peer id for miner")
}
if *minerInfo.PeerId != peerID {
return xerrors.New("miner id does not map to peer that sent message")
}
return nil
}
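Taken together, the validator throttles each miner two ways: at most 5 messages per 10 seconds, and a 2-hour cooldown when a message repeats the previous advertisement CID. A compact sketch of that decision using the ratelimit.Window added in this change; the constants mirror the code above, while the function and its arguments are illustrative:

package example

import (
    "time"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/chain/sub/ratelimit"
)

const (
    msgLimit        = 5
    msgTimeLimit    = 10 * time.Second
    repeatTimeLimit = 2 * time.Hour
)

// shouldIgnore reports whether an indexer message should be dropped for this
// sender, given the sender's window and the CID of its previous message.
func shouldIgnore(window *ratelimit.Window, lastCid, msgCid cid.Cid) bool {
    // Repeat of the previous advertisement within the cooldown period.
    if lastCid == msgCid && time.Since(window.Newest()) < repeatTimeLimit {
        return true
    }
    // Overall rate cap: Add fails once the window already holds msgLimit events.
    return window.Add() != nil
}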

View File

@ -2,13 +2,20 @@
package sub package sub
import ( import (
"bytes"
"context" "context"
"testing" "testing"
address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/golang/mock/gomock"
blocks "github.com/ipfs/go-block-format" blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
) )
type getter struct { type getter struct {
@ -63,3 +70,65 @@ func TestFetchCidsWithDedup(t *testing.T) {
t.Fatalf("there is a nil message: first %p, last %p", res[0], res[len(res)-1]) t.Fatalf("there is a nil message: first %p, last %p", res[0], res[len(res)-1])
} }
} }
func TestIndexerMessageValidator_Validate(t *testing.T) {
validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
selfPID string
senderPID string
extraData []byte
wantValidation pubsub.ValidationResult
}{
{
name: "invalid extra data is rejected",
selfPID: "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
senderPID: "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ",
extraData: []byte("f0127896"), // note, casting encoded address to byte is invalid.
wantValidation: pubsub.ValidationReject,
},
{
name: "same sender and receiver is ignored",
selfPID: "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
senderPID: "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
wantValidation: pubsub.ValidationIgnore,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
mc := gomock.NewController(t)
node := mocks.NewMockFullNode(mc)
subject := NewIndexerMessageValidator(peer.ID(tc.selfPID), node, node)
message := dtsync.Message{
Cid: validCid,
Addrs: nil,
ExtraData: tc.extraData,
}
buf := bytes.NewBuffer(nil)
if err := message.MarshalCBOR(buf); err != nil {
t.Fatal(err)
}
topic := "topic"
pbm := &pb.Message{
Data: buf.Bytes(),
Topic: &topic,
From: nil,
Seqno: nil,
}
validate := subject.Validate(context.Background(), peer.ID(tc.senderPID), &pubsub.Message{
Message: pbm,
ReceivedFrom: peer.ID(tc.senderPID),
ValidatorData: nil,
})
if validate != tc.wantValidation {
t.Fatalf("expected %v but got %v", tc.wantValidation, validate)
}
})
}
}

View File

@ -0,0 +1,89 @@
package ratelimit
import "errors"
var ErrRateLimitExceeded = errors.New("rate limit exceeded")
type queue struct {
buf []int64
count int
head int
tail int
}
// cap returns the queue capacity
func (q *queue) cap() int {
return len(q.buf)
}
// len returns the number of items in the queue
func (q *queue) len() int {
return q.count
}
// push adds an element to the end of the queue.
func (q *queue) push(elem int64) error {
if q.count == len(q.buf) {
return ErrRateLimitExceeded
}
q.buf[q.tail] = elem
// Calculate new tail position.
q.tail = q.next(q.tail)
q.count++
return nil
}
// pop removes and returns the element from the front of the queue.
func (q *queue) pop() int64 {
if q.count <= 0 {
panic("pop from empty queue")
}
ret := q.buf[q.head]
// Calculate new head position.
q.head = q.next(q.head)
q.count--
return ret
}
// front returns the element at the front of the queue. This is the element
// that would be returned by pop(). This call panics if the queue is empty.
func (q *queue) front() int64 {
if q.count <= 0 {
panic("front() called when empty")
}
return q.buf[q.head]
}
// back returns the element at the back of the queue. This call panics if the
// queue is empty.
func (q *queue) back() int64 {
if q.count <= 0 {
panic("back() called when empty")
}
return q.buf[q.prev(q.tail)]
}
// prev returns the previous buffer position wrapping around buffer.
func (q *queue) prev(i int) int {
if i == 0 {
return len(q.buf) - 1
}
return (i - 1) % len(q.buf)
}
// next returns the next buffer position wrapping around buffer.
func (q *queue) next(i int) int {
return (i + 1) % len(q.buf)
}
// truncate pops values that are less than or equal to the specified threshold.
func (q *queue) truncate(threshold int64) {
for q.count != 0 && q.buf[q.head] <= threshold {
// pop() without returning a value
q.head = q.next(q.head)
q.count--
}
}
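The queue is a fixed-capacity ring buffer of unix-nanosecond timestamps: push refuses new entries (rather than evicting old ones) when full, and truncate drops entries at or below a threshold from the head. Since the type is unexported, here is a tiny self-contained re-sketch of just those two behaviours with a three-slot buffer:

package main

import "fmt"

// A three-slot ring mirroring the unexported queue above: count tracks how
// many timestamps are live, head/tail wrap around the fixed buffer.
type ring struct {
    buf               []int64
    count, head, tail int
}

func (r *ring) push(v int64) bool {
    if r.count == len(r.buf) {
        return false // full: the rate limit is exceeded
    }
    r.buf[r.tail] = v
    r.tail = (r.tail + 1) % len(r.buf)
    r.count++
    return true
}

func (r *ring) truncate(threshold int64) {
    for r.count != 0 && r.buf[r.head] <= threshold {
        r.head = (r.head + 1) % len(r.buf)
        r.count--
    }
}

func main() {
    r := &ring{buf: make([]int64, 3)}
    fmt.Println(r.push(10), r.push(20), r.push(30)) // true true true
    fmt.Println(r.push(40))                         // false: buffer is full
    r.truncate(20)                                  // drops 10 and 20
    fmt.Println(r.push(40), r.count)                // true 2
}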

View File

@ -0,0 +1,70 @@
package ratelimit
import "time"
// Window is a time window for counting events within a span of time. The
// window slides forward in time so that it spans from the most recent event
// back to size in the past.
type Window struct {
q *queue
size int64
}
// NewWindow creates a new Window that limits the number of events to a
// maximum count within a duration of time. The capacity sets the maximum
// number of events, and size sets the span of time over which the events are
// counted.
func NewWindow(capacity int, size time.Duration) *Window {
return &Window{
q: &queue{
buf: make([]int64, capacity),
},
size: int64(size),
}
}
// Add attempts to append a new timestamp into the current window. Previously
// added values that are not within `size` difference from the value being
// added are first removed. Add fails if adding the value would cause the
// window to exceed capacity.
func (w *Window) Add() error {
now := time.Now().UnixNano()
if w.Len() != 0 {
w.q.truncate(now - w.size)
}
return w.q.push(now)
}
// Cap returns the maximum number of items the window can hold.
func (w *Window) Cap() int {
return w.q.cap()
}
// Len returns the number of elements currently in the window.
func (w *Window) Len() int {
return w.q.len()
}
// Span returns the distance from the first to the last item in the window.
func (w *Window) Span() time.Duration {
if w.q.len() < 2 {
return 0
}
return time.Duration(w.q.back() - w.q.front())
}
// Oldest returns the oldest timestamp in the window.
func (w *Window) Oldest() time.Time {
if w.q.len() == 0 {
return time.Time{}
}
return time.Unix(0, w.q.front())
}
// Newest returns the newest timestamp in the window.
func (w *Window) Newest() time.Time {
if w.q.len() == 0 {
return time.Time{}
}
return time.Unix(0, w.q.back())
}

View File

@ -0,0 +1,61 @@
package ratelimit
import (
"testing"
"time"
)
func TestWindow(t *testing.T) {
const (
maxEvents = 3
timeLimit = 100 * time.Millisecond
)
w := NewWindow(maxEvents, timeLimit)
if w.Len() != 0 {
t.Fatal("q.Len() =", w.Len(), "expect 0")
}
if w.Cap() != maxEvents {
t.Fatal("q.Cap() =", w.Cap(), "expect 3")
}
if !w.Newest().IsZero() {
t.Fatal("expected newest to be zero time with empty window")
}
if !w.Oldest().IsZero() {
t.Fatal("expected oldest to be zero time with empty window")
}
if w.Span() != 0 {
t.Fatal("expected span to be zero time with empty window")
}
var err error
for i := 0; i < maxEvents; i++ {
err = w.Add()
if err != nil {
t.Fatalf("cannot add event %d", i)
}
}
if w.Len() != maxEvents {
t.Fatalf("q.Len() is %d, expected %d", w.Len(), maxEvents)
}
if err = w.Add(); err != ErrRateLimitExceeded {
t.Fatalf("add event %d within time limit should have failed with err: %s", maxEvents+1, ErrRateLimitExceeded)
}
time.Sleep(timeLimit)
if err = w.Add(); err != nil {
t.Fatalf("cannot add event after time limit: %s", err)
}
prev := w.Newest()
time.Sleep(timeLimit)
err = w.Add()
if err != nil {
t.Fatalf("cannot add event")
}
if w.Newest().Before(prev) {
t.Fatal("newest is before previous value")
}
if w.Oldest().Before(prev) {
t.Fatal("oldest is before previous value")
}
}

View File

@ -1244,25 +1244,3 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
bbr, ok := syncer.bad.Has(blk) bbr, ok := syncer.bad.Has(blk)
return bbr.String(), ok return bbr.String(), ok
} }
func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := syncer.store.LoadTipSet(ctx, cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}

View File

@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/types/mock"
"github.com/stretchr/testify/require"
) )
func init() { func init() {
@ -240,3 +241,34 @@ func TestSyncManager(t *testing.T) {
op3.done() op3.done()
}) })
} }
func TestSyncManagerBucketSet(t *testing.T) {
ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
ts2 := mock.TipSet(mock.MkBlock(ts1, 1, 0))
bucket1 := newSyncTargetBucket(ts1, ts2)
bucketSet := syncBucketSet{buckets: []*syncTargetBucket{bucket1}}
// inserting a tipset (potential sync target) from an existing chain should add to an existing bucket
//stm: @CHAIN_SYNCER_ADD_SYNC_TARGET_001
ts3 := mock.TipSet(mock.MkBlock(ts2, 2, 0))
bucketSet.Insert(ts3)
require.Equal(t, 1, len(bucketSet.buckets))
require.Equal(t, 3, len(bucketSet.buckets[0].tips))
// inserting a tipset from a new chain should create a new bucket
ts4fork := mock.TipSet(mock.MkBlock(nil, 1, 1))
bucketSet.Insert(ts4fork)
require.Equal(t, 2, len(bucketSet.buckets))
require.Equal(t, 3, len(bucketSet.buckets[0].tips))
require.Equal(t, 1, len(bucketSet.buckets[1].tips))
// Pop removes the best bucket (best sync target), e.g. bucket1
//stm: @CHAIN_SYNCER_SELECT_SYNC_TARGET_001
popped := bucketSet.Pop()
require.Equal(t, popped, bucket1)
require.Equal(t, 1, len(bucketSet.buckets))
// PopRelated removes the bucket containing the given tipset, leaving the set empty
bucketSet.PopRelated(ts4fork)
require.Equal(t, 0, len(bucketSet.buckets))
}

View File

@ -104,7 +104,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
mn: mocknet.New(ctx), mn: mocknet.New(),
g: g, g: g,
us: filcns.DefaultUpgradeSchedule(), us: filcns.DefaultUpgradeSchedule(),
} }
@ -158,7 +158,7 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
mn: mocknet.New(ctx), mn: mocknet.New(),
g: g, g: g,
us: sched, us: sched,
} }
@ -1098,3 +1098,158 @@ func TestInvalidHeight(t *testing.T) {
tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true) tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true)
} }
// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly
func TestIncomingBlocks(t *testing.T) {
H := 50
tu := prepSyncTest(t, H)
client := tu.addClientNode()
require.NoError(t, tu.mn.LinkAll())
clientNode := tu.nds[client]
//stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001
incoming, err := clientNode.SyncIncomingBlocks(tu.ctx)
require.NoError(tu.t, err)
tu.connect(client, 0)
tu.waitUntilSync(0, client)
tu.compareSourceState(client)
timeout := time.After(10 * time.Second)
for i := 0; i < 5; i++ {
tu.mineNewBlock(0, nil)
tu.waitUntilSync(0, client)
tu.compareSourceState(client)
// just in case, so we don't get deadlocked
select {
case <-incoming:
case <-timeout:
tu.t.Fatal("TestIncomingBlocks timeout")
}
}
}
// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache
func TestSyncManualBadTS(t *testing.T) {
// Test setup:
// - source node is fully synced,
// - client node is unsynced
// - client manually marked source's head and its parent as bad
H := 50
tu := prepSyncTest(t, H)
client := tu.addClientNode()
require.NoError(t, tu.mn.LinkAll())
sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
require.NoError(tu.t, err)
clientHead, err := tu.nds[client].ChainHead(tu.ctx)
require.NoError(tu.t, err)
require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup")
//stm: @CHAIN_SYNCER_MARK_BAD_001
err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0])
require.NoError(tu.t, err)
sourceHeadParent := sourceHead.Parents().Cids()[0]
err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent)
require.NoError(tu.t, err)
//stm: @CHAIN_SYNCER_CHECK_BAD_001
reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
require.NoError(tu.t, err)
require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")
reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
require.NoError(tu.t, err)
require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")
// Assertion 1:
// - client shouldn't be synced after timeout, because the source TS is marked bad.
// - bad block is the first block that should be synced, 1sec should be enough
tu.connect(1, 0)
timeout := time.After(1 * time.Second)
<-timeout
clientHead, err = tu.nds[client].ChainHead(tu.ctx)
require.NoError(tu.t, err)
require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad")
// Assertion 2:
// - after unmarking blocks as bad and reconnecting, source & client should be in sync
//stm: @CHAIN_SYNCER_UNMARK_BAD_001
err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0])
require.NoError(tu.t, err)
reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
require.NoError(tu.t, err)
require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")
err = tu.nds[client].SyncUnmarkAllBad(tu.ctx)
require.NoError(tu.t, err)
reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
require.NoError(tu.t, err)
require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")
tu.disconnect(1, 0)
tu.connect(1, 0)
tu.waitUntilSync(0, client)
tu.compareSourceState(client)
}
// TestSyncState tests fetching the sync worker state before, during & after the sync
func TestSyncState(t *testing.T) {
H := 50
tu := prepSyncTest(t, H)
client := tu.addClientNode()
require.NoError(t, tu.mn.LinkAll())
clientNode := tu.nds[client]
sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
require.NoError(tu.t, err)
// sync state should be empty before the sync
state, err := clientNode.SyncState(tu.ctx)
require.NoError(tu.t, err)
require.Equal(tu.t, len(state.ActiveSyncs), 0)
tu.connect(client, 0)
// wait until sync starts, or at most `timeout` seconds
timeout := time.After(5 * time.Second)
activeSyncs := []api.ActiveSync{}
for len(activeSyncs) == 0 {
//stm: @CHAIN_SYNCER_STATE_001
state, err = clientNode.SyncState(tu.ctx)
require.NoError(tu.t, err)
activeSyncs = state.ActiveSyncs
sleep := time.After(100 * time.Millisecond)
select {
case <-sleep:
case <-timeout:
tu.t.Fatal("TestSyncState timeout")
}
}
// check state during sync
require.Equal(tu.t, len(activeSyncs), 1)
require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead))
tu.waitUntilSync(0, client)
tu.compareSourceState(client)
// check state after sync
state, err = clientNode.SyncState(tu.ctx)
require.NoError(tu.t, err)
require.Equal(tu.t, len(state.ActiveSyncs), 1)
require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete)
}

View File

@ -3,6 +3,7 @@ package mock
import ( import (
"context" "context"
"fmt" "fmt"
"math/rand"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -24,15 +25,7 @@ func Address(i uint64) address.Address {
} }
func MkMessage(from, to address.Address, nonce uint64, w *wallet.LocalWallet) *types.SignedMessage { func MkMessage(from, to address.Address, nonce uint64, w *wallet.LocalWallet) *types.SignedMessage {
msg := &types.Message{ msg := UnsignedMessage(from, to, nonce)
To: to,
From: from,
Value: types.NewInt(1),
Nonce: nonce,
GasLimit: 1000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
}
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil { if err != nil {
@ -96,3 +89,35 @@ func TipSet(blks ...*types.BlockHeader) *types.TipSet {
} }
return ts return ts
} }
// RandomActorAddresses generates count new addresses using the provided seed, and returns them
func RandomActorAddresses(seed int64, count int) ([]*address.Address, error) {
randAddrs := make([]*address.Address, count)
source := rand.New(rand.NewSource(seed))
for i := 0; i < count; i++ {
bytes := make([]byte, 32)
_, err := source.Read(bytes)
if err != nil {
return nil, err
}
addr, err := address.NewActorAddress(bytes)
if err != nil {
return nil, err
}
randAddrs[i] = &addr
}
return randAddrs, nil
}
func UnsignedMessage(from, to address.Address, nonce uint64) *types.Message {
return &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: nonce,
GasLimit: 1000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
}
}
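The new helpers make it cheap to fabricate deterministic senders and unsigned messages in tests. A small usage sketch; the seed, count and nonce loop are arbitrary:

package example

import (
    "fmt"

    "github.com/filecoin-project/lotus/chain/types/mock"
)

func buildTestMessages() error {
    // Two deterministic actor addresses from a fixed seed.
    addrs, err := mock.RandomActorAddresses(1984, 2)
    if err != nil {
        return err
    }
    // A handful of unsigned messages from the first address to the second.
    for nonce := uint64(0); nonce < 3; nonce++ {
        msg := mock.UnsignedMessage(*addrs[0], *addrs[1], nonce)
        fmt.Println(msg.Cid(), msg.Nonce)
    }
    return nil
}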

chain/vm/fvm.go (new file, 326 lines)
View File

@ -0,0 +1,326 @@
package vm
import (
"bytes"
"context"
"time"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/lib/sigs"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/blockstore"
ffi "github.com/filecoin-project/filecoin-ffi"
ffi_cgo "github.com/filecoin-project/filecoin-ffi/cgo"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
)
var _ Interface = (*FVM)(nil)
var _ ffi_cgo.Externs = (*FvmExtern)(nil)
type FvmExtern struct {
Rand
blockstore.Blockstore
epoch abi.ChainEpoch
lbState LookbackStateGetter
base cid.Cid
}
// VerifyConsensusFault is similar to the one in syscalls.go used by the LegacyVM, except it never errors
// Errors are logged and "no fault" is returned, which is functionally what go-actors does anyway
func (x *FvmExtern) VerifyConsensusFault(ctx context.Context, a, b, extra []byte) (*ffi_cgo.ConsensusFault, int64) {
totalGas := int64(0)
ret := &ffi_cgo.ConsensusFault{
Type: ffi_cgo.ConsensusFaultNone,
}
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
// (which runs a syntactic check), we do it directly on the CIDs.
// (0) cheap preliminary checks
// can blocks be decoded properly?
var blockA, blockB types.BlockHeader
if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil {
log.Info("invalid consensus fault: cannot decode first block header: %w", decodeErr)
return ret, totalGas
}
if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil {
log.Info("invalid consensus fault: cannot decode second block header: %w", decodeErr)
return ret, totalGas
}
// are blocks the same?
if blockA.Cid().Equals(blockB.Cid()) {
log.Info("invalid consensus fault: submitted blocks are the same")
return ret, totalGas
}
// (1) check conditions necessary to any consensus fault
// were blocks mined by same miner?
if blockA.Miner != blockB.Miner {
log.Info("invalid consensus fault: blocks not mined by the same miner")
return ret, totalGas
}
// block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain).
if blockB.Height < blockA.Height {
log.Info("invalid consensus fault: first block must not be of higher height than second")
return ret, totalGas
}
ret.Epoch = blockB.Height
faultType := ffi_cgo.ConsensusFaultNone
// (2) check for the consensus faults themselves
// (a) double-fork mining fault
if blockA.Height == blockB.Height {
faultType = ffi_cgo.ConsensusFaultDoubleForkMining
}
// (b) time-offset mining fault
// strictly speaking no need to compare heights based on double fork mining check above,
// but at same height this would be a different fault.
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
faultType = ffi_cgo.ConsensusFaultTimeOffsetMining
}
// (c) parent-grinding fault
// Here extra is the "witness", a third block that shows the connection between A and B as
// A's sibling and B's parent.
// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
//
// B
// |
// [A, C]
var blockC types.BlockHeader
if len(extra) > 0 {
if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil {
log.Info("invalid consensus fault: cannot decode extra: %w", decodeErr)
return ret, totalGas
}
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
faultType = ffi_cgo.ConsensusFaultParentGrinding
}
}
// (3) return if no consensus fault by now
if faultType == ffi_cgo.ConsensusFaultNone {
log.Info("invalid consensus fault: no fault detected")
return ret, totalGas
}
// else
// (4) expensive final checks
// check blocks are properly signed by their respective miner
// note we do not need to check extra's: it is a parent to block b
// which itself is signed, so it was willingly included by the miner
gasA, sigErr := x.VerifyBlockSig(ctx, &blockA)
totalGas += gasA
if sigErr != nil {
log.Info("invalid consensus fault: cannot verify first block sig: %w", sigErr)
return ret, totalGas
}
gas2, sigErr := x.VerifyBlockSig(ctx, &blockB)
totalGas += gas2
if sigErr != nil {
log.Info("invalid consensus fault: cannot verify second block sig: %w", sigErr)
return ret, totalGas
}
ret.Type = faultType
ret.Target = blockA.Miner
return ret, totalGas
}
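Stripped of CBOR decoding and signature verification, the fault classification reduces to a handful of structural comparisons between the two headers plus the optional witness C. A standalone sketch of just that decision table, with simplified header and fault types standing in for the real ones:

package example

// faultKind is a simplified stand-in for the ffi_cgo consensus fault types.
type faultKind int

const (
    faultNone faultKind = iota
    faultDoubleForkMining
    faultTimeOffsetMining
    faultParentGrinding
)

// hdr is a cut-down block header: just the fields the classification inspects.
type hdr struct {
    Miner   string
    Height  int64
    Parents []string
}

func sameParents(a, b []string) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if a[i] != b[i] {
            return false
        }
    }
    return true
}

func contains(parents []string, c string) bool {
    for _, p := range parents {
        if p == c {
            return true
        }
    }
    return false
}

// classify mirrors the ordering in VerifyConsensusFault: A and B must share a
// miner and satisfy Height(A) <= Height(B); the witness C (may be nil) is only
// consulted for the parent-grinding case.
func classify(a, b, c *hdr, cidA, cidB, cidC string) faultKind {
    if a.Miner != b.Miner || b.Height < a.Height || cidA == cidB {
        return faultNone
    }
    fault := faultNone
    // (a) double-fork mining: two distinct blocks at the same height.
    if a.Height == b.Height {
        fault = faultDoubleForkMining
    }
    // (b) time-offset mining: same parents, different heights.
    if sameParents(a.Parents, b.Parents) && a.Height != b.Height {
        fault = faultTimeOffsetMining
    }
    // (c) parent-grinding: C is A's sibling and B's parent, but B omits A.
    if c != nil && sameParents(a.Parents, c.Parents) && a.Height == c.Height &&
        contains(b.Parents, cidC) && !contains(b.Parents, cidA) {
        fault = faultParentGrinding
    }
    return fault
}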
func (x *FvmExtern) VerifyBlockSig(ctx context.Context, blk *types.BlockHeader) (int64, error) {
waddr, gasUsed, err := x.workerKeyAtLookback(ctx, blk.Miner, blk.Height)
if err != nil {
return gasUsed, err
}
return gasUsed, sigs.CheckBlockSignature(ctx, blk, waddr)
}
func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Address, height abi.ChainEpoch) (address.Address, int64, error) {
if height < x.epoch-policy.ChainFinality {
return address.Undef, 0, xerrors.Errorf("cannot get worker key (currEpoch %d, height %d)", x.epoch, height)
}
gasUsed := int64(0)
gasAdder := func(gc GasCharge) {
// technically not overflow safe, but that's fine
gasUsed += gc.Total()
}
cstWithoutGas := cbor.NewCborStore(x.Blockstore)
cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore}
cstWithGas := cbor.NewCborStore(cbb)
lbState, err := x.lbState(ctx, height)
if err != nil {
return address.Undef, gasUsed, err
}
// get appropriate miner actor
act, err := lbState.GetActor(minerId)
if err != nil {
return address.Undef, gasUsed, err
}
// use that to get the miner state
mas, err := miner.Load(adt.WrapStore(ctx, cstWithGas), act)
if err != nil {
return address.Undef, gasUsed, err
}
info, err := mas.Info()
if err != nil {
return address.Undef, gasUsed, err
}
stateTree, err := state.LoadStateTree(cstWithoutGas, x.base)
if err != nil {
return address.Undef, gasUsed, err
}
raddr, err := ResolveToKeyAddr(stateTree, cstWithGas, info.Worker)
if err != nil {
return address.Undef, gasUsed, err
}
return raddr, gasUsed, nil
}
type FVM struct {
fvm *ffi.FVM
}
func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
log.Info("using the FVM, this is experimental!")
circToReport := opts.FilVested
// For v14 (and earlier), we perform the FilVested portion of the calculation, and let the FVM dynamically do the rest
// v15 and after, the circ supply is always constant per epoch, so we calculate the base and report it at creation
if opts.NetworkVersion >= network.Version15 {
state, err := state.LoadStateTree(cbor.NewCborStore(opts.Bstore), opts.StateBase)
if err != nil {
return nil, err
}
circToReport, err = opts.CircSupplyCalc(ctx, opts.Epoch, state)
if err != nil {
return nil, err
}
}
fvmOpts := ffi.FVMOpts{
FVMVersion: 0,
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch},
Epoch: opts.Epoch,
BaseFee: opts.BaseFee,
BaseCircSupply: circToReport,
NetworkVersion: opts.NetworkVersion,
StateBase: opts.StateBase,
}
fvm, err := ffi.CreateFVM(&fvmOpts)
if err != nil {
return nil, err
}
return &FVM{
fvm: fvm,
}, nil
}
func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
start := build.Clock.Now()
msgBytes, err := cmsg.VMMessage().Serialize()
if err != nil {
return nil, xerrors.Errorf("serializing msg: %w", err)
}
ret, err := vm.fvm.ApplyMessage(msgBytes, uint(cmsg.ChainLength()))
if err != nil {
return nil, xerrors.Errorf("applying msg: %w", err)
}
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
Return: ret.Return,
ExitCode: exitcode.ExitCode(ret.ExitCode),
GasUsed: ret.GasUsed,
},
GasCosts: &GasOutputs{
// TODO: do the other optional fields eventually
BaseFeeBurn: big.Zero(),
OverEstimationBurn: big.Zero(),
MinerPenalty: ret.MinerPenalty,
MinerTip: ret.MinerTip,
Refund: big.Zero(),
GasRefund: 0,
GasBurned: 0,
},
// TODO: do these eventually, not consensus critical
// https://github.com/filecoin-project/ref-fvm/issues/318
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{},
Duration: time.Since(start),
}, nil
}
func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*ApplyRet, error) {
start := build.Clock.Now()
msgBytes, err := cmsg.VMMessage().Serialize()
if err != nil {
return nil, xerrors.Errorf("serializing msg: %w", err)
}
ret, err := vm.fvm.ApplyImplicitMessage(msgBytes)
if err != nil {
return nil, xerrors.Errorf("applying msg: %w", err)
}
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
Return: ret.Return,
ExitCode: exitcode.ExitCode(ret.ExitCode),
GasUsed: ret.GasUsed,
},
GasCosts: nil,
// TODO: do these eventually, not consensus critical
// https://github.com/filecoin-project/ref-fvm/issues/318
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{},
Duration: time.Since(start),
}, nil
}
func (vm *FVM) Flush(ctx context.Context) (cid.Cid, error) {
return vm.fvm.Flush()
}
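Because FVM satisfies the same interface as the LegacyVM, it can be swapped in through StateManager.SetVMConstructor. A hedged sketch of that wiring behind a hypothetical environment flag (the flag name is invented; how lotus actually gates the experimental FVM is not shown in this diff):

package example

import (
    "context"
    "os"

    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/vm"
)

// useFVM swaps the experimental FVM in for the LegacyVM when a (hypothetical)
// environment flag is set; otherwise the existing constructor is left alone.
func useFVM(sm *stmgr.StateManager) {
    if os.Getenv("EXAMPLE_USE_FVM") != "1" { // illustrative flag name
        return
    }
    sm.SetVMConstructor(func(ctx context.Context, opts *vm.VMOpts) (vm.Interface, error) {
        return vm.NewFVM(ctx, opts)
    })
}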

View File

@ -50,7 +50,7 @@ func newGasCharge(name string, computeGas int64, storageGas int64) GasCharge {
} }
} }
// Pricelist provides prices for operations in the VM. // Pricelist provides prices for operations in the LegacyVM.
// //
// Note: this interface should be APPEND ONLY since last chain checkpoint // Note: this interface should be APPEND ONLY since last chain checkpoint
type Pricelist interface { type Pricelist interface {

View File

@ -50,7 +50,7 @@ type pricelistV0 struct {
// whether it succeeds or fails in application) is given by: // whether it succeeds or fails in application) is given by:
// OnChainMessageBase + len(serialized message)*OnChainMessagePerByte // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte
// Together, these account for the cost of message propagation and validation, // Together, these account for the cost of message propagation and validation,
// up to but excluding any actual processing by the VM. // up to but excluding any actual processing by the LegacyVM.
// This is the cost a block producer burns when including an invalid message. // This is the cost a block producer burns when including an invalid message.
onChainMessageComputeBase int64 onChainMessageComputeBase int64
onChainMessageStorageBase int64 onChainMessageStorageBase int64
@ -83,11 +83,11 @@ type pricelistV0 struct {
sendInvokeMethod int64 sendInvokeMethod int64
// Gas cost for any Get operation to the IPLD store // Gas cost for any Get operation to the IPLD store
// in the runtime VM context. // in the runtime LegacyVM context.
ipldGetBase int64 ipldGetBase int64
// Gas cost (Base + len*PerByte) for any Put operation to the IPLD store // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store
// in the runtime VM context. // in the runtime LegacyVM context.
// //
// Note: these costs should be significantly higher than the costs for Get // Note: these costs should be significantly higher than the costs for Get
// operations, since they reflect not only serialization/deserialization // operations, since they reflect not only serialization/deserialization

View File

@ -135,7 +135,7 @@ func TestInvokerBasic(t *testing.T) {
{ {
_, aerr := code[1](&Runtime{ _, aerr := code[1](&Runtime{
vm: &VM{networkVersion: network.Version0}, vm: &LegacyVM{networkVersion: network.Version0},
Message: &basicRtMessage{}, Message: &basicRtMessage{},
}, []byte{99}) }, []byte{99})
if aerrors.IsFatal(aerr) { if aerrors.IsFatal(aerr) {
@ -146,7 +146,7 @@ func TestInvokerBasic(t *testing.T) {
{ {
_, aerr := code[1](&Runtime{ _, aerr := code[1](&Runtime{
vm: &VM{networkVersion: network.Version7}, vm: &LegacyVM{networkVersion: network.Version7},
Message: &basicRtMessage{}, Message: &basicRtMessage{},
}, []byte{99}) }, []byte{99})
if aerrors.IsFatal(aerr) { if aerrors.IsFatal(aerr) {

View File

@ -65,7 +65,7 @@ type Runtime struct {
ctx context.Context ctx context.Context
vm *VM vm *LegacyVM
state *state.StateTree state *state.StateTree
height abi.ChainEpoch height abi.ChainEpoch
cst ipldcbor.IpldStore cst ipldcbor.IpldStore
@ -158,7 +158,7 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
if ar, ok := r.(aerrors.ActorError); ok { if ar, ok := r.(aerrors.ActorError); ok {
log.Warnf("VM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar) log.Warnf("LegacyVM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar)
aerr = ar aerr = ar
return return
} }

View File

@ -122,7 +122,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error {
return nil return nil
} }
func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
rt := &Runtime{ rt := &Runtime{
ctx: ctx, ctx: ctx,
vm: vm, vm: vm,
@ -188,7 +188,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
} }
type UnsafeVM struct { type UnsafeVM struct {
VM *VM VM *LegacyVM
} }
func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime { func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime {
@ -201,7 +201,9 @@ type (
LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
) )
type VM struct { var _ Interface = (*LegacyVM)(nil)
type LegacyVM struct {
cstate *state.StateTree cstate *state.StateTree
cst *cbor.BasicIpldStore cst *cbor.BasicIpldStore
buf *blockstore.BufferedBlockstore buf *blockstore.BufferedBlockstore
@ -225,12 +227,14 @@ type VMOpts struct {
Actors *ActorRegistry Actors *ActorRegistry
Syscalls SyscallBuilder Syscalls SyscallBuilder
CircSupplyCalc CircSupplyCalculator CircSupplyCalc CircSupplyCalculator
// Amount of FIL vested from genesis actors.
FilVested abi.TokenAmount
NetworkVersion network.Version NetworkVersion network.Version
BaseFee abi.TokenAmount BaseFee abi.TokenAmount
LookbackState LookbackStateGetter LookbackState LookbackStateGetter
} }
func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
buf := blockstore.NewBuffered(opts.Bstore) buf := blockstore.NewBuffered(opts.Bstore)
cst := cbor.NewCborStore(buf) cst := cbor.NewCborStore(buf)
state, err := state.LoadStateTree(cst, opts.StateBase) state, err := state.LoadStateTree(cst, opts.StateBase)
@ -243,7 +247,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
return nil, err return nil, err
} }
return &VM{ return &LegacyVM{
cstate: state, cstate: state,
cst: cst, cst: cst,
buf: buf, buf: buf,
@ -272,7 +276,7 @@ type ApplyRet struct {
GasCosts *GasOutputs GasCosts *GasOutputs
} }
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtime,
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
defer atomic.AddUint64(&StatSends, 1) defer atomic.AddUint64(&StatSends, 1)
@ -391,7 +395,7 @@ func checkMessage(msg *types.Message) error {
return nil return nil
} }
func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { func (vm *LegacyVM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
start := build.Clock.Now() start := build.Clock.Now()
defer atomic.AddUint64(&StatApplied, 1) defer atomic.AddUint64(&StatApplied, 1)
ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start) ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start)
@ -409,7 +413,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap
}, actorErr }, actorErr
} }
func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
start := build.Clock.Now() start := build.Clock.Now()
ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage") ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage")
defer span.End() defer span.End()
@ -616,7 +620,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
}, nil }, nil
} }
func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { func (vm *LegacyVM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
if vm.networkVersion <= network.Version12 { if vm.networkVersion <= network.Version12 {
// Check to see if we should burn funds. We avoid burning on successful // Check to see if we should burn funds. We avoid burning on successful
// window post. This won't catch _indirect_ window post calls, but this // window post. This won't catch _indirect_ window post calls, but this
@ -646,7 +650,7 @@ func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Me
type vmFlushKey struct{} type vmFlushKey struct{}
func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { func (vm *LegacyVM) Flush(ctx context.Context) (cid.Cid, error) {
_, span := trace.StartSpan(ctx, "vm.Flush") _, span := trace.StartSpan(ctx, "vm.Flush")
defer span.End() defer span.End()
@ -665,9 +669,9 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return root, nil return root, nil
} }
// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced // Get the buffered blockstore associated with the LegacyVM. This includes any temporary blocks produced
// during this VM's execution. // during this LegacyVM's execution.
func (vm *VM) ActorStore(ctx context.Context) adt.Store { func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store {
return adt.WrapStore(ctx, vm.cst) return adt.WrapStore(ctx, vm.cst)
} }
@ -820,11 +824,11 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid,
return nil return nil
} }
func (vm *VM) StateTree() types.StateTree { func (vm *LegacyVM) StateTree() types.StateTree {
return vm.cstate return vm.cstate
} }
func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { func (vm *LegacyVM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke")
defer span.End() defer span.End()
if span.IsRecordingEvents() { if span.IsRecordingEvents() {
@ -847,11 +851,11 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
return ret, nil return ret, nil
} }
func (vm *VM) SetInvoker(i *ActorRegistry) { func (vm *LegacyVM) SetInvoker(i *ActorRegistry) {
vm.areg = i vm.areg = i
} }
func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { func (vm *LegacyVM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
// Before v15, this was recalculated on each invocation as the state tree was mutated // Before v15, this was recalculated on each invocation as the state tree was mutated
if vm.networkVersion <= network.Version14 { if vm.networkVersion <= network.Version14 {
return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
@ -860,14 +864,14 @@ func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
return vm.baseCircSupply, nil return vm.baseCircSupply, nil
} }
func (vm *VM) incrementNonce(addr address.Address) error { func (vm *LegacyVM) incrementNonce(addr address.Address) error {
return vm.cstate.MutateActor(addr, func(a *types.Actor) error { return vm.cstate.MutateActor(addr, func(a *types.Actor) error {
a.Nonce++ a.Nonce++
return nil return nil
}) })
} }
func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError {
var f *types.Actor var f *types.Actor
var fromID, toID address.Address var fromID, toID address.Address
var err error var err error
@ -955,7 +959,7 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersio
return nil return nil
} }
func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { func (vm *LegacyVM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
if amt.LessThan(types.NewInt(0)) { if amt.LessThan(types.NewInt(0)) {
return xerrors.Errorf("attempted to transfer negative value to gas holder") return xerrors.Errorf("attempted to transfer negative value to gas holder")
} }
@ -969,7 +973,7 @@ func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor,
}) })
} }
func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { func (vm *LegacyVM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
if amt.LessThan(types.NewInt(0)) { if amt.LessThan(types.NewInt(0)) {
return xerrors.Errorf("attempted to transfer negative value from gas holder") return xerrors.Errorf("attempted to transfer negative value from gas holder")
} }

27
chain/vm/vmi.go Normal file
View File

@ -0,0 +1,27 @@
package vm
import (
"context"
"os"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
)
type Interface interface {
// Applies the given message onto the VM's current state, returning the result of the execution
ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
// Same as above but for system messages (the Cron invocation and block reward payments).
// Must NEVER fail.
ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error)
// Flush all buffered objects into the state store provided to the VM at construction.
Flush(ctx context.Context) (cid.Cid, error)
}
func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" {
return NewFVM(ctx, opts)
}
return NewLegacyVM(ctx, opts)
}
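A sketch of how a caller might drive the new Interface, assuming a fully populated *VMOpts built the same way the state manager does today; the helper name and message slice are illustrative. NewVM returns the FVM when LOTUS_USE_FVM_EXPERIMENTAL=1 and the LegacyVM otherwise, and the caller no longer needs to know which one it got:

```go
package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// applyAndFlush applies a batch of messages through whichever vm.Interface
// implementation NewVM selected, then flushes the resulting state.
func applyAndFlush(ctx context.Context, opts *vm.VMOpts, msgs []types.ChainMsg) (cid.Cid, error) {
	vmi, err := vm.NewVM(ctx, opts) // picks FVM or LegacyVM based on the env var
	if err != nil {
		return cid.Undef, err
	}
	for _, m := range msgs {
		if _, err := vmi.ApplyMessage(ctx, m); err != nil {
			return cid.Undef, err
		}
	}
	// Flush writes all buffered state objects back to the store given in opts.
	return vmi.Flush(ctx)
}
```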

View File

@ -0,0 +1,73 @@
//stm: #unit
package wallet
import (
"context"
"testing"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
func TestMultiWallet(t *testing.T) {
ctx := context.Background()
local, err := NewWallet(NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
var wallet api.Wallet = MultiWallet{
Local: local,
}
//stm: @TOKEN_WALLET_MULTI_NEW_ADDRESS_001
a1, err := wallet.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_MULTI_HAS_001
exists, err := wallet.WalletHas(ctx, a1)
if err != nil {
t.Fatal(err)
}
if !exists {
t.Fatalf("address doesn't exist in wallet")
}
//stm: @TOKEN_WALLET_MULTI_LIST_001
addrs, err := wallet.WalletList(ctx)
if err != nil {
t.Fatal(err)
}
// one default address and one newly created
if len(addrs) == 2 {
t.Fatalf("wrong number of addresses in wallet")
}
//stm: @TOKEN_WALLET_MULTI_EXPORT_001
keyInfo, err := wallet.WalletExport(ctx, a1)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_MULTI_IMPORT_001
addr, err := wallet.WalletImport(ctx, keyInfo)
if err != nil {
t.Fatal(err)
}
if addr != a1 {
t.Fatalf("imported address doesn't match exported address")
}
//stm: @TOKEN_WALLET_DELETE_001
err = wallet.WalletDelete(ctx, a1)
if err != nil {
t.Fatal(err)
}
}
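The export/import round-trip above works purely against the api.Wallet interface, so the same calls can hand a key between any two wallet implementations. A small sketch under that assumption; the helper name and packaging are made up:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// moveKey replays the export/import round-trip from the test: any api.Wallet
// implementation can hand a key to another via WalletExport + WalletImport.
func moveKey(ctx context.Context, from, to api.Wallet, addr address.Address) (address.Address, error) {
	ki, err := from.WalletExport(ctx, addr)
	if err != nil {
		return address.Undef, err
	}
	return to.WalletImport(ctx, ki)
}
```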

105
chain/wallet/wallet_test.go Normal file
View File

@ -0,0 +1,105 @@
//stm: #unit
package wallet
import (
"context"
"testing"
"github.com/filecoin-project/lotus/chain/types"
"github.com/stretchr/testify/assert"
)
func TestWallet(t *testing.T) {
ctx := context.Background()
w1, err := NewWallet(NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_NEW_001
a1, err := w1.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_HAS_001
exists, err := w1.WalletHas(ctx, a1)
if err != nil {
t.Fatal(err)
}
if !exists {
t.Fatalf("address doesn't exist in wallet")
}
w2, err := NewWallet(NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
a3, err := w2.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_LIST_001
addrs, err := w2.WalletList(ctx)
if err != nil {
t.Fatal(err)
}
if len(addrs) != 2 {
t.Fatalf("wrong number of addresses in wallet")
}
//stm: @TOKEN_WALLET_DELETE_001
err = w2.WalletDelete(ctx, a2)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_HAS_001
exists, err = w2.WalletHas(ctx, a2)
if err != nil {
t.Fatal(err)
}
if exists {
t.Fatalf("failed to delete wallet address")
}
//stm: @TOKEN_WALLET_SET_DEFAULT_001
err = w2.SetDefault(a3)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_DEFAULT_ADDRESS_001
def, err := w2.GetDefault()
if err != nil {
t.Fatal(err)
}
assert.Equal(t, a3, def)
//stm: @TOKEN_WALLET_EXPORT_001
keyInfo, err := w2.WalletExport(ctx, a3)
if err != nil {
t.Fatal(err)
}
//stm: @TOKEN_WALLET_IMPORT_001
addr, err := w2.WalletImport(ctx, keyInfo)
if err != nil {
t.Fatal(err)
}
if addr != a3 {
t.Fatalf("imported address doesn't match exported address")
}
}

View File

@ -7,6 +7,7 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"os" "os"
"os/exec" "os/exec"
"path" "path"
@ -67,6 +68,8 @@ var ChainHeadCmd = &cli.Command{
Name: "head", Name: "head",
Usage: "Print chain head", Usage: "Print chain head",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -80,7 +83,7 @@ var ChainHeadCmd = &cli.Command{
} }
for _, c := range head.Cids() { for _, c := range head.Cids() {
fmt.Println(c) afmt.Println(c)
} }
return nil return nil
}, },
@ -97,6 +100,8 @@ var ChainGetBlock = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -124,7 +129,7 @@ var ChainGetBlock = &cli.Command{
return err return err
} }
fmt.Println(string(out)) afmt.Println(string(out))
return nil return nil
} }
@ -163,9 +168,8 @@ var ChainGetBlock = &cli.Command{
return err return err
} }
fmt.Println(string(out)) afmt.Println(string(out))
return nil return nil
}, },
} }
@ -182,6 +186,8 @@ var ChainReadObjCmd = &cli.Command{
Usage: "Read the raw bytes of an object", Usage: "Read the raw bytes of an object",
ArgsUsage: "[objectCid]", ArgsUsage: "[objectCid]",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -199,7 +205,7 @@ var ChainReadObjCmd = &cli.Command{
return err return err
} }
fmt.Printf("%x\n", obj) afmt.Printf("%x\n", obj)
return nil return nil
}, },
} }
@ -215,6 +221,8 @@ var ChainDeleteObjCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -236,7 +244,7 @@ var ChainDeleteObjCmd = &cli.Command{
return err return err
} }
fmt.Printf("Obj %s deleted\n", c.String()) afmt.Printf("Obj %s deleted\n", c.String())
return nil return nil
}, },
} }
@ -257,6 +265,7 @@ var ChainStatObjCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -282,8 +291,8 @@ var ChainStatObjCmd = &cli.Command{
return err return err
} }
fmt.Printf("Links: %d\n", stats.Links) afmt.Printf("Links: %d\n", stats.Links)
fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size) afmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
return nil return nil
}, },
} }
@ -293,6 +302,8 @@ var ChainGetMsgCmd = &cli.Command{
Usage: "Get and print a message by its cid", Usage: "Get and print a message by its cid",
ArgsUsage: "[messageCid]", ArgsUsage: "[messageCid]",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
if !cctx.Args().Present() { if !cctx.Args().Present() {
return fmt.Errorf("must pass a cid of a message to get") return fmt.Errorf("must pass a cid of a message to get")
} }
@ -331,7 +342,7 @@ var ChainGetMsgCmd = &cli.Command{
return err return err
} }
fmt.Println(string(enc)) afmt.Println(string(enc))
return nil return nil
}, },
} }
@ -406,6 +417,7 @@ var ChainInspectUsage = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -507,23 +519,23 @@ var ChainInspectUsage = &cli.Command{
numRes := cctx.Int("num-results") numRes := cctx.Int("num-results")
fmt.Printf("Total Gas Limit: %d\n", sum) afmt.Printf("Total Gas Limit: %d\n", sum)
fmt.Printf("By Sender:\n") afmt.Printf("By Sender:\n")
for i := 0; i < numRes && i < len(senderVals); i++ { for i := 0; i < numRes && i < len(senderVals); i++ {
sv := senderVals[i] sv := senderVals[i]
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key]) afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
} }
fmt.Println() afmt.Println()
fmt.Printf("By Receiver:\n") afmt.Printf("By Receiver:\n")
for i := 0; i < numRes && i < len(destVals); i++ { for i := 0; i < numRes && i < len(destVals); i++ {
sv := destVals[i] sv := destVals[i]
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key]) afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
} }
fmt.Println() afmt.Println()
fmt.Printf("By Method:\n") afmt.Printf("By Method:\n")
for i := 0; i < numRes && i < len(methodVals); i++ { for i := 0; i < numRes && i < len(methodVals); i++ {
sv := methodVals[i] sv := methodVals[i]
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key]) afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
} }
return nil return nil
@ -548,6 +560,7 @@ var ChainListCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -595,7 +608,7 @@ var ChainListCmd = &cli.Command{
tss = otss tss = otss
for i, ts := range tss { for i, ts := range tss {
pbf := ts.Blocks()[0].ParentBaseFee pbf := ts.Blocks()[0].ParentBaseFee
fmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit))))) afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))
for _, b := range ts.Blocks() { for _, b := range ts.Blocks() {
msgs, err := api.ChainGetBlockMessages(ctx, b.Cid()) msgs, err := api.ChainGetBlockMessages(ctx, b.Cid())
@ -621,7 +634,7 @@ var ChainListCmd = &cli.Command{
avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs))) avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs)))
} }
fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium) afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
} }
if i < len(tss)-1 { if i < len(tss)-1 {
msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid()) msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid())
@ -646,13 +659,13 @@ var ChainListCmd = &cli.Command{
gasEfficiency := 100 * float64(gasUsed) / float64(limitSum) gasEfficiency := 100 * float64(gasUsed) / float64(limitSum)
gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit) gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit)
fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) afmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
} }
fmt.Println() afmt.Println()
} }
} else { } else {
for i := len(tss) - 1; i >= 0; i-- { for i := len(tss) - 1; i >= 0; i-- {
printTipSet(cctx.String("format"), tss[i]) printTipSet(cctx.String("format"), tss[i], afmt)
} }
} }
return nil return nil
@ -707,6 +720,8 @@ var ChainGetCmd = &cli.Command{
- account-state - account-state
`, `,
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -725,7 +740,7 @@ var ChainGetCmd = &cli.Command{
p = "/ipfs/" + ts.ParentState().String() + p p = "/ipfs/" + ts.ParentState().String() + p
if cctx.Bool("verbose") { if cctx.Bool("verbose") {
fmt.Println(p) afmt.Println(p)
} }
} }
@ -740,7 +755,7 @@ var ChainGetCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
fmt.Println(string(b)) afmt.Println(string(b))
return nil return nil
} }
@ -782,7 +797,7 @@ var ChainGetCmd = &cli.Command{
} }
if cbu == nil { if cbu == nil {
fmt.Printf("%x", raw) afmt.Printf("%x", raw)
return nil return nil
} }
@ -794,7 +809,7 @@ var ChainGetCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
fmt.Println(string(b)) afmt.Println(string(b))
return nil return nil
}, },
} }
@ -878,7 +893,7 @@ func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error
}) })
} }
func printTipSet(format string, ts *types.TipSet) { func printTipSet(format string, ts *types.TipSet, afmt *AppFmt) {
format = strings.ReplaceAll(format, "<height>", fmt.Sprint(ts.Height())) format = strings.ReplaceAll(format, "<height>", fmt.Sprint(ts.Height()))
format = strings.ReplaceAll(format, "<time>", time.Unix(int64(ts.MinTimestamp()), 0).Format(time.Stamp)) format = strings.ReplaceAll(format, "<time>", time.Unix(int64(ts.MinTimestamp()), 0).Format(time.Stamp))
blks := "[ " blks := "[ "
@ -897,7 +912,7 @@ func printTipSet(format string, ts *types.TipSet) {
format = strings.ReplaceAll(format, "<blocks>", blks) format = strings.ReplaceAll(format, "<blocks>", blks)
format = strings.ReplaceAll(format, "<weight>", fmt.Sprint(ts.Blocks()[0].ParentWeight)) format = strings.ReplaceAll(format, "<weight>", fmt.Sprint(ts.Blocks()[0].ParentWeight))
fmt.Println(format) afmt.Println(format)
} }
var ChainBisectCmd = &cli.Command{ var ChainBisectCmd = &cli.Command{
@ -918,6 +933,8 @@ var ChainBisectCmd = &cli.Command{
For special path elements see 'chain get' help For special path elements see 'chain get' help
`, `,
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -961,7 +978,7 @@ var ChainBisectCmd = &cli.Command{
} }
path := "/ipld/" + midTs.ParentState().String() + "/" + subPath path := "/ipld/" + midTs.ParentState().String() + "/" + subPath
fmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path) afmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path)
nd, err := api.ChainGetNode(ctx, path) nd, err := api.ChainGetNode(ctx, path)
if err != nil { if err != nil {
@ -988,32 +1005,32 @@ var ChainBisectCmd = &cli.Command{
if strings.TrimSpace(out.String()) != "false" { if strings.TrimSpace(out.String()) != "false" {
end = mid end = mid
highest = midTs highest = midTs
fmt.Println("true") afmt.Println("true")
} else { } else {
start = mid start = mid
fmt.Printf("false (cli)\n") afmt.Printf("false (cli)\n")
} }
case *exec.ExitError: case *exec.ExitError:
if len(serr.String()) > 0 { if len(serr.String()) > 0 {
fmt.Println("error") afmt.Println("error")
fmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " ")) afmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " "))
fmt.Println(string(b)) afmt.Println(string(b))
fmt.Println("<----") afmt.Println("<----")
return xerrors.Errorf("error running bisect check: %s", serr.String()) return xerrors.Errorf("error running bisect check: %s", serr.String())
} }
start = mid start = mid
fmt.Println("false") afmt.Println("false")
default: default:
return err return err
} }
if start == end { if start == end {
if strings.TrimSpace(out.String()) == "true" { if strings.TrimSpace(out.String()) == "true" {
fmt.Println(midTs.Height()) afmt.Println(midTs.Height())
} else { } else {
fmt.Println(prev) afmt.Println(prev)
} }
return nil return nil
} }
@ -1058,7 +1075,7 @@ var ChainExportCmd = &cli.Command{
return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality) return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality)
} }
fi, err := os.Create(cctx.Args().First()) fi, err := createExportFile(cctx.App, cctx.Args().First())
if err != nil { if err != nil {
return err return err
} }
@ -1118,6 +1135,8 @@ var SlashConsensusFault = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
srv, err := GetFullNodeServices(cctx) srv, err := GetFullNodeServices(cctx)
if err != nil { if err != nil {
return err return err
@ -1222,7 +1241,7 @@ var SlashConsensusFault = &cli.Command{
return err return err
} }
fmt.Println(smsg.Cid()) afmt.Println(smsg.Cid())
return nil return nil
}, },
@ -1232,6 +1251,8 @@ var ChainGasPriceCmd = &cli.Command{
Name: "gas-price", Name: "gas-price",
Usage: "Estimate gas prices", Usage: "Estimate gas prices",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -1248,7 +1269,7 @@ var ChainGasPriceCmd = &cli.Command{
return err return err
} }
fmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est)) afmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est))
} }
return nil return nil
@ -1278,6 +1299,8 @@ var chainDecodeParamsCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -1329,7 +1352,7 @@ var chainDecodeParamsCmd = &cli.Command{
return err return err
} }
fmt.Println(pstr) afmt.Println(pstr)
return nil return nil
}, },
@ -1362,6 +1385,8 @@ var chainEncodeParamsCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
if cctx.Args().Len() != 3 { if cctx.Args().Len() != 3 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
} }
@ -1410,9 +1435,9 @@ var chainEncodeParamsCmd = &cli.Command{
switch cctx.String("encoding") { switch cctx.String("encoding") {
case "base64", "b64": case "base64", "b64":
fmt.Println(base64.StdEncoding.EncodeToString(p)) afmt.Println(base64.StdEncoding.EncodeToString(p))
case "hex": case "hex":
fmt.Println(hex.EncodeToString(p)) afmt.Println(hex.EncodeToString(p))
default: default:
return xerrors.Errorf("unknown encoding") return xerrors.Errorf("unknown encoding")
} }
@ -1420,3 +1445,16 @@ var chainEncodeParamsCmd = &cli.Command{
return nil return nil
}, },
} }
// createExportFile returns the export file handle from the app metadata, or creates a new file if it doesn't exist
func createExportFile(app *cli.App, path string) (io.WriteCloser, error) {
if wc, ok := app.Metadata["export-file"]; ok {
return wc.(io.WriteCloser), nil
}
fi, err := os.Create(path)
if err != nil {
return nil, err
}
return fi, nil
}
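createExportFile gives the export command a test seam: when the app's Metadata carries an "export-file" writer, the snapshot goes there instead of the filesystem (the new cli/chain_test.go below relies on this). A minimal sketch of injecting an in-memory writer; the buffer wrapper and helper name are illustrative:

```go
package example

import (
	"bytes"

	ucli "github.com/urfave/cli/v2"
)

// nopCloser is an illustrative io.WriteCloser backed by a bytes.Buffer.
type nopCloser struct{ *bytes.Buffer }

func (nopCloser) Close() error { return nil }

// withCapturedExport wires a buffer into the app so `chain export` writes to
// memory instead of creating a file on disk.
func withCapturedExport(app *ucli.App) *bytes.Buffer {
	buf := new(bytes.Buffer)
	if app.Metadata == nil {
		app.Metadata = map[string]interface{}{}
	}
	app.Metadata["export-file"] = nopCloser{buf}
	return buf
}
```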

557
cli/chain_test.go Normal file
View File

@ -0,0 +1,557 @@
//stm: #cli
package cli
import (
"bytes"
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"testing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
types "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/golang/mock/gomock"
cid "github.com/ipfs/go-cid"
"github.com/stretchr/testify/assert"
)
func TestChainHead(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainHeadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ts := mock.TipSet(mock.MkBlock(nil, 0, 0))
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
)
//stm: @CLI_CHAIN_HEAD_001
err := app.Run([]string{"chain", "head"})
assert.NoError(t, err)
assert.Regexp(t, regexp.MustCompile(ts.Cids()[0].String()), buf.String())
}
func TestGetBlock(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetBlock))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
block := mock.MkBlock(nil, 0, 0)
blockMsgs := api.BlockMessages{}
gomock.InOrder(
mockApi.EXPECT().ChainGetBlock(ctx, block.Cid()).Return(block, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, block.Cid()).Return(&blockMsgs, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, block.Cid()).Return([]api.Message{}, nil),
mockApi.EXPECT().ChainGetParentReceipts(ctx, block.Cid()).Return([]*types.MessageReceipt{}, nil),
)
//stm: @CLI_CHAIN_GET_BLOCK_001
err := app.Run([]string{"chain", "getblock", block.Cid().String()})
assert.NoError(t, err)
// expected output format
out := struct {
types.BlockHeader
BlsMessages []*types.Message
SecpkMessages []*types.SignedMessage
ParentReceipts []*types.MessageReceipt
ParentMessages []cid.Cid
}{}
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.True(t, block.Cid().Equals(out.Cid()))
}
func TestReadObj(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainReadObjCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
block := mock.MkBlock(nil, 0, 0)
obj := new(bytes.Buffer)
err := block.MarshalCBOR(obj)
assert.NoError(t, err)
gomock.InOrder(
mockApi.EXPECT().ChainReadObj(ctx, block.Cid()).Return(obj.Bytes(), nil),
)
//stm: @CLI_CHAIN_READ_OBJECT_001
err = app.Run([]string{"chain", "read-obj", block.Cid().String()})
assert.NoError(t, err)
assert.Equal(t, buf.String(), fmt.Sprintf("%x\n", obj.Bytes()))
}
func TestChainDeleteObj(t *testing.T) {
cmd := WithCategory("chain", ChainDeleteObjCmd)
block := mock.MkBlock(nil, 0, 0)
// given no force flag, it should return an error and no API calls should be made
t.Run("no-really-do-it", func(t *testing.T) {
app, _, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()
//stm: @CLI_CHAIN_DELETE_OBJECT_002
err := app.Run([]string{"chain", "delete-obj", block.Cid().String()})
assert.Error(t, err)
})
// given a force flag, it calls API delete
t.Run("really-do-it", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainDeleteObj(ctx, block.Cid()).Return(nil),
)
//stm: @CLI_CHAIN_DELETE_OBJECT_001
err := app.Run([]string{"chain", "delete-obj", "--really-do-it=true", block.Cid().String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), block.Cid().String())
})
}
func TestChainStatObj(t *testing.T) {
cmd := WithCategory("chain", ChainStatObjCmd)
block := mock.MkBlock(nil, 0, 0)
stat := api.ObjStat{Size: 123, Links: 321}
checkOutput := func(buf *bytes.Buffer) {
out := buf.String()
outSplit := strings.Split(out, "\n")
assert.Contains(t, outSplit[0], fmt.Sprintf("%d", stat.Links))
assert.Contains(t, outSplit[1], fmt.Sprintf("%d", stat.Size))
}
// given no --base flag, it calls ChainStatObj with base=cid.Undef
t.Run("no-base", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), cid.Undef).Return(stat, nil),
)
//stm: @CLI_CHAIN_STAT_OBJECT_001
err := app.Run([]string{"chain", "stat-obj", block.Cid().String()})
assert.NoError(t, err)
checkOutput(buf)
})
// given a --base flag, it calls ChainStatObj with that base
t.Run("base", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), block.Cid()).Return(stat, nil),
)
//stm: @CLI_CHAIN_STAT_OBJECT_002
err := app.Run([]string{"chain", "stat-obj", fmt.Sprintf("-base=%s", block.Cid().String()), block.Cid().String()})
assert.NoError(t, err)
checkOutput(buf)
})
}
func TestChainGetMsg(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetMsgCmd))
defer done()
addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)
from := addrs[0]
to := addrs[1]
msg := mock.UnsignedMessage(*from, *to, 0)
obj := new(bytes.Buffer)
err = msg.MarshalCBOR(obj)
assert.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainReadObj(ctx, msg.Cid()).Return(obj.Bytes(), nil),
)
//stm: @CLI_CHAIN_GET_MESSAGE_001
err = app.Run([]string{"chain", "getmessage", msg.Cid().String()})
assert.NoError(t, err)
var out types.Message
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.Equal(t, *msg, out)
}
func TestSetHead(t *testing.T) {
cmd := WithCategory("chain", ChainSetHeadCmd)
genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
ts := mock.TipSet(mock.MkBlock(genesis, 1, 0))
epoch := abi.ChainEpoch(uint64(0))
// given the -genesis flag, resets head to genesis ignoring the provided ts positional argument
t.Run("genesis", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetGenesis(ctx).Return(genesis, nil),
mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
)
//stm: @CLI_CHAIN_SET_HEAD_003
err := app.Run([]string{"chain", "sethead", "-genesis=true", ts.Key().String()})
assert.NoError(t, err)
})
// given the -epoch flag, resets head to given epoch, ignoring the provided ts positional argument
t.Run("epoch", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(genesis, nil),
mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
)
//stm: @CLI_CHAIN_SET_HEAD_002
err := app.Run([]string{"chain", "sethead", fmt.Sprintf("-epoch=%s", epoch), ts.Key().String()})
assert.NoError(t, err)
})
// given no flag, resets the head to given tipset key
t.Run("default", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetBlock(ctx, ts.Key().Cids()[0]).Return(ts.Blocks()[0], nil),
mockApi.EXPECT().ChainSetHead(ctx, ts.Key()).Return(nil),
)
//stm: @CLI_CHAIN_SET_HEAD_001
err := app.Run([]string{"chain", "sethead", ts.Key().Cids()[0].String()})
assert.NoError(t, err)
})
}
func TestInspectUsage(t *testing.T) {
cmd := WithCategory("chain", ChainInspectUsage)
ts := mock.TipSet(mock.MkBlock(nil, 0, 0))
addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)
from := addrs[0]
to := addrs[1]
msg := mock.UnsignedMessage(*from, *to, 0)
msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}
actor := &types.Actor{
Code: builtin.StorageMarketActorCodeID,
Nonce: 0,
Balance: big.NewInt(1000000000),
}
t.Run("default", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, ts.Blocks()[0].Cid()).Return(msgs, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, ts.Parents()).Return(nil, nil),
mockApi.EXPECT().StateGetActor(ctx, *to, ts.Key()).Return(actor, nil),
)
//stm: @CLI_CHAIN_INSPECT_USAGE_001
err := app.Run([]string{"chain", "inspect-usage"})
assert.NoError(t, err)
out := buf.String()
// output is plain text, so we assert on substrings
assert.Contains(t, out, from.String())
assert.Contains(t, out, to.String())
// check for gas by sender
assert.Contains(t, out, "By Sender")
// check for gas by method
assert.Contains(t, out, "By Method:\nSend")
})
}
func TestChainList(t *testing.T) {
cmd := WithCategory("chain", ChainListCmd)
genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
blk := mock.MkBlock(genesis, 0, 0)
blk.Height = 1
head := mock.TipSet(blk)
addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)
from := addrs[0]
to := addrs[1]
msg := mock.UnsignedMessage(*from, *to, 0)
msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}
blockMsgs := &api.BlockMessages{}
receipts := []*types.MessageReceipt{}
t.Run("default", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// the same mocked methods are expected multiple times because the command loops over all tipsets (2 in this case)
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(genesis, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, genesis.Blocks()[0].Cid()).Return(blockMsgs, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, head.Blocks()[0].Cid()).Return(msgs, nil),
mockApi.EXPECT().ChainGetParentReceipts(ctx, head.Blocks()[0].Cid()).Return(receipts, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, head.Blocks()[0].Cid()).Return(blockMsgs, nil),
)
//stm: @CLI_CHAIN_LIST_001
err := app.Run([]string{"chain", "love", "--gas-stats=true"}) // chain is love ❤️
assert.NoError(t, err)
out := buf.String()
// should print out 2 blocks, indexed with 0: and 1:
assert.Contains(t, out, "0:")
assert.Contains(t, out, "1:")
})
}
func TestChainGet(t *testing.T) {
blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)
cmd := WithCategory("chain", ChainGetCmd)
// given no -as-type flag & ipfs prefix, should print object as JSON if it's marshalable
t.Run("ipfs", func(t *testing.T) {
path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)
//stm: @CLI_CHAIN_GET_001
err := app.Run([]string{"chain", "get", path})
assert.NoError(t, err)
var out types.BlockHeader
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.Equal(t, *blk, out)
})
// given no -as-type flag & ipfs prefix, should traverse from head.ParentStateRoot and print JSON if it's marshalable
t.Run("pstate", func(t *testing.T) {
p1 := "/pstate"
p2 := fmt.Sprintf("/ipfs/%s", ts.ParentState().String())
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainGetNode(ctx, p2).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)
//stm: @CLI_CHAIN_GET_002
err := app.Run([]string{"chain", "get", p1})
assert.NoError(t, err)
var out types.BlockHeader
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.Equal(t, *blk, out)
})
// given an unknown -as-type value, return an error
t.Run("unknown-type", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()
path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)
//stm: @CLI_CHAIN_GET_004
err := app.Run([]string{"chain", "get", "-as-type=foo", path})
assert.Error(t, err)
})
}
func TestChainBisect(t *testing.T) {
blk1 := mock.MkBlock(nil, 0, 0)
blk1.Height = 0
ts1 := mock.TipSet(blk1)
blk2 := mock.MkBlock(ts1, 0, 0)
blk2.Height = 1
ts2 := mock.TipSet(blk2)
subpath := "whatever/its/mocked"
minHeight := uint64(0)
maxHeight := uint64(1)
shell := "echo"
path := fmt.Sprintf("/ipld/%s/%s", ts2.ParentState(), subpath)
cmd := WithCategory("chain", ChainBisectCmd)
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), types.EmptyTSK).Return(ts2, nil),
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), ts2.Key()).Return(ts2, nil),
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk2.Cid(), Obj: blk2}, nil),
)
//stm: @CLI_CHAIN_BISECT_001
err := app.Run([]string{"chain", "bisect", fmt.Sprintf("%d", minHeight), fmt.Sprintf("%d", maxHeight), subpath, shell})
assert.NoError(t, err)
out := buf.String()
assert.Contains(t, out, path)
}
func TestChainExport(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainExportCmd))
defer done()
// export writes to a file; it is mocked here so the test has no filesystem side effects
mockFile := mockExportFile{new(bytes.Buffer)}
app.Metadata["export-file"] = mockFile
blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
export := make(chan []byte, 2)
expBytes := []byte("whatever")
export <- expBytes
export <- []byte{} // empty slice means export is complete
close(export)
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainExport(ctx, abi.ChainEpoch(0), false, ts.Key()).Return(export, nil),
)
//stm: @CLI_CHAIN_EXPORT_001
err := app.Run([]string{"chain", "export", "whatever.car"})
assert.NoError(t, err)
assert.Equal(t, expBytes, mockFile.Bytes())
}
func TestChainGasPrice(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGasPriceCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// the implementation estimates gas for several block counts,
// so we mock the call, count how many times it is made, and expect that many lines of output
calls := 0
mockApi.
EXPECT().
GasEstimateGasPremium(ctx, gomock.Any(), builtin.SystemActorAddr, int64(10000), types.EmptyTSK).
Return(big.NewInt(0), nil).
AnyTimes().
Do(func(a, b, c, d, e interface{}) { // looks funny, but we don't care about args here, just counting
calls++
})
//stm: @CLI_CHAIN_GAS_PRICE_001
err := app.Run([]string{"chain", "gas-price"})
assert.NoError(t, err)
lines := strings.Split(strings.Trim(buf.String(), "\n"), "\n")
assert.Equal(t, calls, len(lines))
}
type mockExportFile struct {
*bytes.Buffer
}
func (mef mockExportFile) Close() error {
return nil
}

View File

@ -358,7 +358,13 @@ The minimum value is 518400 (6 months).`,
&CidBaseFlag, &CidBaseFlag,
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
expectedArgsMsg := "expected 4 args: dataCid, miner, price, duration"
if !cctx.Args().Present() { if !cctx.Args().Present() {
if cctx.Bool("manual-stateless-deal") {
return xerrors.New("--manual-stateless-deal can not be combined with interactive deal mode: you must specify the " + expectedArgsMsg)
}
return interactiveDeal(cctx) return interactiveDeal(cctx)
} }
@ -371,7 +377,7 @@ The minimum value is 518400 (6 months).`,
afmt := NewAppFmt(cctx.App) afmt := NewAppFmt(cctx.App)
if cctx.NArg() != 4 { if cctx.NArg() != 4 {
return xerrors.New("expected 4 args: dataCid, miner, price, duration") return xerrors.New(expectedArgsMsg)
} }
// [data, miner, price, dur] // [data, miner, price, dur]
@ -667,6 +673,8 @@ uiLoop:
state = "miner" state = "miner"
case "miner": case "miner":
maddrs = maddrs[:0]
ask = ask[:0]
afmt.Print("Miner Addresses (f0.. f0..), none to find: ") afmt.Print("Miner Addresses (f0.. f0..), none to find: ")
_maddrsStr, _, err := rl.ReadLine() _maddrsStr, _, err := rl.ReadLine()
@ -802,7 +810,8 @@ uiLoop:
dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64) dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64)
if err != nil { if err != nil {
return err printErr(xerrors.Errorf("reading deal count: invalid number"))
continue
} }
color.Blue(".. Picking miners") color.Blue(".. Picking miners")
@ -859,12 +868,13 @@ uiLoop:
a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr) a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr)
if err != nil { if err != nil {
printErr(xerrors.Errorf("failed to query ask: %w", err)) printErr(xerrors.Errorf("failed to query ask for miner %s: %w", maddr.String(), err))
state = "miner" state = "miner"
continue uiLoop continue uiLoop
} }
ask = append(ask, *a) ask = append(ask, *a)
} }
// TODO: run more validation // TODO: run more validation

View File

@ -1,7 +1,9 @@
package cli package cli
import ( import (
"bytes"
"context" "context"
"encoding/hex"
"fmt" "fmt"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
@ -34,6 +36,7 @@ var filplusCmd = &cli.Command{
filplusListClientsCmd, filplusListClientsCmd,
filplusCheckClientCmd, filplusCheckClientCmd,
filplusCheckNotaryCmd, filplusCheckNotaryCmd,
filplusSignRemoveDataCapProposal,
}, },
} }
@ -274,3 +277,112 @@ func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address)
return st.VerifierDataCap(vid) return st.VerifierDataCap(vid)
} }
var filplusSignRemoveDataCapProposal = &cli.Command{
Name: "sign-remove-data-cap-proposal",
Usage: "allows a notary to sign a Remove Data Cap Proposal",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "id",
Usage: "specify the RemoveDataCapProposal ID (will look up on chain if unspecified)",
Required: false,
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 3 {
return fmt.Errorf("must specify three arguments: notary address, client address, and allowance to remove")
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("failed to get full node api: %w", err)
}
defer closer()
ctx := ReqContext(cctx)
act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get verifreg actor: %w", err)
}
apibs := blockstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
st, err := verifreg.Load(store, act)
if err != nil {
return xerrors.Errorf("failed to load verified registry state: %w", err)
}
verifier, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
verifierIdAddr, err := api.StateLookupID(ctx, verifier, types.EmptyTSK)
if err != nil {
return err
}
client, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
clientIdAddr, err := api.StateLookupID(ctx, client, types.EmptyTSK)
if err != nil {
return err
}
allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2))
if err != nil {
return err
}
_, dataCap, err := st.VerifiedClientDataCap(clientIdAddr)
if err != nil {
return xerrors.Errorf("failed to find verified client data cap: %w", err)
}
if dataCap.LessThanEqual(big.Zero()) {
return xerrors.Errorf("client data cap %s is less than amount requested to be removed %s", dataCap.String(), allowanceToRemove.String())
}
found, _, err := checkNotary(ctx, api, verifier)
if err != nil {
return xerrors.Errorf("failed to check notary status: %w", err)
}
if !found {
return xerrors.New("verifier address must be a notary")
}
id := cctx.Uint64("id")
if id == 0 {
_, id, err = st.RemoveDataCapProposalID(verifierIdAddr, clientIdAddr)
if err != nil {
return xerrors.Errorf("failed find remove data cap proposal id: %w", err)
}
}
params := verifreg.RemoveDataCapProposal{
RemovalProposalID: verifreg.RmDcProposalID{ProposalID: id},
DataCapAmount: allowanceToRemove,
VerifiedClient: clientIdAddr,
}
paramBuf := new(bytes.Buffer)
paramBuf.WriteString(verifreg.SignatureDomainSeparation_RemoveDataCap)
err = params.MarshalCBOR(paramBuf)
if err != nil {
return xerrors.Errorf("failed to marshall paramBuf: %w", err)
}
sig, err := api.WalletSign(ctx, verifier, paramBuf.Bytes())
if err != nil {
return xerrors.Errorf("failed to sign message: %w", err)
}
sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)
fmt.Println(hex.EncodeToString(sigBytes))
return nil
},
}
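The printed hex string is the signature type byte followed by the raw signature bytes. A sketch of how the consuming side might decode it back into a go-state-types crypto.Signature; this helper is an assumption for illustration, not part of the change:

```go
package example

import (
	"encoding/hex"
	"fmt"

	"github.com/filecoin-project/go-state-types/crypto"
)

// decodeNotarySig reverses the hex output printed above: the first byte is the
// signature type, the remainder is the raw signature data.
func decodeNotarySig(s string) (*crypto.Signature, error) {
	b, err := hex.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("decoding hex signature: %w", err)
	}
	if len(b) < 2 {
		return nil, fmt.Errorf("signature too short")
	}
	return &crypto.Signature{
		Type: crypto.SigType(b[0]),
		Data: b[1:],
	}, nil
}
```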

32
cli/mocks_test.go Normal file
View File

@ -0,0 +1,32 @@
package cli
import (
"bytes"
"testing"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/golang/mock/gomock"
ucli "github.com/urfave/cli/v2"
)
// NewMockAppWithFullAPI returns a gomock-ed CLI app used for unit tests
// see cli/util/api.go:GetFullNodeAPI for mock API injection
func NewMockAppWithFullAPI(t *testing.T, cmd *ucli.Command) (*ucli.App, *mocks.MockFullNode, *bytes.Buffer, func()) {
app := ucli.NewApp()
app.Commands = ucli.Commands{cmd}
app.Setup()
// create and inject the mock API into app Metadata
ctrl := gomock.NewController(t)
mockFullNode := mocks.NewMockFullNode(ctrl)
var fullNode api.FullNode = mockFullNode
app.Metadata["test-full-api"] = fullNode
// output capture only works if the command implementation writes through app.Writer;
// commands that print with fmt.* directly have to be refactored first
buf := &bytes.Buffer{}
app.Writer = buf
return app, mockFullNode, buf, ctrl.Finish
}
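The capture trick above only works because the refactored commands print through NewAppFmt(cctx.App) rather than fmt directly. A minimal sketch of such a writer-bound formatter, assuming nothing about the real AppFmt beyond it wrapping app.Writer; names here are illustrative:

```go
package example

import (
	"fmt"
	"io"

	ucli "github.com/urfave/cli/v2"
)

// appFmt is a writer-bound formatter: tests swap app.Writer for a buffer and
// read back everything the command printed.
type appFmt struct {
	w io.Writer
}

func newAppFmt(app *ucli.App) *appFmt { return &appFmt{w: app.Writer} }

func (a *appFmt) Println(args ...interface{}) {
	fmt.Fprintln(a.w, args...)
}

func (a *appFmt) Printf(format string, args ...interface{}) {
	fmt.Fprintf(a.w, format, args...)
}
```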

View File

@ -60,6 +60,8 @@ var MpoolPending = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -72,7 +74,7 @@ var MpoolPending = &cli.Command{
if tos := cctx.String("to"); tos != "" { if tos := cctx.String("to"); tos != "" {
a, err := address.NewFromString(tos) a, err := address.NewFromString(tos)
if err != nil { if err != nil {
return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err) return xerrors.Errorf("given 'to' address %q was invalid: %w", tos, err)
} }
toa = a toa = a
} }
@ -80,7 +82,7 @@ var MpoolPending = &cli.Command{
if froms := cctx.String("from"); froms != "" { if froms := cctx.String("from"); froms != "" {
a, err := address.NewFromString(froms) a, err := address.NewFromString(froms)
if err != nil { if err != nil {
return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err) return xerrors.Errorf("given 'from' address %q was invalid: %w", froms, err)
} }
froma = a froma = a
} }
@ -119,13 +121,13 @@ var MpoolPending = &cli.Command{
} }
if cctx.Bool("cids") { if cctx.Bool("cids") {
fmt.Println(msg.Cid()) afmt.Println(msg.Cid())
} else { } else {
out, err := json.MarshalIndent(msg, "", " ") out, err := json.MarshalIndent(msg, "", " ")
if err != nil { if err != nil {
return err return err
} }
fmt.Println(string(out)) afmt.Println(string(out))
} }
} }
@ -216,6 +218,8 @@ var MpoolStat = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -234,6 +238,7 @@ var MpoolStat = &cli.Command{
currTs := ts currTs := ts
for i := 0; i < cctx.Int("basefee-lookback"); i++ { for i := 0; i < cctx.Int("basefee-lookback"); i++ {
currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
if err != nil { if err != nil {
return xerrors.Errorf("walking chain: %w", err) return xerrors.Errorf("walking chain: %w", err)
} }
@ -296,7 +301,7 @@ var MpoolStat = &cli.Command{
for a, bkt := range buckets { for a, bkt := range buckets {
act, err := api.StateGetActor(ctx, a, ts.Key()) act, err := api.StateGetActor(ctx, a, ts.Key())
if err != nil { if err != nil {
fmt.Printf("%s, err: %s\n", a, err) afmt.Printf("%s, err: %s\n", a, err)
continue continue
} }
@ -350,11 +355,11 @@ var MpoolStat = &cli.Command{
total.belowPast += stat.belowPast total.belowPast += stat.belowPast
total.gasLimit = big.Add(total.gasLimit, stat.gasLimit) total.gasLimit = big.Add(total.gasLimit, stat.gasLimit)
fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit) afmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
} }
fmt.Println("-----") afmt.Println("-----")
fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit) afmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)
return nil return nil
}, },
@ -385,8 +390,9 @@ var MpoolReplaceCmd = &cli.Command{
Usage: "Spend up to X FIL for this message in units of FIL. Previously when flag was `max-fee` units were in attoFIL. Applicable for auto mode", Usage: "Spend up to X FIL for this message in units of FIL. Previously when flag was `max-fee` units were in attoFIL. Applicable for auto mode",
}, },
}, },
ArgsUsage: "<from nonce> | <message-cid>", ArgsUsage: "<from> <nonce> | <message-cid>",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
@ -407,13 +413,14 @@ var MpoolReplaceCmd = &cli.Command{
msg, err := api.ChainGetMessage(ctx, mcid) msg, err := api.ChainGetMessage(ctx, mcid)
if err != nil { if err != nil {
return fmt.Errorf("could not find referenced message: %w", err) return xerrors.Errorf("could not find referenced message: %w", err)
} }
from = msg.From from = msg.From
nonce = msg.Nonce nonce = msg.Nonce
case 2: case 2:
f, err := address.NewFromString(cctx.Args().Get(0)) arg0 := cctx.Args().Get(0)
f, err := address.NewFromString(arg0)
if err != nil { if err != nil {
return err return err
} }
@ -448,7 +455,7 @@ var MpoolReplaceCmd = &cli.Command{
} }
if found == nil { if found == nil {
return fmt.Errorf("no pending message found from %s with nonce %d", from, nonce) return xerrors.Errorf("no pending message found from %s with nonce %d", from, nonce)
} }
msg := found.Message msg := found.Message
@ -460,7 +467,7 @@ var MpoolReplaceCmd = &cli.Command{
if cctx.IsSet("fee-limit") { if cctx.IsSet("fee-limit") {
maxFee, err := types.ParseFIL(cctx.String("fee-limit")) maxFee, err := types.ParseFIL(cctx.String("fee-limit"))
if err != nil { if err != nil {
return fmt.Errorf("parsing max-spend: %w", err) return xerrors.Errorf("parsing max-spend: %w", err)
} }
mss = &lapi.MessageSendSpec{ mss = &lapi.MessageSendSpec{
MaxFee: abi.TokenAmount(maxFee), MaxFee: abi.TokenAmount(maxFee),
@ -472,7 +479,7 @@ var MpoolReplaceCmd = &cli.Command{
msg.GasPremium = abi.NewTokenAmount(0) msg.GasPremium = abi.NewTokenAmount(0)
retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK) retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK)
if err != nil { if err != nil {
return fmt.Errorf("failed to estimate gas values: %w", err) return xerrors.Errorf("failed to estimate gas values: %w", err)
} }
msg.GasPremium = big.Max(retm.GasPremium, minRBF) msg.GasPremium = big.Max(retm.GasPremium, minRBF)
@ -489,26 +496,26 @@ var MpoolReplaceCmd = &cli.Command{
} }
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium")) msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
if err != nil { if err != nil {
return fmt.Errorf("parsing gas-premium: %w", err) return xerrors.Errorf("parsing gas-premium: %w", err)
} }
// TODO: estimate fee cap here // TODO: estimate fee cap here
msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap")) msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
if err != nil { if err != nil {
return fmt.Errorf("parsing gas-feecap: %w", err) return xerrors.Errorf("parsing gas-feecap: %w", err)
} }
} }
smsg, err := api.WalletSignMessage(ctx, msg.From, &msg) smsg, err := api.WalletSignMessage(ctx, msg.From, &msg)
if err != nil { if err != nil {
return fmt.Errorf("failed to sign message: %w", err) return xerrors.Errorf("failed to sign message: %w", err)
} }
cid, err := api.MpoolPush(ctx, smsg) cid, err := api.MpoolPush(ctx, smsg)
if err != nil { if err != nil {
return fmt.Errorf("failed to push new message to mempool: %w", err) return xerrors.Errorf("failed to push new message to mempool: %w", err)
} }
fmt.Println("new message cid: ", cid) afmt.Println("new message cid: ", cid)
return nil return nil
}, },
} }
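Editorial aside on the auto path shown above, where the premium is re-estimated and then clamped with big.Max(retm.GasPremium, minRBF): a minimal, self-contained sketch of how such a minimum replace-by-fee premium could be derived. The 25% bump and the helper name are assumptions (the mempool's default ReplaceByFeeRatio is commonly 1.25); this is not the exact lotus helper.
package main
import (
	"fmt"
	"github.com/filecoin-project/go-state-types/big"
)
// minimumRBFPremium returns the smallest gas premium a replacement message
// may carry: the old premium bumped by 25%, plus one attoFIL to break ties.
// Sketch only; the real mempool derives this from its ReplaceByFeeRatio.
func minimumRBFPremium(oldPremium big.Int) big.Int {
	bumped := big.Div(big.Mul(oldPremium, big.NewInt(125)), big.NewInt(100))
	return big.Add(bumped, big.NewInt(1))
}
func main() {
	fmt.Println(minimumRBFPremium(big.NewInt(100000))) // prints 125001
}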
@ -531,6 +538,8 @@ var MpoolFindCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -548,7 +557,7 @@ var MpoolFindCmd = &cli.Command{
if cctx.IsSet("to") { if cctx.IsSet("to") {
a, err := address.NewFromString(cctx.String("to")) a, err := address.NewFromString(cctx.String("to"))
if err != nil { if err != nil {
return fmt.Errorf("'to' address was invalid: %w", err) return xerrors.Errorf("'to' address was invalid: %w", err)
} }
toFilter = a toFilter = a
@ -557,7 +566,7 @@ var MpoolFindCmd = &cli.Command{
if cctx.IsSet("from") { if cctx.IsSet("from") {
a, err := address.NewFromString(cctx.String("from")) a, err := address.NewFromString(cctx.String("from"))
if err != nil { if err != nil {
return fmt.Errorf("'from' address was invalid: %w", err) return xerrors.Errorf("'from' address was invalid: %w", err)
} }
fromFilter = a fromFilter = a
@ -591,7 +600,7 @@ var MpoolFindCmd = &cli.Command{
return err return err
} }
fmt.Println(string(b)) afmt.Println(string(b))
return nil return nil
}, },
} }
@ -605,6 +614,8 @@ var MpoolConfig = &cli.Command{
return cli.ShowCommandHelp(cctx, cctx.Command.Name) return cli.ShowCommandHelp(cctx, cctx.Command.Name)
} }
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -624,7 +635,7 @@ var MpoolConfig = &cli.Command{
return err return err
} }
fmt.Println(string(bytes)) afmt.Println(string(bytes))
} else { } else {
cfg := new(types.MpoolConfig) cfg := new(types.MpoolConfig)
bytes := []byte(cctx.Args().Get(0)) bytes := []byte(cctx.Args().Get(0))
@ -651,6 +662,8 @@ var MpoolGasPerfCmd = &cli.Command{
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -717,7 +730,7 @@ var MpoolGasPerfCmd = &cli.Command{
gasReward := getGasReward(m) gasReward := getGasReward(m)
gasPerf := getGasPerf(gasReward, m.Message.GasLimit) gasPerf := getGasPerf(gasReward, m.Message.GasLimit)
fmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf) afmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
} }
return nil return nil

cli/mpool_test.go Normal file
View File

@ -0,0 +1,582 @@
//stm: #cli
package cli
import (
"context"
"fmt"
"testing"
"encoding/json"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestStat(t *testing.T) {
t.Run("local", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
// mock actor to return for the sender
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr, toAddr}, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
)
//stm: @CLI_MEMPOOL_STAT_002
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1", "--local"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), "Nonce past: 1")
})
t.Run("all", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
// mock actor to return for the sender
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
)
//stm: @CLI_MEMPOOL_STAT_001
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), "Nonce past: 1")
})
}
func TestPending(t *testing.T) {
t.Run("all", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_PENDING_001
err = app.Run([]string{"mpool", "pending", "--cids"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("local", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_PENDING_002
err = app.Run([]string{"mpool", "pending", "--local"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("to", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_PENDING_003
err = app.Run([]string{"mpool", "pending", "--to", sm.Message.To.String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("from", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_PENDING_004
err = app.Run([]string{"mpool", "pending", "--from", sm.Message.From.String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
}
func TestReplace(t *testing.T) {
t.Run("manual", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, &sm.Message).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)
//stm: @CLI_MEMPOOL_REPLACE_002
err = app.Run([]string{"mpool", "replace", "--gas-premium", "1", "--gas-feecap", "100", sm.Cid().String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("auto", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
// gas fee param should be equal to the one passed in the cli invocation (used below)
maxFee := "1000000"
parsedFee, err := types.ParseFIL(maxFee)
if err != nil {
t.Fatal(err)
}
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}
gomock.InOrder(
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
// use gomock.Any() to match the message in the expected api calls;
// the replace function modifies the message between calls, so matching the exact argument would be brittle
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)
//stm: @CLI_MEMPOOL_REPLACE_002
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Cid().String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("sender / nonce", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
// gas fee param should be equal to the one passed in the cli invocation (used below)
maxFee := "1000000"
parsedFee, err := types.ParseFIL(maxFee)
if err != nil {
t.Fatal(err)
}
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
// use gomock.Any() to match the message in the expected api calls;
// the replace function modifies the message between calls, so matching the exact argument would be brittle
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)
//stm: @CLI_MEMPOOL_REPLACE_001
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Message.From.String(), fmt.Sprint(sm.Message.Nonce)})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
}
func TestFindMsg(t *testing.T) {
t.Run("from", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_FIND_001
err = app.Run([]string{"mpool", "find", "--from", sm.Message.From.String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("to", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_FIND_002
err = app.Run([]string{"mpool", "find", "--to", sm.Message.To.String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
t.Run("method", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)
//stm: @CLI_MEMPOOL_FIND_003
err = app.Run([]string{"mpool", "find", "--method", sm.Message.Method.String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
}
func TestGasPerf(t *testing.T) {
t.Run("all", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
)
//stm: @CLI_MEMPOOL_GAS_PERF_002
err = app.Run([]string{"mpool", "gas-perf", "--all", "true"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Message.From.String())
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
})
t.Run("local", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))
// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
)
//stm: @CLI_MEMPOOL_GAS_PERF_001
err = app.Run([]string{"mpool", "gas-perf"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Message.From.String())
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
})
}
func TestConfig(t *testing.T) {
t.Run("get", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: 0.25}
gomock.InOrder(
mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil),
)
//stm: @CLI_MEMPOOL_CONFIG_001
err = app.Run([]string{"mpool", "config"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), mpoolCfg.PriorityAddrs[0].String())
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitHigh))
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitLow))
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.ReplaceByFeeRatio))
})
t.Run("set", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: 0.33}
gomock.InOrder(
mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil),
)
bytes, err := json.Marshal(mpoolCfg)
if err != nil {
t.Fatal(err)
}
//stm: @CLI_MEMPOOL_CONFIG_002
err = app.Run([]string{"mpool", "config", string(bytes)})
assert.NoError(t, err)
})
}

View File

@ -36,6 +36,11 @@ var NetCmd = &cli.Command{
NetReachability, NetReachability,
NetBandwidthCmd, NetBandwidthCmd,
NetBlockCmd, NetBlockCmd,
NetStatCmd,
NetLimitCmd,
NetProtectAdd,
NetProtectRemove,
NetProtectList,
}, },
} }
@ -606,3 +611,208 @@ var NetBlockListCmd = &cli.Command{
return nil return nil
}, },
} }
var NetStatCmd = &cli.Command{
Name: "stat",
Usage: "Report resource usage for a scope",
ArgsUsage: "scope",
Description: `Report resource usage for a scope.
The scope can be one of the following:
- system -- reports the system aggregate resource usage.
- transient -- reports the transient resource usage.
- svc:<service> -- reports the resource usage of a specific service.
- proto:<proto> -- reports the resource usage of a specific protocol.
- peer:<peer> -- reports the resource usage of a specific peer.
- all -- reports the resource usage for all currently active scopes.
`,
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
args := cctx.Args().Slice()
if len(args) != 1 {
return xerrors.Errorf("must specify exactly one scope")
}
scope := args[0]
result, err := api.NetStat(ctx, scope)
if err != nil {
return err
}
enc := json.NewEncoder(os.Stdout)
return enc.Encode(result)
},
}
var NetLimitCmd = &cli.Command{
Name: "limit",
Usage: "Get or set resource limits for a scope",
ArgsUsage: "scope [limit]",
Description: `Get or set resource limits for a scope.
The scope can be one of the following:
- system -- limits for the system aggregate.
- transient -- limits for the transient scope.
- svc:<service> -- limits for a specific service.
- proto:<proto> -- limits for a specific protocol.
- peer:<peer> -- limits for a specific peer.
The limit is json-formatted, with the same structure as the limits file.
`,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "set",
Usage: "set the limit for a scope",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
args := cctx.Args().Slice()
if cctx.Bool("set") {
if len(args) != 2 {
return xerrors.Errorf("must specify exactly a scope and a limit")
}
scope := args[0]
limitStr := args[1]
var limit atypes.NetLimit
err := json.Unmarshal([]byte(limitStr), &limit)
if err != nil {
return xerrors.Errorf("error decoding limit: %w", err)
}
return api.NetSetLimit(ctx, scope, limit)
}
if len(args) != 1 {
return xerrors.Errorf("must specify exactly one scope")
}
scope := args[0]
result, err := api.NetLimit(ctx, scope)
if err != nil {
return err
}
enc := json.NewEncoder(os.Stdout)
return enc.Encode(result)
},
}
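Editorial sketch for the json-formatted limit mentioned above: a tiny standalone program that produces the kind of document `--set` expects. The field names (Memory, Streams, Conns, FD, ...) are assumptions borrowed from the libp2p resource manager and may not match atypes.NetLimit exactly; treat this as illustration, not the canonical schema.
package main
import (
	"encoding/json"
	"fmt"
)
// netLimitSketch mirrors, by assumption, the shape of the limit JSON that a
// `lotus net limit --set <scope> '<json>'` invocation consumes.
type netLimitSketch struct {
	Memory                                   int64
	Streams, StreamsInbound, StreamsOutbound int
	Conns, ConnsInbound, ConnsOutbound       int
	FD                                       int
}
func main() {
	limit := netLimitSketch{
		Memory:          128 << 20, // bytes
		Streams:         512,
		StreamsInbound:  256,
		StreamsOutbound: 512,
		Conns:           64,
		ConnsInbound:    32,
		ConnsOutbound:   64,
		FD:              128,
	}
	out, _ := json.Marshal(limit)
	fmt.Println(string(out))
}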
var NetProtectAdd = &cli.Command{
Name: "protect",
Usage: "Add one or more peer IDs to the list of protected peer connections",
ArgsUsage: "<peer-id> [<peer-id>...]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
pids, err := decodePeerIDsFromArgs(cctx)
if err != nil {
return err
}
err = api.NetProtectAdd(ctx, pids)
if err != nil {
return err
}
fmt.Println("added to protected peers:")
for _, pid := range pids {
fmt.Printf(" %s\n", pid)
}
return nil
},
}
var NetProtectRemove = &cli.Command{
Name: "unprotect",
Usage: "Remove one or more peer IDs from the list of protected peer connections.",
ArgsUsage: "<peer-id> [<peer-id>...]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
pids, err := decodePeerIDsFromArgs(cctx)
if err != nil {
return err
}
err = api.NetProtectRemove(ctx, pids)
if err != nil {
return err
}
fmt.Printf("removed from protected peers:")
for _, pid := range pids {
fmt.Printf(" %s\n", pid)
}
return nil
},
}
// decodePeerIDsFromArgs decodes all the arguments present in cli.Context.Args as peer.ID.
//
// This function requires at least one argument to be present, and arguments must not be empty
// strings. Otherwise, an error is returned.
func decodePeerIDsFromArgs(cctx *cli.Context) ([]peer.ID, error) {
pidArgs := cctx.Args().Slice()
if len(pidArgs) == 0 {
return nil, xerrors.Errorf("must specify at least one peer ID as an argument")
}
var pids []peer.ID
for _, pidStr := range pidArgs {
if pidStr == "" {
return nil, xerrors.Errorf("peer ID must not be empty")
}
pid, err := peer.Decode(pidStr)
if err != nil {
return nil, err
}
pids = append(pids, pid)
}
return pids, nil
}
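Editorial note on the helper above: decoding is all-or-nothing, so a single empty or malformed peer ID aborts the command before any NetProtectAdd or NetProtectRemove call is made, and a partially applied protection list cannot occur.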
var NetProtectList = &cli.Command{
Name: "list-protected",
Usage: "List the peer IDs with protected connection.",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
pids, err := api.NetProtectList(ctx)
if err != nil {
return err
}
for _, pid := range pids {
fmt.Printf("%s\n", pid)
}
return nil
},
}

View File

@ -8,7 +8,7 @@ import (
"sort" "sort"
"strings" "strings"
"github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/paychmgr" "github.com/filecoin-project/lotus/paychmgr"
@ -39,12 +39,15 @@ var paychAddFundsCmd = &cli.Command{
Usage: "Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist.", Usage: "Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist.",
ArgsUsage: "[fromAddress toAddress amount]", ArgsUsage: "[fromAddress toAddress amount]",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "restart-retrievals", Name: "restart-retrievals",
Usage: "restart stalled retrieval deals on this payment channel", Usage: "restart stalled retrieval deals on this payment channel",
Value: true, Value: true,
}, },
&cli.BoolFlag{
Name: "reserve",
Usage: "mark funds as reserved",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 3 { if cctx.Args().Len() != 3 {
@ -66,7 +69,7 @@ var paychAddFundsCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("parsing amount failed: %s", err)) return ShowHelp(cctx, fmt.Errorf("parsing amount failed: %s", err))
} }
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPIV1(cctx)
if err != nil { if err != nil {
return err return err
} }
@ -76,7 +79,14 @@ var paychAddFundsCmd = &cli.Command{
// Send a message to chain to create channel / add funds to existing // Send a message to chain to create channel / add funds to existing
// channel // channel
info, err := api.PaychGet(ctx, from, to, types.BigInt(amt)) var info *lapi.ChannelInfo
if cctx.Bool("reserve") {
info, err = api.PaychGet(ctx, from, to, types.BigInt(amt), lapi.PaychGetOpts{
OffChain: false,
})
} else {
info, err = api.PaychFund(ctx, from, to, types.BigInt(amt))
}
if err != nil { if err != nil {
return err return err
} }
@ -163,13 +173,13 @@ var paychStatusCmd = &cli.Command{
}, },
} }
func paychStatus(writer io.Writer, avail *api.ChannelAvailableFunds) { func paychStatus(writer io.Writer, avail *lapi.ChannelAvailableFunds) {
if avail.Channel == nil { if avail.Channel == nil {
if avail.PendingWaitSentinel != nil { if avail.PendingWaitSentinel != nil {
fmt.Fprint(writer, "Creating channel\n") fmt.Fprint(writer, "Creating channel\n")
fmt.Fprintf(writer, " From: %s\n", avail.From) fmt.Fprintf(writer, " From: %s\n", avail.From)
fmt.Fprintf(writer, " To: %s\n", avail.To) fmt.Fprintf(writer, " To: %s\n", avail.To)
fmt.Fprintf(writer, " Pending Amt: %d\n", avail.PendingAmt) fmt.Fprintf(writer, " Pending Amt: %s\n", types.FIL(avail.PendingAmt))
fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel) fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel)
return return
} }
@ -189,10 +199,12 @@ func paychStatus(writer io.Writer, avail *api.ChannelAvailableFunds) {
{"Channel", avail.Channel.String()}, {"Channel", avail.Channel.String()},
{"From", avail.From.String()}, {"From", avail.From.String()},
{"To", avail.To.String()}, {"To", avail.To.String()},
{"Confirmed Amt", fmt.Sprintf("%d", avail.ConfirmedAmt)}, {"Confirmed Amt", fmt.Sprintf("%s", types.FIL(avail.ConfirmedAmt))},
{"Pending Amt", fmt.Sprintf("%d", avail.PendingAmt)}, {"Available Amt", fmt.Sprintf("%s", types.FIL(avail.NonReservedAmt))},
{"Queued Amt", fmt.Sprintf("%d", avail.QueuedAmt)}, {"Voucher Redeemed Amt", fmt.Sprintf("%s", types.FIL(avail.VoucherReedeemedAmt))},
{"Voucher Redeemed Amt", fmt.Sprintf("%d", avail.VoucherReedeemedAmt)}, {"Pending Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAmt))},
{"Pending Available Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAvailableAmt))},
{"Queued Amt", fmt.Sprintf("%s", types.FIL(avail.QueuedAmt))},
} }
if avail.PendingWaitSentinel != nil { if avail.PendingWaitSentinel != nil {
nameValues = append(nameValues, []string{ nameValues = append(nameValues, []string{
@ -576,7 +588,7 @@ func outputVoucher(w io.Writer, v *paych.SignedVoucher, export bool) error {
} }
} }
fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, v.Amount.String()) fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, types.FIL(v.Amount))
if export { if export {
fmt.Fprintf(w, "; %s", enc) fmt.Fprintf(w, "; %s", enc)
} }

View File

@ -1768,6 +1768,9 @@ var StateSectorCmd = &cli.Command{
fmt.Println("SectorNumber: ", si.SectorNumber) fmt.Println("SectorNumber: ", si.SectorNumber)
fmt.Println("SealProof: ", si.SealProof) fmt.Println("SealProof: ", si.SealProof)
fmt.Println("SealedCID: ", si.SealedCID) fmt.Println("SealedCID: ", si.SealedCID)
if si.SectorKeyCID != nil {
fmt.Println("SectorKeyCID: ", si.SectorKeyCID)
}
fmt.Println("DealIDs: ", si.DealIDs) fmt.Println("DealIDs: ", si.DealIDs)
fmt.Println() fmt.Println()
fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation)) fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))

View File

@ -33,6 +33,8 @@ var SyncStatusCmd = &cli.Command{
Name: "status", Name: "status",
Usage: "check sync status", Usage: "check sync status",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
apic, closer, err := GetFullNodeAPI(cctx) apic, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -45,9 +47,9 @@ var SyncStatusCmd = &cli.Command{
return err return err
} }
fmt.Println("sync status:") afmt.Println("sync status:")
for _, ss := range state.ActiveSyncs { for _, ss := range state.ActiveSyncs {
fmt.Printf("worker %d:\n", ss.WorkerID) afmt.Printf("worker %d:\n", ss.WorkerID)
var base, target []cid.Cid var base, target []cid.Cid
var heightDiff int64 var heightDiff int64
var theight abi.ChainEpoch var theight abi.ChainEpoch
@ -62,20 +64,20 @@ var SyncStatusCmd = &cli.Command{
} else { } else {
heightDiff = 0 heightDiff = 0
} }
fmt.Printf("\tBase:\t%s\n", base) afmt.Printf("\tBase:\t%s\n", base)
fmt.Printf("\tTarget:\t%s (%d)\n", target, theight) afmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
fmt.Printf("\tHeight diff:\t%d\n", heightDiff) afmt.Printf("\tHeight diff:\t%d\n", heightDiff)
fmt.Printf("\tStage: %s\n", ss.Stage) afmt.Printf("\tStage: %s\n", ss.Stage)
fmt.Printf("\tHeight: %d\n", ss.Height) afmt.Printf("\tHeight: %d\n", ss.Height)
if ss.End.IsZero() { if ss.End.IsZero() {
if !ss.Start.IsZero() { if !ss.Start.IsZero() {
fmt.Printf("\tElapsed: %s\n", time.Since(ss.Start)) afmt.Printf("\tElapsed: %s\n", time.Since(ss.Start))
} }
} else { } else {
fmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start)) afmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start))
} }
if ss.Stage == api.StageSyncErrored { if ss.Stage == api.StageSyncErrored {
fmt.Printf("\tError: %s\n", ss.Message) afmt.Printf("\tError: %s\n", ss.Message)
} }
} }
return nil return nil
@ -168,6 +170,8 @@ var SyncCheckBadCmd = &cli.Command{
Usage: "check if the given block was marked bad, and for what reason", Usage: "check if the given block was marked bad, and for what reason",
ArgsUsage: "[blockCid]", ArgsUsage: "[blockCid]",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
napi, closer, err := GetFullNodeAPI(cctx) napi, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -190,11 +194,11 @@ var SyncCheckBadCmd = &cli.Command{
} }
if reason == "" { if reason == "" {
fmt.Println("block was not marked as bad") afmt.Println("block was not marked as bad")
return nil return nil
} }
fmt.Println(reason) afmt.Println(reason)
return nil return nil
}, },
} }

cli/sync_test.go Normal file
View File

@ -0,0 +1,189 @@
package cli
import (
"context"
"fmt"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestSyncStatus(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncStatusCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
ts2 := mock.TipSet(mock.MkBlock(ts1, 0, 0))
start := time.Now()
end := start.Add(time.Minute)
state := &api.SyncState{
ActiveSyncs: []api.ActiveSync{{
WorkerID: 1,
Base: ts1,
Target: ts2,
Stage: api.StageMessages,
Height: abi.ChainEpoch(0),
Start: start,
End: end,
Message: "whatever",
}},
VMApplied: 0,
}
mockApi.EXPECT().SyncState(ctx).Return(state, nil)
//stm: @CLI_SYNC_STATUS_001
err := app.Run([]string{"sync", "status"})
assert.NoError(t, err)
out := buf.String()
// output is plain text, so the assertions rely on substring matching
assert.Contains(t, out, fmt.Sprintf("Base:\t[%s]", ts1.Blocks()[0].Cid().String()))
assert.Contains(t, out, fmt.Sprintf("Target:\t[%s]", ts2.Blocks()[0].Cid().String()))
assert.Contains(t, out, "Height diff:\t1")
assert.Contains(t, out, "Stage: message sync")
assert.Contains(t, out, "Height: 0")
assert.Contains(t, out, "Elapsed: 1m0s")
}
func TestSyncMarkBad(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncMarkBadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blk := mock.MkBlock(nil, 0, 0)
mockApi.EXPECT().SyncMarkBad(ctx, blk.Cid()).Return(nil)
//stm: @CLI_SYNC_MARK_BAD_001
err := app.Run([]string{"sync", "mark-bad", blk.Cid().String()})
assert.NoError(t, err)
}
func TestSyncUnmarkBad(t *testing.T) {
t.Run("one-block", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blk := mock.MkBlock(nil, 0, 0)
mockApi.EXPECT().SyncUnmarkBad(ctx, blk.Cid()).Return(nil)
//stm: @CLI_SYNC_UNMARK_BAD_001
err := app.Run([]string{"sync", "unmark-bad", blk.Cid().String()})
assert.NoError(t, err)
})
t.Run("all", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mockApi.EXPECT().SyncUnmarkAllBad(ctx).Return(nil)
//stm: @CLI_SYNC_UNMARK_BAD_002
err := app.Run([]string{"sync", "unmark-bad", "-all"})
assert.NoError(t, err)
})
}
func TestSyncCheckBad(t *testing.T) {
t.Run("not-bad", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blk := mock.MkBlock(nil, 0, 0)
mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return("", nil)
//stm: @CLI_SYNC_CHECK_BAD_002
err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), "block was not marked as bad")
})
t.Run("bad", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blk := mock.MkBlock(nil, 0, 0)
reason := "whatever"
mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return(reason, nil)
//stm: @CLI_SYNC_CHECK_BAD_001
err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
assert.NoError(t, err)
assert.Contains(t, buf.String(), reason)
})
}
func TestSyncCheckpoint(t *testing.T) {
t.Run("tipset", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)
gomock.InOrder(
mockApi.EXPECT().ChainGetBlock(ctx, blk.Cid()).Return(blk, nil),
mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
)
//stm: @CLI_SYNC_CHECKPOINT_001
err := app.Run([]string{"sync", "checkpoint", blk.Cid().String()})
assert.NoError(t, err)
})
t.Run("epoch", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
epoch := abi.ChainEpoch(0)
blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)
gomock.InOrder(
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(ts, nil),
mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
)
//stm: @CLI_SYNC_CHECKPOINT_002
err := app.Run([]string{"sync", "checkpoint", fmt.Sprintf("-epoch=%d", epoch)})
assert.NoError(t, err)
})
}

View File

@ -223,6 +223,11 @@ func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error)
} }
func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) { func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) {
// use the mocked API in CLI unit tests, see cli/mocks_test.go for mock definition
if mock, ok := ctx.App.Metadata["test-full-api"]; ok {
return &v0api.WrapperV1Full{FullNode: mock.(v1api.FullNode)}, func() {}, nil
}
if tn, ok := ctx.App.Metadata["testnode-full"]; ok { if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil
} }
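Editorial sketch of the test wiring that the "test-full-api" hook above enables. The real helper is NewMockAppWithFullAPI in cli/mocks_test.go; its exact shape, and the generated mocks package used here, are assumptions, so read this as an outline rather than the actual implementation.
package cli
import (
	"bytes"
	"testing"
	"github.com/golang/mock/gomock"
	ucli "github.com/urfave/cli/v2"
	"github.com/filecoin-project/lotus/api/mocks"
)
// newSketchApp builds a cli.App whose commands resolve GetFullNodeAPI to a
// gomock FullNode and whose output lands in a buffer that tests can inspect.
func newSketchApp(t *testing.T, cmd *ucli.Command) (*ucli.App, *mocks.MockFullNode, *bytes.Buffer) {
	ctrl := gomock.NewController(t)
	mockFull := mocks.NewMockFullNode(ctrl)
	var buf bytes.Buffer
	app := ucli.NewApp()
	app.Commands = []*ucli.Command{cmd}
	app.Writer = &buf // NewAppFmt(cctx.App) prints here, so assertions read buf
	app.Metadata = map[string]interface{}{
		"test-full-api": mockFull, // consumed by GetFullNodeAPI above
	}
	return app, mockFull, &buf
}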

View File

@ -56,6 +56,8 @@ var walletNew = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
t := cctx.Args().First() t := cctx.Args().First()
if t == "" { if t == "" {
t = "secp256k1" t = "secp256k1"
@ -66,7 +68,7 @@ var walletNew = &cli.Command{
return err return err
} }
fmt.Println(nk.String()) afmt.Println(nk.String())
return nil return nil
}, },
@ -100,6 +102,8 @@ var walletList = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
addrs, err := api.WalletList(ctx) addrs, err := api.WalletList(ctx)
if err != nil { if err != nil {
return err return err
@ -120,7 +124,7 @@ var walletList = &cli.Command{
for _, addr := range addrs { for _, addr := range addrs {
if cctx.Bool("addr-only") { if cctx.Bool("addr-only") {
fmt.Println(addr.String()) afmt.Println(addr.String())
} else { } else {
a, err := api.StateGetActor(ctx, addr, types.EmptyTSK) a, err := api.StateGetActor(ctx, addr, types.EmptyTSK)
if err != nil { if err != nil {
@ -187,6 +191,8 @@ var walletBalance = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
var addr address.Address var addr address.Address
if cctx.Args().First() != "" { if cctx.Args().First() != "" {
addr, err = address.NewFromString(cctx.Args().First()) addr, err = address.NewFromString(cctx.Args().First())
@ -203,9 +209,9 @@ var walletBalance = &cli.Command{
} }
if balance.Equals(types.NewInt(0)) { if balance.Equals(types.NewInt(0)) {
fmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance)) afmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
} else { } else {
fmt.Printf("%s\n", types.FIL(balance)) afmt.Printf("%s\n", types.FIL(balance))
} }
return nil return nil
@ -223,12 +229,14 @@ var walletGetDefault = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
addr, err := api.WalletDefaultAddress(ctx) addr, err := api.WalletDefaultAddress(ctx)
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("%s\n", addr.String()) afmt.Printf("%s\n", addr.String())
return nil return nil
}, },
} }
@ -270,6 +278,8 @@ var walletExport = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
if !cctx.Args().Present() { if !cctx.Args().Present() {
return fmt.Errorf("must specify key to export") return fmt.Errorf("must specify key to export")
} }
@ -289,7 +299,7 @@ var walletExport = &cli.Command{
return err return err
} }
fmt.Println(hex.EncodeToString(b)) afmt.Println(hex.EncodeToString(b))
return nil return nil
}, },
} }
@ -403,6 +413,8 @@ var walletSign = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
if !cctx.Args().Present() || cctx.NArg() != 2 { if !cctx.Args().Present() || cctx.NArg() != 2 {
return fmt.Errorf("must specify signing address and message to sign") return fmt.Errorf("must specify signing address and message to sign")
} }
@ -427,7 +439,7 @@ var walletSign = &cli.Command{
sigBytes := append([]byte{byte(sig.Type)}, sig.Data...) sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)
fmt.Println(hex.EncodeToString(sigBytes)) afmt.Println(hex.EncodeToString(sigBytes))
return nil return nil
}, },
} }
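Worked example (editorial note, matching the TestWalletSign case later in this diff): SigTypeSecp256k1 is type byte 0x01, so a signature whose Data is the single byte 0x01 serializes to [0x01, 0x01] and hex-encodes as "0101", which is exactly the substring the test asserts on.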
@ -444,6 +456,8 @@ var walletVerify = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
if !cctx.Args().Present() || cctx.NArg() != 3 { if !cctx.Args().Present() || cctx.NArg() != 3 {
return fmt.Errorf("must specify signing address, message, and signature to verify") return fmt.Errorf("must specify signing address, message, and signature to verify")
} }
@ -476,10 +490,10 @@ var walletVerify = &cli.Command{
return err return err
} }
if ok { if ok {
fmt.Println("valid") afmt.Println("valid")
return nil return nil
} }
fmt.Println("invalid") afmt.Println("invalid")
return NewCliError("CLI Verify called with invalid signature") return NewCliError("CLI Verify called with invalid signature")
}, },
} }
@ -547,6 +561,8 @@ var walletMarketWithdraw = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
var wallet address.Address var wallet address.Address
if cctx.String("wallet") != "" { if cctx.String("wallet") != "" {
wallet, err = address.NewFromString(cctx.String("wallet")) wallet, err = address.NewFromString(cctx.String("wallet"))
@ -622,7 +638,7 @@ var walletMarketWithdraw = &cli.Command{
return xerrors.Errorf("fund manager withdraw error: %w", err) return xerrors.Errorf("fund manager withdraw error: %w", err)
} }
fmt.Printf("WithdrawBalance message cid: %s\n", smsg) afmt.Printf("WithdrawBalance message cid: %s\n", smsg)
// wait for it to get mined into a block // wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg, uint64(cctx.Int("confidence"))) wait, err := api.StateWaitMsg(ctx, smsg, uint64(cctx.Int("confidence")))
@ -632,7 +648,7 @@ var walletMarketWithdraw = &cli.Command{
// check it executed successfully // check it executed successfully
if wait.Receipt.ExitCode != 0 { if wait.Receipt.ExitCode != 0 {
fmt.Println(cctx.App.Writer, "withdrawal failed!") afmt.Println("withdrawal failed!")
return err return err
} }
@ -647,7 +663,7 @@ var walletMarketWithdraw = &cli.Command{
return err return err
} }
fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn)) afmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn))
if withdrawn.LessThan(amt) { if withdrawn.LessThan(amt) {
fmt.Printf("Note that this is less than the requested amount of %s \n", types.FIL(amt)) fmt.Printf("Note that this is less than the requested amount of %s \n", types.FIL(amt))
} }
@ -681,6 +697,8 @@ var walletMarketAdd = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
// Get amount param // Get amount param
if !cctx.Args().Present() { if !cctx.Args().Present() {
return fmt.Errorf("must pass amount to add") return fmt.Errorf("must pass amount to add")
@ -722,7 +740,7 @@ var walletMarketAdd = &cli.Command{
return xerrors.Errorf("add balance error: %w", err) return xerrors.Errorf("add balance error: %w", err)
} }
fmt.Printf("AddBalance message cid: %s\n", smsg) afmt.Printf("AddBalance message cid: %s\n", smsg)
return nil return nil
}, },

cli/wallet_test.go Normal file
View File

@ -0,0 +1,333 @@
//stm: #cli
package cli
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"testing"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
types "github.com/filecoin-project/lotus/chain/types"
"github.com/golang/mock/gomock"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/assert"
)
func TestWalletNew(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletNew))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
keyType := types.KeyType("secp256k1")
address, err := address.NewFromString("t0123")
assert.NoError(t, err)
mockApi.EXPECT().WalletNew(ctx, keyType).Return(address, nil)
//stm: @CLI_WALLET_NEW_001
err = app.Run([]string{"wallet", "new"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), address.String())
}
func TestWalletList(t *testing.T) {
addr, err := address.NewIDAddress(1234)
addresses := []address.Address{addr}
assert.NoError(t, err)
cid := cid.Cid{}
key := types.NewTipSetKey(cid)
actor := types.Actor{
Code: cid,
Head: cid,
Nonce: 0,
Balance: big.NewInt(100),
}
t.Run("wallet-list-addr-only", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
)
//stm: @CLI_WALLET_LIST_001
err := app.Run([]string{"wallet", "list", "--addr-only"})
assert.NoError(t, err)
assert.Contains(t, buf.String(), addr.String())
})
t.Run("wallet-list-id", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
gomock.InOrder(
mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
mockApi.EXPECT().StateGetActor(ctx, addr, key).Return(&actor, nil),
mockApi.EXPECT().StateLookupID(ctx, addr, key).Return(addr, nil),
)
//stm: @CLI_WALLET_LIST_002
err := app.Run([]string{"wallet", "list", "--id"})
assert.NoError(t, err)
})
t.Run("wallet-list-market", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
balance := api.MarketBalance{
Escrow: big.NewInt(1234),
Locked: big.NewInt(123),
}
gomock.InOrder(
mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
mockApi.EXPECT().StateGetActor(ctx, addr, key).Return(&actor, nil),
mockApi.EXPECT().StateMarketBalance(ctx, addr, key).Return(balance, nil),
)
//stm: @CLI_WALLET_LIST_003
err := app.Run([]string{"wallet", "list", "--market"})
assert.NoError(t, err)
})
}
func TestWalletBalance(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletBalance))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
balance := big.NewInt(1234)
mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)
//stm: @CLI_WALLET_BALANCE_001
err = app.Run([]string{"wallet", "balance", "f01234"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), balance.String())
}
func TestWalletGetDefault(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletGetDefault))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewFromString("t0123")
assert.NoError(t, err)
mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil)
//stm: @CLI_WALLET_GET_DEFAULT_001
err = app.Run([]string{"wallet", "default"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), addr.String())
}
func TestWalletSetDefault(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletSetDefault))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
mockApi.EXPECT().WalletSetDefault(ctx, addr).Return(nil)
//stm: @CLI_WALLET_SET_DEFAULT_001
err = app.Run([]string{"wallet", "set-default", "f01234"})
assert.NoError(t, err)
}
func TestWalletExport(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletExport))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
keyInfo := types.KeyInfo{
Type: types.KTSecp256k1,
PrivateKey: []byte("0x000000000000000000001"),
}
mockApi.EXPECT().WalletExport(ctx, addr).Return(&keyInfo, nil)
ki, err := json.Marshal(keyInfo)
assert.NoError(t, err)
//stm: @CLI_WALLET_EXPORT_001
err = app.Run([]string{"wallet", "export", "f01234"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), hex.EncodeToString(ki))
}
func TestWalletSign(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletSign))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewFromString("f01234")
assert.NoError(t, err)
msg, err := hex.DecodeString("01")
assert.NoError(t, err)
signature := crypto.Signature{
Type: crypto.SigTypeSecp256k1,
Data: []byte{0x01},
}
mockApi.EXPECT().WalletSign(ctx, addr, msg).Return(&signature, nil)
sigBytes := append([]byte{byte(signature.Type)}, signature.Data...)
//stm: @CLI_WALLET_SIGN_001
err = app.Run([]string{"wallet", "sign", "f01234", "01"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), hex.EncodeToString(sigBytes))
}
func TestWalletVerify(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletVerify))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
msg := []byte{1}
signature := crypto.Signature{
Type: crypto.SigTypeSecp256k1,
Data: []byte{},
}
mockApi.EXPECT().WalletVerify(ctx, addr, msg, &signature).Return(true, nil)
//stm: @CLI_WALLET_VERIFY_001
err = app.Run([]string{"wallet", "verify", "f01234", "01", "01"})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), "valid")
}
func TestWalletDelete(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletDelete))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
mockApi.EXPECT().WalletDelete(ctx, addr).Return(nil)
//stm: @CLI_WALLET_DELETE_001
err = app.Run([]string{"wallet", "delete", "f01234"})
assert.NoError(t, err)
}
func TestWalletMarketWithdraw(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletMarket))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
balance := api.MarketBalance{
Escrow: big.NewInt(100),
Locked: big.NewInt(10),
}
h, err := hex.DecodeString("12209cbc07c3f991725836a3aa2a581ca2029198aa420b9d99bc0e131d9f3e2cbe47")
assert.NoError(t, err)
cid := cid.NewCidV0(multihash.Multihash(h))
msgLookup := api.MsgLookup{}
var networkVers apitypes.NetworkVersion
gomock.InOrder(
mockApi.EXPECT().StateMarketBalance(ctx, addr, types.TipSetKey{}).Return(balance, nil),
// mock reserve to 10
mockApi.EXPECT().MarketGetReserved(ctx, addr).Return(big.NewInt(10), nil),
// available should be 80.. escrow - locked - reserve
mockApi.EXPECT().MarketWithdraw(ctx, addr, addr, big.NewInt(80)).Return(cid, nil),
mockApi.EXPECT().StateWaitMsg(ctx, cid, uint64(5), abi.ChainEpoch(int64(-1)), true).Return(&msgLookup, nil),
mockApi.EXPECT().StateNetworkVersion(ctx, types.TipSetKey{}).Return(networkVers, nil),
)
//stm: @CLI_WALLET_MARKET_WITHDRAW_001
err = app.Run([]string{"wallet", "market", "withdraw", "--wallet", addr.String()})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), fmt.Sprintf("WithdrawBalance message cid: %s", cid))
}
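Editorial note on the arithmetic asserted above: with Escrow = 100, Locked = 10 and the mocked reserve of 10 (all attoFIL), the withdrawable balance is escrow - locked - reserved = 100 - 10 - 10 = 80, which is why MarketWithdraw is expected to be called with big.NewInt(80).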
func TestWalletMarketAdd(t *testing.T) {
app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletMarket))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
toAddr := address.Address{}
defaultAddr := address.Address{}
h, err := hex.DecodeString("12209cbc07c3f991725836a3aa2a581ca2029198aa420b9d99bc0e131d9f3e2cbe47")
assert.NoError(t, err)
cid := cid.NewCidV0(multihash.Multihash(h))
gomock.InOrder(
mockApi.EXPECT().WalletDefaultAddress(ctx).Return(defaultAddr, nil),
mockApi.EXPECT().MarketAddBalance(ctx, defaultAddr, toAddr, big.NewInt(80)).Return(cid, nil),
)
//stm: @CLI_WALLET_MARKET_ADD_001
err = app.Run([]string{"wallet", "market", "add", "0.000000000000000080", "--address", toAddr.String()})
assert.NoError(t, err)
assert.Contains(t, buffer.String(), fmt.Sprintf("AddBalance message cid: %s", cid))
}

View File

@ -3,8 +3,10 @@ package main
import ( import (
"fmt" "fmt"
"os" "os"
"strings"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
@ -21,6 +23,7 @@ var dagstoreCmd = &cli.Command{
dagstoreRecoverShardCmd, dagstoreRecoverShardCmd,
dagstoreInitializeAllCmd, dagstoreInitializeAllCmd,
dagstoreGcCmd, dagstoreGcCmd,
dagstoreLookupPiecesCmd,
}, },
} }
@ -52,38 +55,7 @@ var dagstoreListShardsCmd = &cli.Command{
return err return err
} }
if len(shards) == 0 { return printTableShards(shards)
return nil
}
tw := tablewriter.New(
tablewriter.Col("Key"),
tablewriter.Col("State"),
tablewriter.Col("Error"),
)
colors := map[string]color.Attribute{
"ShardStateAvailable": color.FgGreen,
"ShardStateServing": color.FgBlue,
"ShardStateErrored": color.FgRed,
"ShardStateNew": color.FgYellow,
}
for _, s := range shards {
m := map[string]interface{}{
"Key": s.Key,
"State": func() string {
if c, ok := colors[s.State]; ok {
return color.New(c).Sprint(s.State)
}
return s.State
}(),
"Error": s.Error,
}
tw.Write(m)
}
return tw.Flush(os.Stdout)
}, },
} }
@ -265,3 +237,81 @@ var dagstoreGcCmd = &cli.Command{
return nil return nil
}, },
} }
func printTableShards(shards []api.DagstoreShardInfo) error {
if len(shards) == 0 {
return nil
}
tw := tablewriter.New(
tablewriter.Col("Key"),
tablewriter.Col("State"),
tablewriter.Col("Error"),
)
colors := map[string]color.Attribute{
"ShardStateAvailable": color.FgGreen,
"ShardStateServing": color.FgBlue,
"ShardStateErrored": color.FgRed,
"ShardStateNew": color.FgYellow,
}
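// Note: the color lookup below is keyed by the full state name; only the displayed string has the "ShardState" prefix trimmed.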
for _, s := range shards {
m := map[string]interface{}{
"Key": s.Key,
"State": func() string {
trimmedState := strings.TrimPrefix(s.State, "ShardState")
if c, ok := colors[s.State]; ok {
return color.New(c).Sprint(trimmedState)
}
return trimmedState
}(),
"Error": s.Error,
}
tw.Write(m)
}
return tw.Flush(os.Stdout)
}
var dagstoreLookupPiecesCmd = &cli.Command{
Name: "lookup-pieces",
Usage: "Lookup pieces that a given CID belongs to",
ArgsUsage: "<cid>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
},
Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
if cctx.NArg() != 1 {
return fmt.Errorf("must provide a CID")
}
cidStr := cctx.Args().First()
cid, err := cid.Parse(cidStr)
if err != nil {
return fmt.Errorf("invalid CID: %w", err)
}
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
shards, err := marketsApi.DagstoreLookupPieces(ctx, cid)
if err != nil {
return err
}
return printTableShards(shards)
},
}

View File

@ -0,0 +1,86 @@
package main
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
lcli "github.com/filecoin-project/lotus/cli"
)
var indexProvCmd = &cli.Command{
Name: "index",
Usage: "Manage the index provider on the markets subsystem",
Subcommands: []*cli.Command{
indexProvAnnounceCmd,
indexProvAnnounceAllCmd,
},
}
var indexProvAnnounceCmd = &cli.Command{
Name: "announce",
ArgsUsage: "<deal proposal cid>",
Usage: "Announce a deal to indexers so they can download its index",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
},
Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
if cctx.NArg() != 1 {
return fmt.Errorf("must provide the deal proposal CID")
}
proposalCidStr := cctx.Args().First()
proposalCid, err := cid.Parse(proposalCidStr)
if err != nil {
return fmt.Errorf("invalid deal proposal CID: %w", err)
}
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
return marketsApi.IndexerAnnounceDeal(ctx, proposalCid)
},
}
var indexProvAnnounceAllCmd = &cli.Command{
Name: "announce-all",
Usage: "Announce all active deals to indexers so they can download the indices",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
},
Action: func(cctx *cli.Context) error {
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
return marketsApi.IndexerAnnounceAllDeals(ctx)
},
}

View File

@ -126,7 +126,7 @@ func infoCmdAct(cctx *cli.Context) error {
alerts, err := minerApi.LogAlerts(ctx) alerts, err := minerApi.LogAlerts(ctx)
if err != nil { if err != nil {
return xerrors.Errorf("getting alerts: %w", err) fmt.Printf("ERROR: getting alerts: %s\n", err)
} }
activeAlerts := make([]alerting.Alert, 0) activeAlerts := make([]alerting.Alert, 0)
@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
var stateList = []stateMeta{ var stateList = []stateMeta{
{col: 39, state: "Total"}, {col: 39, state: "Total"},
{col: color.FgGreen, state: sealing.Proving}, {col: color.FgGreen, state: sealing.Proving},
{col: color.FgGreen, state: sealing.Available},
{col: color.FgGreen, state: sealing.UpdateActivating}, {col: color.FgGreen, state: sealing.UpdateActivating},
{col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.Empty},

View File

@ -96,6 +96,11 @@ var infoAllCmd = &cli.Command{
fmt.Println("ERROR: ", err) fmt.Println("ERROR: ", err)
} }
fmt.Println("\n#: Storage Locks")
if err := storageLocks.Action(cctx); err != nil {
fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Sched Diag") fmt.Println("\n#: Sched Diag")
if err := sealingSchedDiagCmd.Action(cctx); err != nil { if err := sealingSchedDiagCmd.Action(cctx); err != nil {
fmt.Println("ERROR: ", err) fmt.Println("ERROR: ", err)
@ -192,6 +197,11 @@ var infoAllCmd = &cli.Command{
fmt.Println("ERROR: ", err) fmt.Println("ERROR: ", err)
} }
fmt.Println("\n#: Storage Sector List")
if err := storageListSectorsCmd.Action(cctx); err != nil {
fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Expired Sectors") fmt.Println("\n#: Expired Sectors")
if err := sectorsExpiredCmd.Action(cctx); err != nil { if err := sectorsExpiredCmd.Action(cctx); err != nil {
fmt.Println("ERROR: ", err) fmt.Println("ERROR: ", err)

View File

@ -48,6 +48,7 @@ func main() {
lcli.WithCategory("market", retrievalDealsCmd), lcli.WithCategory("market", retrievalDealsCmd),
lcli.WithCategory("market", dataTransfersCmd), lcli.WithCategory("market", dataTransfersCmd),
lcli.WithCategory("market", dagstoreCmd), lcli.WithCategory("market", dagstoreCmd),
lcli.WithCategory("market", indexProvCmd),
lcli.WithCategory("storage", sectorsCmd), lcli.WithCategory("storage", sectorsCmd),
lcli.WithCategory("storage", provingCmd), lcli.WithCategory("storage", provingCmd),
lcli.WithCategory("storage", storageCmd), lcli.WithCategory("storage", storageCmd),

View File

@ -39,9 +39,12 @@ func barString(total, y, g float64) string {
yBars := int(math.Round(y / total * barCols)) yBars := int(math.Round(y / total * barCols))
gBars := int(math.Round(g / total * barCols)) gBars := int(math.Round(g / total * barCols))
eBars := int(barCols) - yBars - gBars eBars := int(barCols) - yBars - gBars
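// yBars and gBars are rounded independently, so their sum can exceed barCols and
// leave eBars negative; strings.Repeat panics on a negative count, hence the guard below.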
return color.YellowString(strings.Repeat("|", yBars)) + var barString = color.YellowString(strings.Repeat("|", yBars)) +
color.GreenString(strings.Repeat("|", gBars)) + color.GreenString(strings.Repeat("|", gBars))
strings.Repeat(" ", eBars) if eBars >= 0 {
barString += strings.Repeat(" ", eBars)
}
return barString
} }
var sealingWorkersCmd = &cli.Command{ var sealingWorkersCmd = &cli.Command{

View File

@ -161,7 +161,7 @@ var sectorsStatusCmd = &cli.Command{
fmt.Printf("Expiration:\t\t%v\n", status.Expiration) fmt.Printf("Expiration:\t\t%v\n", status.Expiration)
fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight) fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight)
fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight) fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight)
fmt.Printf("InitialPledge:\t\t%v\n", status.InitialPledge) fmt.Printf("InitialPledge:\t\t%v\n", types.FIL(status.InitialPledge))
fmt.Printf("\nExpiration Info\n") fmt.Printf("\nExpiration Info\n")
fmt.Printf("OnTime:\t\t%v\n", status.OnTime) fmt.Printf("OnTime:\t\t%v\n", status.OnTime)
fmt.Printf("Early:\t\t%v\n", status.Early) fmt.Printf("Early:\t\t%v\n", status.Early)
@ -294,8 +294,14 @@ var sectorsListCmd = &cli.Command{
Aliases: []string{"e"}, Aliases: []string{"e"},
}, },
&cli.BoolFlag{ &cli.BoolFlag{
Name: "seal-time", Name: "initial-pledge",
Usage: "display how long it took for the sector to be sealed", Usage: "display initial pledge",
Aliases: []string{"p"},
},
&cli.BoolFlag{
Name: "seal-time",
Usage: "display how long it took for the sector to be sealed",
Aliases: []string{"t"},
}, },
&cli.StringFlag{ &cli.StringFlag{
Name: "states", Name: "states",
@ -345,7 +351,7 @@ var sectorsListCmd = &cli.Command{
if cctx.Bool("unproven") { if cctx.Bool("unproven") {
for state := range sealing.ExistSectorStateList { for state := range sealing.ExistSectorStateList {
if state == sealing.Proving { if state == sealing.Proving || state == sealing.Available {
continue continue
} }
states = append(states, api.SectorState(state)) states = append(states, api.SectorState(state))
@ -405,6 +411,7 @@ var sectorsListCmd = &cli.Command{
tablewriter.Col("Deals"), tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"), tablewriter.Col("DealWeight"),
tablewriter.Col("VerifiedPower"), tablewriter.Col("VerifiedPower"),
tablewriter.Col("Pledge"),
tablewriter.NewLineCol("Error"), tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("RecoveryTimeout")) tablewriter.NewLineCol("RecoveryTimeout"))
@ -483,6 +490,9 @@ var sectorsListCmd = &cli.Command{
m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early)) m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
} }
} }
if inSSet && cctx.Bool("initial-pledge") {
m["Pledge"] = types.FIL(st.InitialPledge).Short()
}
} }
if !fast && deals > 0 { if !fast && deals > 0 {

View File

@ -368,6 +368,7 @@ type storedSector struct {
store stores.SectorStorageInfo store stores.SectorStorageInfo
unsealed, sealed, cache bool unsealed, sealed, cache bool
update, updatecache bool
} }
var storageFindCmd = &cli.Command{ var storageFindCmd = &cli.Command{
@ -421,6 +422,16 @@ var storageFindCmd = &cli.Command{
return xerrors.Errorf("finding cache: %w", err) return xerrors.Errorf("finding cache: %w", err)
} }
us, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUpdate, 0, false)
if err != nil {
return xerrors.Errorf("finding sealed: %w", err)
}
uc, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUpdateCache, 0, false)
if err != nil {
return xerrors.Errorf("finding cache: %w", err)
}
byId := map[stores.ID]*storedSector{} byId := map[stores.ID]*storedSector{}
for _, info := range u { for _, info := range u {
sts, ok := byId[info.ID] sts, ok := byId[info.ID]
@ -455,6 +466,28 @@ var storageFindCmd = &cli.Command{
} }
sts.cache = true sts.cache = true
} }
for _, info := range us {
sts, ok := byId[info.ID]
if !ok {
sts = &storedSector{
id: info.ID,
store: info,
}
byId[info.ID] = sts
}
sts.update = true
}
for _, info := range uc {
sts, ok := byId[info.ID]
if !ok {
sts = &storedSector{
id: info.ID,
store: info,
}
byId[info.ID] = sts
}
sts.updatecache = true
}
local, err := nodeApi.StorageLocal(ctx) local, err := nodeApi.StorageLocal(ctx)
if err != nil { if err != nil {
@ -480,6 +513,12 @@ var storageFindCmd = &cli.Command{
if info.cache { if info.cache {
types += "Cache, " types += "Cache, "
} }
if info.update {
types += "Update, "
}
if info.updatecache {
types += "UpdateCache, "
}
fmt.Printf("In %s (%s)\n", info.id, types[:len(types)-2]) fmt.Printf("In %s (%s)\n", info.id, types[:len(types)-2])
fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore) fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore)
@ -559,7 +598,7 @@ var storageListSectorsCmd = &cli.Command{
ft storiface.SectorFileType ft storiface.SectorFileType
urls string urls string
primary, seal, store bool primary, copy, main, seal, store bool
state api.SectorState state api.SectorState
} }
@ -587,8 +626,11 @@ var storageListSectorsCmd = &cli.Command{
urls: strings.Join(info.URLs, ";"), urls: strings.Join(info.URLs, ";"),
primary: info.Primary, primary: info.Primary,
seal: info.CanSeal, copy: !info.Primary && len(si) > 1,
store: info.CanStore, main: !info.Primary && len(si) == 1, // the only copy, and not marked primary
seal: info.CanSeal,
store: info.CanStore,
state: st.State, state: st.State,
}) })
@ -641,7 +683,7 @@ var storageListSectorsCmd = &cli.Command{
"Sector": e.id, "Sector": e.id,
"Type": e.ft.String(), "Type": e.ft.String(),
"State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state), "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state),
"Primary": maybeStr(e.seal, color.FgGreen, "primary"), "Primary": maybeStr(e.primary, color.FgGreen, "primary") + maybeStr(e.copy, color.FgBlue, "copy") + maybeStr(e.main, color.FgRed, "main"),
"Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"), "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"),
"URLs": e.urls, "URLs": e.urls,
} }

View File

@ -508,12 +508,19 @@ var genesisSetRemainderCmd = &cli.Command{
} }
var genesisSetActorVersionCmd = &cli.Command{ var genesisSetActorVersionCmd = &cli.Command{
Name: "set-network-version", Name: "set-network-version",
Usage: "Set the version that this network will start from", Usage: "Set the version that this network will start from",
ArgsUsage: "<genesisFile> <actorVersion>", Flags: []cli.Flag{
&cli.IntFlag{
Name: "network-version",
Usage: "network version to start genesis with",
Value: int(build.GenesisNetworkVersion),
},
},
ArgsUsage: "<genesisFile>",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 2 { if cctx.Args().Len() != 1 {
return fmt.Errorf("must specify genesis file and network version (e.g. '0'") return fmt.Errorf("must specify genesis file")
} }
genf, err := homedir.Expand(cctx.Args().First()) genf, err := homedir.Expand(cctx.Args().First())
@ -531,16 +538,12 @@ var genesisSetActorVersionCmd = &cli.Command{
return xerrors.Errorf("unmarshal genesis template: %w", err) return xerrors.Errorf("unmarshal genesis template: %w", err)
} }
nv, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) nv := network.Version(cctx.Int("network-version"))
if err != nil { if nv > build.NewestNetworkVersion {
return xerrors.Errorf("parsing network version: %w", err)
}
if nv > uint64(build.NewestNetworkVersion) {
return xerrors.Errorf("invalid network version: %d", nv) return xerrors.Errorf("invalid network version: %d", nv)
} }
template.NetworkVersion = network.Version(nv) template.NetworkVersion = nv
b, err = json.MarshalIndent(&template, "", " ") b, err = json.MarshalIndent(&template, "", " ")
if err != nil { if err != nil {

View File

@ -0,0 +1,342 @@
package main
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"io"
"os"
"strings"
"github.com/dgraph-io/badger/v2/y"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-base32"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
var datastoreVlog2CarCmd = &cli.Command{
Name: "vlog2car",
Usage: "convert badger blockstore .vlog to .car",
Flags: []cli.Flag{
&cli.PathFlag{
Name: "vlog",
Usage: "vlog file",
Required: true,
},
&cli.PathFlag{
Name: "car",
Usage: "out car file name (no .car)",
Required: true,
},
&cli.StringFlag{
Name: "key-prefix",
Usage: "datastore prefix",
Value: "/blocks/",
},
},
Action: func(cctx *cli.Context) error {
ctx := cctx.Context
maxSz := uint64(1 << 20)
carb := &rawCarb{
max: maxSz,
blocks: map[cid.Cid]block.Block{},
}
cars := 0
pref := cctx.String("key-prefix")
plen := len(pref)
{
// NOTE: Some bits of code in this code block come from https://github.com/dgraph-io/badger, which is licensed
// under Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE
vf, err := os.Open(cctx.Path("vlog"))
if err != nil {
return xerrors.Errorf("open vlog file: %w", err)
}
if _, err := vf.Seek(20, io.SeekStart); err != nil {
return xerrors.Errorf("seek past vlog start: %w", err)
}
reader := bufio.NewReader(vf)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: 20,
}
loop:
for {
e, err := read.Entry(reader)
switch {
case err == io.EOF:
break loop
case err == io.ErrUnexpectedEOF || err == errTruncate:
break loop
case err != nil:
return xerrors.Errorf("entry read error: %w", err)
case e == nil:
continue
}
if e.meta&0x40 > 0 {
e.Key = e.Key[:len(e.Key)-8]
} else if e.meta > 0 {
if e.meta&0x3f > 0 {
log.Infof("unk meta m:%x; k:%x, v:%60x", e.meta, e.Key, e.Value)
}
continue
}
{
if plen > 0 && !strings.HasPrefix(string(e.Key), pref) {
log.Infow("no blocks prefix", "key", string(e.Key))
continue
}
h, err := base32.RawStdEncoding.DecodeString(string(e.Key[plen:]))
if err != nil {
return xerrors.Errorf("decode b32 ds key %x: %w", e.Key, err)
}
c := cid.NewCidV1(cid.Raw, h)
b, err := block.NewBlockWithCid(e.Value, c)
if err != nil {
return xerrors.Errorf("readblk: %w", err)
}
err = carb.consume(c, b)
switch err {
case nil:
case errFullCar:
root, err := carb.finalize()
if err != nil {
return xerrors.Errorf("carb finalize: %w", err)
}
if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil {
return xerrors.Errorf("writeCar: %w", err)
}
cars++
carb = &rawCarb{
max: maxSz,
blocks: map[cid.Cid]block.Block{},
}
default:
return xerrors.Errorf("carb consume: %w", err)
}
}
}
if err := vf.Close(); err != nil {
return err
}
}
root, err := carb.finalize()
if err != nil {
return xerrors.Errorf("carb finalize: %w", err)
}
if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil {
return xerrors.Errorf("writeCar: %w", err)
}
return nil
},
}
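The essential step in the command above is recovering a block CID from a raw badger datastore key. A minimal sketch of that mapping, assuming the same imports as this file (the helper name and its standalone form are illustrative, not part of the change):

func keyToBlock(key, value []byte, prefix string) (block.Block, error) {
	// Lotus's badger blockstore stores blocks under prefix + base32(RawStdEncoding)
	// of the block's multihash; the value bytes are the raw block data.
	h, err := base32.RawStdEncoding.DecodeString(strings.TrimPrefix(string(key), prefix))
	if err != nil {
		return nil, xerrors.Errorf("decode base32 datastore key %x: %w", key, err)
	}
	// Only the multihash is persisted, so the codec is reconstructed as cid.Raw,
	// exactly as the Action above does.
	return block.NewBlockWithCid(value, cid.NewCidV1(cid.Raw, h))
}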
// NOTE: Code below comes (with slight modifications) from https://github.com/dgraph-io/badger/blob/master/value.go
// Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE
var errTruncate = errors.New("do truncate")
// hashReader implements the io.Reader and io.ByteReader interfaces. It also keeps track of the number of
// bytes read. The hashReader writes to h (hash) what it reads from r.
type hashReader struct {
r io.Reader
h hash.Hash32
bytesRead int // Number of bytes read.
}
func newHashReader(r io.Reader) *hashReader {
hash := crc32.New(y.CastagnoliCrcTable)
return &hashReader{
r: r,
h: hash,
}
}
// Read reads len(p) bytes from the reader. Returns the number of bytes read, error on failure.
func (t *hashReader) Read(p []byte) (int, error) {
n, err := t.r.Read(p)
if err != nil {
return n, err
}
t.bytesRead += n
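// crc32's Write never returns an error and reports len(p[:n]), so this returns the count of bytes just read.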
return t.h.Write(p[:n])
}
// ReadByte reads exactly one byte from the reader. Returns error on failure.
func (t *hashReader) ReadByte() (byte, error) {
b := make([]byte, 1)
_, err := t.Read(b)
return b[0], err
}
// Sum32 returns the sum32 of the underlying hash.
func (t *hashReader) Sum32() uint32 {
return t.h.Sum32()
}
type safeRead struct {
k []byte
v []byte
recordOffset uint32
}
// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
// the user to set data.
type Entry struct {
Key []byte
Value []byte
UserMeta byte
ExpiresAt uint64 // time.Unix
meta byte
// Fields maintained internally.
offset uint32
hlen int // Length of the header.
}
// Entry reads an entry from the provided reader. It also validates the checksum for every entry
// read. Returns error on failure.
func (r *safeRead) Entry(reader io.Reader) (*Entry, error) {
tee := newHashReader(reader)
var h header
hlen, err := h.DecodeFrom(tee)
if err != nil {
return nil, err
}
if h.klen > uint32(1<<16) { // Key length must be below uint16.
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.hlen = hlen
buf := make([]byte, h.klen+h.vlen)
if _, err := io.ReadFull(tee, buf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
e.Key = buf[:h.klen]
e.Value = buf[h.klen:]
var crcBuf [crc32.Size]byte
if _, err := io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := y.BytesToU32(crcBuf[:])
if crc != tee.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
e.UserMeta = h.userMeta
e.ExpiresAt = h.expiresAt
return e, nil
}
// header is used in value log as a header before Entry.
type header struct {
klen uint32
vlen uint32
expiresAt uint64
meta byte
userMeta byte
}
// Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. The
// function will panic if out []byte isn't large enough to hold all the values.
// The encoded header looks like
// +------+----------+------------+--------------+-----------+
// | Meta | UserMeta | Key Length | Value Length | ExpiresAt |
// +------+----------+------------+--------------+-----------+
func (h header) Encode(out []byte) int {
out[0], out[1] = h.meta, h.userMeta
index := 2
index += binary.PutUvarint(out[index:], uint64(h.klen))
index += binary.PutUvarint(out[index:], uint64(h.vlen))
index += binary.PutUvarint(out[index:], h.expiresAt)
return index
}
// Decode decodes the given header from the provided byte slice.
// Returns the number of bytes read.
func (h *header) Decode(buf []byte) int {
h.meta, h.userMeta = buf[0], buf[1]
index := 2
klen, count := binary.Uvarint(buf[index:])
h.klen = uint32(klen)
index += count
vlen, count := binary.Uvarint(buf[index:])
h.vlen = uint32(vlen)
index += count
h.expiresAt, count = binary.Uvarint(buf[index:])
return index + count
}
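As a quick sanity check on the layout documented above (two flag bytes followed by uvarints for klen, vlen and expiresAt), here is a hypothetical encode/decode round-trip using this file's header type; the chosen values and the 22-byte buffer (2 + 5 + 5 + 10 worst-case bytes) are assumptions for illustration only:

func headerRoundTrip() {
	in := header{klen: 44, vlen: 1 << 16, expiresAt: 0, meta: 0x40, userMeta: 0}
	buf := make([]byte, 22) // enough for the worst case: 2 flag bytes + three maximal uvarints
	n := in.Encode(buf)     // 2 + 1 + 3 + 1 = 7 bytes for these values

	var out header
	read := out.Decode(buf[:n]) // read == n and out == in
	_ = read
}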
// DecodeFrom reads the header from the hashReader.
// Returns the number of bytes read.
func (h *header) DecodeFrom(reader *hashReader) (int, error) {
var err error
h.meta, err = reader.ReadByte()
if err != nil {
return 0, err
}
h.userMeta, err = reader.ReadByte()
if err != nil {
return 0, err
}
klen, err := binary.ReadUvarint(reader)
if err != nil {
return 0, err
}
h.klen = uint32(klen)
vlen, err := binary.ReadUvarint(reader)
if err != nil {
return 0, err
}
h.vlen = uint32(vlen)
h.expiresAt, err = binary.ReadUvarint(reader)
if err != nil {
return 0, err
}
return reader.bytesRead, nil
}

View File

@ -32,6 +32,7 @@ var datastoreCmd = &cli.Command{
datastoreListCmd, datastoreListCmd,
datastoreGetCmd, datastoreGetCmd,
datastoreRewriteCmd, datastoreRewriteCmd,
datastoreVlog2CarCmd,
}, },
} }

Some files were not shown because too many files have changed in this diff