Merge remote-tracking branch 'origin/master' into lock-lotus-version-in-testplan
commit 4f128accc4

@ -1,6 +1,7 @@
version: 2.1
|
||||
orbs:
|
||||
go: gotest/tools@0.0.13
|
||||
aws-cli: circleci/aws-cli@1.3.2
|
||||
|
||||
executors:
|
||||
golang:
|
||||
@ -200,6 +201,10 @@ jobs:
|
||||
<<: *test
|
||||
test-window-post:
|
||||
<<: *test
|
||||
test-window-post-dispute:
|
||||
<<: *test
|
||||
test-terminate:
|
||||
<<: *test
|
||||
test-conformance:
|
||||
description: |
|
||||
Run tests using a corpus of interoperable test vectors for Filecoin
|
||||
@ -262,6 +267,16 @@ jobs:
|
||||
path: /tmp/test-reports
|
||||
- store_artifacts:
|
||||
path: /tmp/test-artifacts/conformance-coverage.html
|
||||
build-ntwk-calibration:
|
||||
description: |
|
||||
Compile lotus binaries for the calibration network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make calibnet
|
||||
build-lotus-soup:
|
||||
description: |
|
||||
Compile `lotus-soup` Testground test plan
|
||||
@ -448,6 +463,114 @@ jobs:
|
||||
name: Publish release
|
||||
command: ./scripts/publish-release.sh
|
||||
|
||||
build-and-push-image:
|
||||
description: build and push docker images to public AWS ECR registry
|
||||
executor: aws-cli/default
|
||||
parameters:
|
||||
profile-name:
|
||||
type: string
|
||||
default: "default"
|
||||
description: AWS profile name to be configured.
|
||||
|
||||
aws-access-key-id:
|
||||
type: env_var_name
|
||||
default: AWS_ACCESS_KEY_ID
|
||||
description: >
|
||||
AWS access key id for IAM role. Set this to the name of
|
||||
the environment variable you will set to hold this
|
||||
value, i.e. AWS_ACCESS_KEY.
|
||||
|
||||
aws-secret-access-key:
|
||||
type: env_var_name
|
||||
default: AWS_SECRET_ACCESS_KEY
|
||||
description: >
|
||||
AWS secret key for IAM role. Set this to the name of
|
||||
the environment variable you will set to hold this
|
||||
value, i.e. AWS_SECRET_ACCESS_KEY.
|
||||
|
||||
region:
|
||||
type: env_var_name
|
||||
default: AWS_REGION
|
||||
description: >
|
||||
Name of env var storing your AWS region information,
|
||||
defaults to AWS_REGION
|
||||
|
||||
account-url:
|
||||
type: env_var_name
|
||||
default: AWS_ECR_ACCOUNT_URL
|
||||
description: >
|
||||
Env var storing Amazon ECR account URL that maps to an AWS account,
|
||||
e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
|
||||
defaults to AWS_ECR_ACCOUNT_URL
|
||||
|
||||
dockerfile:
|
||||
type: string
|
||||
default: Dockerfile
|
||||
description: Name of dockerfile to use. Defaults to Dockerfile.
|
||||
|
||||
path:
|
||||
type: string
|
||||
default: .
|
||||
description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
|
||||
|
||||
extra-build-args:
|
||||
type: string
|
||||
default: ""
|
||||
description: >
|
||||
Extra flags to pass to docker build. For examples, see
|
||||
https://docs.docker.com/engine/reference/commandline/build
|
||||
|
||||
repo:
|
||||
type: string
|
||||
description: Name of an Amazon ECR repository
|
||||
|
||||
tag:
|
||||
type: string
|
||||
default: "latest"
|
||||
description: A comma-separated string containing docker image tags to build and push (default = latest)
|
||||
|
||||
steps:
|
||||
- aws-cli/setup:
|
||||
profile-name: <<parameters.profile-name>>
|
||||
aws-access-key-id: <<parameters.aws-access-key-id>>
|
||||
aws-secret-access-key: <<parameters.aws-secret-access-key>>
|
||||
aws-region: <<parameters.region>>
|
||||
|
||||
- run:
|
||||
name: Log into Amazon ECR
|
||||
command: |
|
||||
aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>
|
||||
|
||||
- checkout
|
||||
|
||||
- setup_remote_docker:
|
||||
version: 19.03.13
|
||||
docker_layer_caching: false
|
||||
|
||||
- run:
|
||||
name: Build docker image
|
||||
command: |
|
||||
registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")
|
||||
|
||||
docker_tag_args=""
|
||||
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
|
||||
for tag in "${DOCKER_TAGS[@]}"; do
|
||||
docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
|
||||
done
|
||||
|
||||
docker build \
|
||||
<<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
|
||||
-f <<parameters.path>>/<<parameters.dockerfile>> \
|
||||
$docker_tag_args \
|
||||
<<parameters.path>>
|
||||
|
||||
- run:
|
||||
name: Push image to Amazon ECR
|
||||
command: |
|
||||
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
|
||||
for tag in "${DOCKER_TAGS[@]}"; do
|
||||
docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
|
||||
done
|
||||
|
||||
workflows:
|
||||
version: 2.1
|
||||
@ -479,9 +602,20 @@ workflows:
|
||||
test-suite-name: cli
|
||||
packages: "./cli/... ./cmd/... ./api/..."
|
||||
- test-window-post:
|
||||
codecov-upload: true
|
||||
go-test-flags: "-run=TestWindowedPost"
|
||||
winpost-test: "1"
|
||||
test-suite-name: window-post
|
||||
- test-window-post-dispute:
|
||||
codecov-upload: true
|
||||
go-test-flags: "-run=TestWindowPostDispute"
|
||||
winpost-test: "1"
|
||||
test-suite-name: window-post-dispute
|
||||
- test-terminate:
|
||||
codecov-upload: true
|
||||
go-test-flags: "-run=TestTerminate"
|
||||
winpost-test: "1"
|
||||
test-suite-name: terminate
|
||||
- test-short:
|
||||
go-test-flags: "--timeout 10m --short"
|
||||
test-suite-name: short
|
||||
@ -497,6 +631,7 @@ workflows:
|
||||
test-suite-name: conformance-bleeding-edge
|
||||
packages: "./conformance"
|
||||
vectors-branch: master
|
||||
- build-ntwk-calibration
|
||||
- build-lotus-soup
|
||||
- trigger-testplans:
|
||||
filters:
|
||||
@ -533,3 +668,8 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+$/
|
||||
- build-and-push-image:
|
||||
dockerfile: Dockerfile.lotus
|
||||
path: .
|
||||
repo: lotus-dev
|
||||
tag: '${CIRCLE_SHA1:0:8}'
|
||||
|
10 .codecov.yml
@ -5,5 +5,15 @@ ignore:
|
||||
- "api/test/*"
|
||||
- "gen/**/*"
|
||||
- "gen/*"
|
||||
- "cmd/lotus-shed/*"
|
||||
- "cmd/tvx/*"
|
||||
- "cmd/lotus-pcr/*"
|
||||
- "cmd/tvx/*"
|
||||
- "cmd/lotus-chainwatch/*"
|
||||
- "cmd/lotus-health/*"
|
||||
- "cmd/lotus-fountain/*"
|
||||
- "cmd/lotus-townhall/*"
|
||||
- "cmd/lotus-stats/*"
|
||||
- "cmd/lotus-pcr/*"
|
||||
github_checks:
|
||||
annotations: false
|
||||
|
3 .gitmodules (vendored)
@ -7,6 +7,3 @@
|
||||
[submodule "extern/test-vectors"]
|
||||
path = extern/test-vectors
|
||||
url = https://github.com/filecoin-project/test-vectors.git
|
||||
[submodule "extern/blst"]
|
||||
path = extern/blst
|
||||
url = https://github.com/supranational/blst.git
|
||||
|
122 CHANGELOG.md
@ -1,5 +1,125 @@
|
||||
# Lotus changelog
|
||||
|
||||
# 1.4.1 / 2021-01-20
|
||||
|
||||
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Core Lotus
|
||||
|
||||
- fix(sync): enforce ForkLengthThreshold for synced chain (https://github.com/filecoin-project/lotus/pull/5182)
|
||||
- introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101)
|
||||
- Skip bootstrapping if no peers specified (https://github.com/filecoin-project/lotus/pull/5301)
|
||||
- Chainxchg write response timeout (https://github.com/filecoin-project/lotus/pull/5254)
|
||||
- update NewestNetworkVersion (https://github.com/filecoin-project/lotus/pull/5277)
|
||||
- fix(sync): remove checks bypass when we submit the block (https://github.com/filecoin-project/lotus/pull/4192)
|
||||
- chore: export vm.ShouldBurn (https://github.com/filecoin-project/lotus/pull/5355)
|
||||
- fix(sync): enforce fork len when changing head (https://github.com/filecoin-project/lotus/pull/5244)
|
||||
- Use 55th percentile instead of median for gas-price (https://github.com/filecoin-project/lotus/pull/5369)
|
||||
- update go-libp2p-pubsub to v0.4.1 (https://github.com/filecoin-project/lotus/pull/5329)
|
||||
|
||||
#### Sealing
|
||||
|
||||
- Sector termination support (https://github.com/filecoin-project/lotus/pull/5341)
|
||||
- update weight canSeal and canStore when attach (https://github.com/filecoin-project/lotus/pull/5242/files)
|
||||
- sector-storage/mock: improve mocked readpiece (https://github.com/filecoin-project/lotus/pull/5208)
|
||||
- Fix deadlock in runWorker in sched_worker.go (https://github.com/filecoin-project/lotus/pull/5251)
|
||||
- Skip checking terminated sectors provable (https://github.com/filecoin-project/lotus/pull/5217)
|
||||
- storagefsm: Fix unsealedInfoMap.lk init race (https://github.com/filecoin-project/lotus/pull/5319)
|
||||
- Multicore AddPiece CommP (https://github.com/filecoin-project/lotus/pull/5320)
|
||||
- storagefsm: Send correct event on ErrExpiredTicket in CommitFailed (https://github.com/filecoin-project/lotus/pull/5366)
|
||||
- expose StateSearchMessage on gateway (https://github.com/filecoin-project/lotus/pull/5382)
|
||||
- fix FileSize to return correct disk usage recursively (https://github.com/filecoin-project/lotus/pull/5384)
|
||||
|
||||
#### Dealmaking
|
||||
|
||||
- Better error message when withdrawing funds (https://github.com/filecoin-project/lotus/pull/5293)
|
||||
- add verbose for list transfers (https://github.com/filecoin-project/lotus/pull/5259)
|
||||
- cli - rename `client info` to `client balances` (https://github.com/filecoin-project/lotus/pull/5304)
|
||||
- Better CLI for wallet market withdraw and client info (https://github.com/filecoin-project/lotus/pull/5303)
|
||||
|
||||
#### UX
|
||||
|
||||
- correct flag usages for replace cmd (https://github.com/filecoin-project/lotus/pull/5255)
|
||||
- lotus state call will panic (https://github.com/filecoin-project/lotus/pull/5275)
|
||||
- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976)
|
||||
- feat: lotus wallet market add (adds funds to storage market actor) (https://github.com/filecoin-project/lotus/pull/5300)
|
||||
- Fix client flag parsing in client balances cli (https://github.com/filecoin-project/lotus/pull/5312)
|
||||
- delete slash-consensus miner (https://github.com/filecoin-project/lotus/pull/4577)
|
||||
- add fund sufficient check in send (https://github.com/filecoin-project/lotus/pull/5252)
|
||||
- enable parse and shorten negative FIL values (https://github.com/filecoin-project/lotus/pull/5315)
|
||||
- add limit and rate for chain noise (https://github.com/filecoin-project/lotus/pull/5223)
|
||||
- add bench env print (https://github.com/filecoin-project/lotus/pull/5222)
|
||||
- Implement full-node restore option (https://github.com/filecoin-project/lotus/pull/5362)
|
||||
- add color for token amount (https://github.com/filecoin-project/lotus/pull/5352)
|
||||
- correct log in maybeUseAddress (https://github.com/filecoin-project/lotus/pull/5359)
|
||||
- add slash-consensus from flag (https://github.com/filecoin-project/lotus/pull/5378)
|
||||
|
||||
#### Testing
|
||||
|
||||
- tvx extract: more tipset extraction goodness (https://github.com/filecoin-project/lotus/pull/5258)
|
||||
- Fix race in blockstore test suite (https://github.com/filecoin-project/lotus/pull/5297)
|
||||
|
||||
|
||||
#### Build & Networks
|
||||
|
||||
- Remove LOTUS_DISABLE_V2_ACTOR_MIGRATION envvar (https://github.com/filecoin-project/lotus/pull/5289)
|
||||
- Create a calibnet build option (https://github.com/filecoin-project/lotus/pull/5288)
|
||||
- Calibnet: Set Orange epoch (https://github.com/filecoin-project/lotus/pull/5325)
|
||||
|
||||
#### Management
|
||||
|
||||
- Update SECURITY.md (https://github.com/filecoin-project/lotus/pull/5246)
|
||||
- README: Contribute section (https://github.com/filecoin-project/lotus/pull/5330)
|
||||
- README: refine Contribute section (https://github.com/filecoin-project/lotus/pull/5331)
|
||||
- Add misc tooling to codecov ignore list (https://github.com/filecoin-project/lotus/pull/5347)
|
||||
|
||||
# 1.4.0 / 2020-12-19
|
||||
|
||||
This is a MANDATORY hotfix release of Lotus that resolves a chain halt at height 336,459 caused by nondeterminism in specs-actors. The fix updates actors to v2.3.3, incorporating https://github.com/filecoin-project/specs-actors/pull/1334.
|
||||
|
||||
# 1.3.0 / 2020-12-16
|
||||
|
||||
This is a mandatory release of Lotus that introduces the third post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 343200, before which time all nodes must have updated to this release (or later). The change that breaks consensus is an implementation of FIP-0009 (https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md).
|
||||
|
||||
## Changes
|
||||
|
||||
- Disable gas burning for window post messages (https://github.com/filecoin-project/lotus/pull/5200)
|
||||
- fix lock propose (https://github.com/filecoin-project/lotus/pull/5197)
|
||||
|
||||
# 1.2.3 / 2020-12-15
|
||||
|
||||
This is an optional Lotus release that introduces many performance improvements, bugfixes, and UX improvements.
|
||||
|
||||
## Changes
|
||||
|
||||
- When waiting for deal commit messages, ignore unsuccessful messages (https://github.com/filecoin-project/lotus/pull/5189)
|
||||
- Bigger copy buffer size for stores (https://github.com/filecoin-project/lotus/pull/5177)
|
||||
- Print MinPieceSize when querying ask (https://github.com/filecoin-project/lotus/pull/5178)
|
||||
- Optimize miner info & sectors list loading (https://github.com/filecoin-project/lotus/pull/5176)
|
||||
- Allow miners to filter (un)verified deals (https://github.com/filecoin-project/lotus/pull/5094)
|
||||
- Fix curSealing out of MaxSealingSectors limit (https://github.com/filecoin-project/lotus/pull/5166)
|
||||
- Add mpool pending from / to filter (https://github.com/filecoin-project/lotus/pull/5169)
|
||||
- Add metrics for delayed blocks (https://github.com/filecoin-project/lotus/pull/5171)
|
||||
- Fix PushUntrusted publishing -- the message is local (https://github.com/filecoin-project/lotus/pull/5173)
|
||||
- Avoid potential hang in events API when starting event listener (https://github.com/filecoin-project/lotus/pull/5159)
|
||||
- Show data transfer ID in list-deals (https://github.com/filecoin-project/lotus/pull/5150)
|
||||
- Fix events API mutex locking (https://github.com/filecoin-project/lotus/pull/5160)
|
||||
- Message pool refactors (https://github.com/filecoin-project/lotus/pull/5162)
|
||||
- Fix lotus-shed cid output (https://github.com/filecoin-project/lotus/pull/5072)
|
||||
- Use FundManager to withdraw funds, add MarketWithdraw API (https://github.com/filecoin-project/lotus/pull/5112)
|
||||
- Add keygen outfile (https://github.com/filecoin-project/lotus/pull/5118)
|
||||
- Update sr2 stat aggregation (https://github.com/filecoin-project/lotus/pull/5114)
|
||||
- Fix miner control address lookup (https://github.com/filecoin-project/lotus/pull/5119)
|
||||
- Fix send with declared nonce 0 (https://github.com/filecoin-project/lotus/pull/5111)
|
||||
- Introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101)
|
||||
- Miner control address config for (pre)commits (https://github.com/filecoin-project/lotus/pull/5103)
|
||||
- Delete repeated call func (https://github.com/filecoin-project/lotus/pull/5099)
|
||||
- lotus-shed ledger show command (https://github.com/filecoin-project/lotus/pull/5098)
|
||||
- Log a message when there aren't enough peers for sync (https://github.com/filecoin-project/lotus/pull/5105)
|
||||
- Miner code cleanup (https://github.com/filecoin-project/lotus/pull/5107)
|
||||
|
||||
# 1.2.2 / 2020-12-03
|
||||
|
||||
This is an optional Lotus release that introduces various improvements to the mining logic and deal-making workflow, as well as several new UX features.
|
||||
@ -88,7 +208,7 @@ This is a very small release of Lotus that fixes an issue users are experiencing
|
||||
|
||||
# 1.2.0 / 2020-11-18
|
||||
|
||||
This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have update to this release (or later). This release also bumps the required version of Go to 1.15.
|
||||
This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15.
|
||||
|
||||
The changes that break consensus are:
|
||||
|
||||
|
74 Dockerfile.lotus (new file)
@ -0,0 +1,74 @@
|
||||
FROM golang:1.15.6 AS builder-deps
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||
|
||||
ARG RUST_VERSION=nightly
|
||||
ENV XDG_CACHE_HOME="/tmp"
|
||||
|
||||
ENV RUSTUP_HOME=/usr/local/rustup \
|
||||
CARGO_HOME=/usr/local/cargo \
|
||||
PATH=/usr/local/cargo/bin:$PATH
|
||||
|
||||
RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
|
||||
chmod +x rustup-init; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
|
||||
rm rustup-init; \
|
||||
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
|
||||
rustup --version; \
|
||||
cargo --version; \
|
||||
rustc --version;
|
||||
|
||||
|
||||
FROM builder-deps AS builder-local
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
COPY ./ /opt/filecoin
|
||||
WORKDIR /opt/filecoin
|
||||
RUN make clean deps
|
||||
|
||||
|
||||
FROM builder-local AS builder
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
WORKDIR /opt/filecoin
|
||||
|
||||
ARG RUSTFLAGS=""
|
||||
ARG GOFLAGS=""
|
||||
|
||||
RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats
|
||||
|
||||
|
||||
FROM ubuntu:20.04 AS base
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
# Base resources
|
||||
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
|
||||
|
||||
RUN useradd -r -u 532 -U fc
|
||||
|
||||
|
||||
FROM base AS lotus
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
|
||||
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
|
||||
|
||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
||||
ENV LOTUS_PATH /var/lib/lotus
|
||||
|
||||
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters
|
||||
|
||||
USER fc
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/lotus"]
|
||||
|
||||
CMD ["-help"]
|
3 Makefile
@ -63,6 +63,9 @@ debug: lotus lotus-miner lotus-worker lotus-seed
|
||||
2k: GOFLAGS+=-tags=2k
|
||||
2k: lotus lotus-miner lotus-worker lotus-seed
|
||||
|
||||
calibnet: GOFLAGS+=-tags=calibnet
|
||||
calibnet: lotus lotus-miner lotus-worker lotus-seed
|
||||
|
||||
lotus: $(BUILD_DEPS)
|
||||
rm -f lotus
|
||||
go build $(GOFLAGS) -o lotus ./cmd/lotus
|
||||
|
37 README.md
@ -24,24 +24,31 @@ For instructions on how to build, install and setup lotus, please visit [https:/
|
||||
|
||||
Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
|
||||
|
||||
## Development
|
||||
## Related packages
|
||||
|
||||
The main branches under development at the moment are:
|
||||
* [`master`](https://github.com/filecoin-project/lotus): current testnet.
|
||||
* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
|
||||
* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits.
|
||||
These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:
|
||||
|
||||
### Tracker
|
||||
|
||||
All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch.
|
||||
|
||||
### Packages
|
||||
|
||||
The lotus Filecoin implementation unfolds into the following packages:
|
||||
|
||||
- [This repo](https://github.com/filecoin-project/lotus)
|
||||
- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
|
||||
- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
|
||||
- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
|
||||
|
||||
## Contribute
|
||||
|
||||
Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations:
|
||||
|
||||
1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs).
|
||||
2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs in case the change is not actually needed or accepted.
|
||||
3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn.
|
||||
|
||||
When implementing a change:
|
||||
|
||||
1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`.
|
||||
2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc.
|
||||
3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go.
|
||||
4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers.
|
||||
5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted).
|
||||
6. Add tests.
|
||||
7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
|
||||
8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.
|
||||
|
||||
## License
|
||||
|
||||
|
@ -2,11 +2,11 @@
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md
|
||||
For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/).
|
||||
|
||||
Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
|
||||
|
||||
Here are some examples of bugs we would consider 'critical':
|
||||
Here are some examples of bugs we would consider to be security vulnerabilities:
|
||||
|
||||
* If you can spend from a `multisig` wallet you do not control the keys for.
|
||||
* If you can cause a miner to be slashed without them actually misbehaving.
|
||||
@ -16,8 +16,8 @@ Here are some examples of bugs we would consider 'critical':
|
||||
* If you can craft a message that causes a persistent fork in the network.
|
||||
* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
|
||||
|
||||
This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
|
||||
This is not an exhaustive list, but it should provide some idea of what we consider to be a security vulnerability.
|
||||
|
||||
## Reporting a non security bug
|
||||
|
||||
For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
|
||||
For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
|
||||
|
@ -391,6 +391,8 @@ type FullNode interface {
|
||||
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
|
||||
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
|
||||
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
|
||||
// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
|
||||
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*MsgLookup, error)
|
||||
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
|
||||
// message arrives on chain, and gets to the indicated confidence depth.
|
||||
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
|
||||
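For orientation, here is a minimal sketch of how client code might combine the existing StateSearchMsg with the StateSearchMsgLimited method added in this hunk. The findReceipt helper, the 100-epoch limit, and the way the api.FullNode handle is obtained are illustrative assumptions, not part of this change; whether a "not found within the limit" result surfaces as an error or a nil lookup is also left open here.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// findReceipt first looks back a bounded number of epochs, then falls back to
// the unbounded search if the message was not located.
func findReceipt(ctx context.Context, node api.FullNode, msg cid.Cid) (*api.MsgLookup, error) {
	lookup, err := node.StateSearchMsgLimited(ctx, msg, abi.ChainEpoch(100))
	if err != nil {
		// Depending on the implementation, "not found within the limit" may be
		// reported as an error; treat any error here as "try the full search".
		fmt.Println("bounded search failed, falling back to full-chain search:", err)
		return node.StateSearchMsg(ctx, msg)
	}
	if lookup != nil {
		return lookup, nil
	}
	// A nil lookup with no error also means "not found"; fall back as well.
	return node.StateSearchMsg(ctx, msg)
}
```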
@ -515,6 +517,10 @@ type FullNode interface {
|
||||
// along with the address removal.
|
||||
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)
|
||||
|
||||
// MarketAddBalance adds funds to the market actor
|
||||
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error)
|
||||
// MarketGetReserved gets the amount of funds that are currently reserved for the address
|
||||
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error)
|
||||
// MarketReserveFunds reserves funds for a deal
|
||||
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error)
|
||||
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
|
||||
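A small sketch of how the new market-funds calls on the FullNode API might be driven from client code. The topUpMarket helper, the example amount, and how the wallet/addr pair is chosen are assumptions made purely for illustration.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// topUpMarket adds funds to the storage market actor on behalf of addr, paying
// from wallet, and then reports how much of addr's balance is currently reserved.
func topUpMarket(ctx context.Context, node api.FullNode, wallet, addr address.Address) error {
	amt := types.NewInt(1_000_000) // attoFIL, purely illustrative

	msgCid, err := node.MarketAddBalance(ctx, wallet, addr, amt)
	if err != nil {
		return err
	}
	fmt.Println("add-balance message pushed:", msgCid)

	reserved, err := node.MarketGetReserved(ctx, addr)
	if err != nil {
		return err
	}
	fmt.Println("currently reserved:", reserved)
	return nil
}
```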
@ -943,7 +949,8 @@ const (
|
||||
)
|
||||
|
||||
type Deadline struct {
|
||||
PostSubmissions bitfield.BitField
|
||||
PostSubmissions bitfield.BitField
|
||||
DisputableProofCount uint64
|
||||
}
|
||||
|
||||
type Partition struct {
|
||||
|
@ -39,6 +39,7 @@ type GatewayAPI interface {
|
||||
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
||||
StateSearchMsg(ctx context.Context, msg cid.Cid) (*MsgLookup, error)
|
||||
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
|
||||
|
@ -65,7 +65,17 @@ type StorageMiner interface {
|
||||
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
|
||||
SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
|
||||
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
|
||||
// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
|
||||
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
|
||||
SectorRemove(context.Context, abi.SectorNumber) error
|
||||
// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
|
||||
// automatically removes it from storage
|
||||
SectorTerminate(context.Context, abi.SectorNumber) error
|
||||
// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
|
||||
// Returns nil if the message wasn't sent
|
||||
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error)
|
||||
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
|
||||
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error)
|
||||
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
|
||||
|
||||
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
|
||||
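As a rough sketch (not part of this diff), here is how the three new termination calls on the StorageMiner API compose. The terminateNow helper and its printed output are illustrative; in practice the sector moves through the Terminating state machine before a flush has anything to send, as the test further down demonstrates.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// terminateNow queues a sector for termination, shows what is pending in the
// termination batch, and then forces the batch message out instead of waiting.
func terminateNow(ctx context.Context, m api.StorageMiner, num abi.SectorNumber) error {
	if err := m.SectorTerminate(ctx, num); err != nil {
		return err
	}

	pending, err := m.SectorTerminatePending(ctx)
	if err != nil {
		return err
	}
	fmt.Println("sectors queued for termination:", pending)

	msgCid, err := m.SectorTerminateFlush(ctx)
	if err != nil {
		return err
	}
	if msgCid == nil {
		// Nothing was sent; the batch may be empty or not yet ready.
		return nil
	}
	fmt.Println("terminate message sent:", msgCid)
	return nil
}
```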
@ -217,9 +227,12 @@ const (
|
||||
PreCommitAddr AddrUse = iota
|
||||
CommitAddr
|
||||
PoStAddr
|
||||
|
||||
TerminateSectorsAddr
|
||||
)
|
||||
|
||||
type AddressConfig struct {
|
||||
PreCommitControl []address.Address
|
||||
CommitControl []address.Address
|
||||
TerminateControl []address.Address
|
||||
}
|
||||
|
@ -206,6 +206,7 @@ type FullNodeStruct struct {
|
||||
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateSearchMsgLimited func(context.Context, cid.Cid, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
|
||||
@ -244,6 +245,8 @@ type FullNodeStruct struct {
|
||||
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MarketAddBalance func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketGetReserved func(ctx context.Context, addr address.Address) (types.BigInt, error) `perm:"sign"`
|
||||
MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"`
|
||||
MarketWithdraw func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
@ -312,6 +315,9 @@ type StorageMinerStruct struct {
|
||||
SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"`
|
||||
SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"admin"`
|
||||
SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`
|
||||
SectorTerminate func(context.Context, abi.SectorNumber) error `perm:"admin"`
|
||||
SectorTerminateFlush func(ctx context.Context) (*cid.Cid, error) `perm:"admin"`
|
||||
SectorTerminatePending func(ctx context.Context) ([]abi.SectorID, error) `perm:"admin"`
|
||||
SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"`
|
||||
|
||||
WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm
|
||||
@ -438,6 +444,7 @@ type GatewayStruct struct {
|
||||
StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
|
||||
StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
|
||||
StateSearchMsg func(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
|
||||
StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
|
||||
StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error)
|
||||
StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
@ -1008,6 +1015,10 @@ func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api
|
||||
return c.Internal.StateSearchMsg(ctx, msgc)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) StateSearchMsgLimited(ctx context.Context, msgc cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
|
||||
return c.Internal.StateSearchMsgLimited(ctx, msgc, limit)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
|
||||
return c.Internal.StateListMiners(ctx, tsk)
|
||||
}
|
||||
@ -1148,6 +1159,14 @@ func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Addr
|
||||
return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketAddBalance(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
|
||||
return c.Internal.MarketAddBalance(ctx, wallet, addr, amt)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) {
|
||||
return c.Internal.MarketGetReserved(ctx, addr)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
|
||||
return c.Internal.MarketReserveFunds(ctx, wallet, addr, amt)
|
||||
}
|
||||
@ -1300,6 +1319,18 @@ func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.Sector
|
||||
return c.Internal.SectorRemove(ctx, number)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminate(ctx context.Context, number abi.SectorNumber) error {
|
||||
return c.Internal.SectorTerminate(ctx, number)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) {
|
||||
return c.Internal.SectorTerminateFlush(ctx)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) {
|
||||
return c.Internal.SectorTerminatePending(ctx)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error {
|
||||
return c.Internal.SectorMarkForUpgrade(ctx, number)
|
||||
}
|
||||
@ -1754,6 +1785,10 @@ func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSet
|
||||
return g.Internal.StateNetworkVersion(ctx, tsk)
|
||||
}
|
||||
|
||||
func (g GatewayStruct) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
|
||||
return g.Internal.StateSearchMsg(ctx, msg)
|
||||
}
|
||||
|
||||
func (g GatewayStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
|
||||
return g.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
|
||||
}
|
||||
|
@ -17,9 +17,9 @@ import (
|
||||
|
||||
func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
for _, height := range []abi.ChainEpoch{
|
||||
1, // before
|
||||
2, // before
|
||||
162, // while sealing
|
||||
520, // after upgrade deal
|
||||
530, // after upgrade deal
|
||||
5000, // after
|
||||
} {
|
||||
height := height // make linters happy by copying
|
||||
@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
|
||||
func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
|
@ -8,103 +8,40 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
dag "github.com/ipfs/go-merkledag"
|
||||
dstest "github.com/ipfs/go-merkledag/test"
|
||||
unixfile "github.com/ipfs/go-unixfs/file"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
)
|
||||
|
||||
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
MakeDeal(t, ctx, 6, client, miner, carExport, fastRet, startEpoch)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
|
||||
}
|
||||
|
||||
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
MakeDeal(t, ctx, 6, client, miner, false, false, startEpoch)
|
||||
MakeDeal(t, ctx, 7, client, miner, false, false, startEpoch)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
|
||||
MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
|
||||
}
|
||||
|
||||
func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
|
||||
@ -152,95 +89,41 @@ func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api
|
||||
}
|
||||
|
||||
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
data := make([]byte, 1600)
|
||||
rand.New(rand.NewSource(int64(8))).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
fcid, err := s.client.ClientImportLocal(s.ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
deal := startDeal(t, ctx, miner, client, fcid, true, startEpoch)
|
||||
deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)
|
||||
|
||||
waitDealPublished(t, ctx, miner, deal)
|
||||
waitDealPublished(t, s.ctx, s.miner, deal)
|
||||
fmt.Println("deal published, retrieving")
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal)
|
||||
info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
|
||||
require.NoError(t, err)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
|
||||
}
|
||||
|
||||
func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
{
|
||||
data1 := make([]byte, 800)
|
||||
rand.New(rand.NewSource(int64(3))).Read(data1)
|
||||
r := bytes.NewReader(data1)
|
||||
|
||||
fcid1, err := client.ClientImportLocal(ctx, r)
|
||||
fcid1, err := s.client.ClientImportLocal(s.ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -249,35 +132,31 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
|
||||
rand.New(rand.NewSource(int64(9))).Read(data2)
|
||||
r2 := bytes.NewReader(data2)
|
||||
|
||||
fcid2, err := client.ClientImportLocal(ctx, r2)
|
||||
fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
deal1 := startDeal(t, ctx, miner, client, fcid1, true, 0)
|
||||
deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal1, true)
|
||||
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true)
|
||||
|
||||
deal2 := startDeal(t, ctx, miner, client, fcid2, true, 0)
|
||||
deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal2, false)
|
||||
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false)
|
||||
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal2)
|
||||
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
|
||||
require.NoError(t, err)
|
||||
|
||||
rf, _ := miner.SectorsRefs(ctx)
|
||||
rf, _ := s.miner.SectorsRefs(s.ctx)
|
||||
fmt.Printf("refs: %+v\n", rf)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
|
||||
testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
|
||||
@ -459,3 +338,40 @@ func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath strin
|
||||
}
|
||||
return rdata
|
||||
}
|
||||
|
||||
type dealsScaffold struct {
|
||||
ctx context.Context
|
||||
client *impl.FullNodeAPI
|
||||
miner TestStorageNode
|
||||
blockMiner *BlockMiner
|
||||
}
|
||||
|
||||
func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
return connectAndStartMining(t, b, blocktime, client, miner)
|
||||
}
|
||||
|
||||
func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
|
||||
ctx := context.Background()
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
blockMiner := NewBlockMiner(ctx, t, miner, blocktime)
|
||||
blockMiner.MineBlocks()
|
||||
|
||||
return &dealsScaffold{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
miner: miner,
|
||||
blockMiner: blockMiner,
|
||||
}
|
||||
}
|
||||
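For orientation, a sketch of what a new deals test looks like on top of this scaffold, mirroring the refactored TestDealFlow above. The test name and the rseed value are placeholders, and the snippet assumes it lives in the same api/test package as the helpers it calls.

```go
package test

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
)

// TestExampleDeal drives a single storage deal through the shared helpers,
// letting the scaffold own node setup, peer connection, and background mining.
func TestExampleDeal(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	MakeDeal(t, s.ctx, 8, s.client, s.miner, false, false, startEpoch)
}
```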
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -48,6 +49,7 @@ type TestStorageNode struct {
|
||||
ListenAddr multiaddr.Multiaddr
|
||||
|
||||
MineOne func(context.Context, miner.MineReq) error
|
||||
Stop func(context.Context) error
|
||||
}
|
||||
|
||||
var PresealGenesis = -1
|
||||
@ -109,14 +111,19 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
|
||||
var OneFull = DefaultFullOpts(1)
|
||||
var TwoFull = DefaultFullOpts(2)
|
||||
|
||||
var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
|
||||
var FullNodeWithActorsV3At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
|
||||
return FullNodeOpts{
|
||||
Opts: func(nodes []TestNode) node.Option {
|
||||
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
|
||||
// Skip directly to tape height so precommits work.
|
||||
Network: network.Version5,
|
||||
Height: upgradeHeight,
|
||||
// prepare for upgrade.
|
||||
Network: network.Version9,
|
||||
Height: 1,
|
||||
Migration: stmgr.UpgradeActorsV2,
|
||||
}, {
|
||||
// Skip directly to tape height so precommits work.
|
||||
Network: network.Version10,
|
||||
Height: upgradeHeight,
|
||||
Migration: stmgr.UpgradeActorsV3,
|
||||
}})
|
||||
},
|
||||
}
|
||||
@ -157,7 +164,11 @@ func (ts *testSuite) testVersion(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, v.Version, build.BuildVersion)
|
||||
versions := strings.Split(v.Version, "+")
|
||||
if len(versions) <= 0 {
|
||||
t.Fatal("empty version")
|
||||
}
|
||||
require.Equal(t, versions[0], build.BuildVersion)
|
||||
}
|
||||
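A tiny worked example of what the relaxed version check above tolerates; the exact "+suffix" shape of the reported version string is an assumption used only for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	reported := "1.4.1+git.4f128acc" // hypothetical version string as reported by the node
	versions := strings.Split(reported, "+")
	fmt.Println(versions[0]) // "1.4.1" -- the part compared against build.BuildVersion
}
```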
|
||||
func (ts *testSuite) testSearchMsg(t *testing.T) {
|
||||
|
@ -14,14 +14,20 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
bminer "github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
@ -200,7 +206,7 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
|
||||
|
||||
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||
for _, height := range []abi.ChainEpoch{
|
||||
1, // before
|
||||
2, // before
|
||||
162, // while sealing
|
||||
5000, // while proving
|
||||
} {
|
||||
@ -211,12 +217,13 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int,
|
||||
upgradeHeight abi.ChainEpoch) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner)
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
@ -428,3 +435,592 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
|
||||
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
|
||||
require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
|
||||
}
|
||||
|
||||
func TestTerminate(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
nSectors := uint64(2)
|
||||
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}})
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
ssz, err := miner.ActorSectorSize(ctx, maddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
|
||||
|
||||
fmt.Printf("Seal a sector\n")
|
||||
|
||||
pledgeSectors(t, ctx, miner, 1, 0, nil)
|
||||
|
||||
fmt.Printf("wait for power\n")
|
||||
|
||||
{
|
||||
// Wait until proven.
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
|
||||
fmt.Printf("End for head.Height > %d\n", waitUntil)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > waitUntil {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nSectors++
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
|
||||
|
||||
fmt.Println("Terminate a sector")
|
||||
|
||||
toTerminate := abi.SectorNumber(3)
|
||||
|
||||
err = miner.SectorTerminate(ctx, toTerminate)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgTriggerred := false
|
||||
loop:
|
||||
for {
|
||||
si, err := miner.SectorsStatus(ctx, toTerminate, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("state: ", si.State, msgTriggerred)
|
||||
|
||||
switch sealing.SectorState(si.State) {
|
||||
case sealing.Terminating:
|
||||
if !msgTriggerred {
|
||||
{
|
||||
p, err := miner.SectorTerminatePending(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, p, 1)
|
||||
require.Equal(t, abi.SectorNumber(3), p[0].Number)
|
||||
}
|
||||
|
||||
c, err := miner.SectorTerminateFlush(ctx)
|
||||
require.NoError(t, err)
|
||||
if c != nil {
|
||||
msgTriggerred = true
|
||||
fmt.Println("terminate message:", c)
|
||||
|
||||
{
|
||||
p, err := miner.SectorTerminatePending(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, p, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed:
|
||||
break loop
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
// check power decreased
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
|
||||
|
||||
// check in terminated set
|
||||
{
|
||||
parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(parts), 0)
|
||||
|
||||
bflen := func(b bitfield.BitField) uint64 {
|
||||
l, err := b.Count()
|
||||
require.NoError(t, err)
|
||||
return l
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(1), bflen(parts[0].AllSectors))
|
||||
require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
|
||||
}
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
|
||||
}
|
||||
|
||||
func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// First, we configure two miners. After sealing, we're going to turn off the first miner so
|
||||
// it doesn't submit proofs.
|
||||
///
|
||||
// Then we're going to manually submit bad proofs.
|
||||
n, sn := b(t, []FullNodeOpts{
|
||||
FullNodeWithActorsV3At(2),
|
||||
}, []StorageMiner{
|
||||
{Full: 0, Preseal: PresealGenesis},
|
||||
{Full: 0},
|
||||
})
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
chainMiner := sn[0]
|
||||
evilMiner := sn[1]
|
||||
|
||||
{
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
// Mine with the _second_ node (the good one).
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := chainMiner.MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
// Give the chain miner enough sectors to win every block.
|
||||
pledgeSectors(t, ctx, chainMiner, 10, 0, nil)
|
||||
// And the evil one 1 sector. No cookie for you.
|
||||
pledgeSectors(t, ctx, evilMiner, 1, 0, nil)
|
||||
|
||||
// Let the evil miner's sectors gain power.
|
||||
evilMinerAddr, err := evilMiner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Running one proving period\n")
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure it has gained power.
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
|
||||
|
||||
evilSectors, err := evilMiner.SectorsList(ctx)
|
||||
require.NoError(t, err)
|
||||
evilSectorNo := evilSectors[0] // only one.
|
||||
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("evil miner stopping")
|
||||
|
||||
// Now stop the evil miner, and start manually submitting bad proofs.
|
||||
require.NoError(t, evilMiner.Stop(ctx))
|
||||
|
||||
fmt.Println("evil miner stopped")
|
||||
|
||||
// Wait until we need to prove our sector.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index == evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
|
||||
require.NoError(t, err, "evil proof not accepted")
|
||||
|
||||
// Wait until after the proving period.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index != evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
fmt.Println("accepted evil proof")
|
||||
|
||||
// Make sure the evil node didn't lose any power.
|
||||
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
|
||||
|
||||
// OBJECTION! The good miner files a DISPUTE!!!!
|
||||
{
|
||||
params := &minerActor.DisputeWindowedPoStParams{
|
||||
Deadline: evilSectorLoc.Deadline,
|
||||
PoStIndex: 0,
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: evilMinerAddr,
|
||||
Method: minerActor.Methods.DisputeWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: defaultFrom,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("waiting dispute")
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
|
||||
}
|
||||
|
||||
// Objection SUSTAINED!
|
||||
// Make sure the evil node lost power.
|
||||
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.True(t, p.MinerPower.RawBytePower.IsZero())
|
||||
|
||||
// Now we begin the redemption arc.
|
||||
require.True(t, p.MinerPower.RawBytePower.IsZero())
|
||||
|
||||
// First, recover the sector.
|
||||
|
||||
{
|
||||
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
params := &minerActor.DeclareFaultsRecoveredParams{
|
||||
Recoveries: []minerActor.RecoveryDeclaration{{
|
||||
Deadline: evilSectorLoc.Deadline,
|
||||
Partition: evilSectorLoc.Partition,
|
||||
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
|
||||
}},
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: evilMinerAddr,
|
||||
Method: minerActor.Methods.DeclareFaultsRecovered,
|
||||
Params: enc,
|
||||
Value: types.FromFil(30), // repay debt.
|
||||
From: minerInfo.Owner,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
|
||||
}
|
||||
|
||||
// Then wait for the deadline.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index == evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
// Now try to be evil again
|
||||
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")
|
||||
|
||||
// It didn't work because we're recovering.
|
||||
}
|
||||
|
||||
func submitBadProof(
|
||||
ctx context.Context,
|
||||
client api.FullNode, maddr address.Address,
|
||||
di *dline.Info, dlIdx, partIdx uint64,
|
||||
) error {
|
||||
head, err := client.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
from, err := client.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
commEpoch := di.Open
|
||||
commRand, err := client.ChainGetRandomnessFromTickets(
|
||||
ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit,
|
||||
commEpoch, nil,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params := &minerActor.SubmitWindowedPoStParams{
|
||||
ChainCommitEpoch: commEpoch,
|
||||
ChainCommitRand: commRand,
|
||||
Deadline: dlIdx,
|
||||
Partitions: []minerActor.PoStPartition{{Index: partIdx}},
|
||||
Proofs: []proof3.PoStProof{{
|
||||
PoStProof: minerInfo.WindowPoStProofType,
|
||||
ProofBytes: []byte("I'm soooo very evil."),
|
||||
}},
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
if aerr != nil {
|
||||
return aerr
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
Method: minerActor.Methods.SubmitWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: from,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rec.Receipt.ExitCode.IsError() {
|
||||
return rec.Receipt.ExitCode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, OneMiner)
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
{
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
// Mine with the _second_ node (the good one).
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := miner.MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
pledgeSectors(t, ctx, miner, 10, 0, nil)
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Running one proving period\n")
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
ssz, err := miner.ActorSectorSize(ctx, maddr)
|
||||
require.NoError(t, err)
|
||||
expectedPower := types.NewInt(uint64(ssz) * (GenesisPreseals + 10))
|
||||
|
||||
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure it has gained power.
|
||||
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
|
||||
|
||||
// Wait until a proof has been submitted.
|
||||
var targetDeadline uint64
|
||||
waitForProof:
|
||||
for {
|
||||
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
for dlIdx, dl := range deadlines {
|
||||
empty, err := dl.PostSubmissions.IsEmpty()
require.NoError(t, err)
if !empty {
|
||||
targetDeadline = uint64(dlIdx)
|
||||
break waitForProof
|
||||
}
|
||||
}
|
||||
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
for {
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
// wait until the deadline finishes.
|
||||
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
|
||||
break
|
||||
}
|
||||
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
// Try to object to the proof. This should fail.
|
||||
{
|
||||
params := &minerActor.DisputeWindowedPoStParams{
|
||||
Deadline: targetDeadline,
|
||||
PoStIndex: 0,
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
Method: minerActor.Methods.DisputeWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: defaultFrom,
|
||||
}
|
||||
_, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
|
||||
}
|
||||
}
|
||||
|
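The three tests above repeat the same poll-until-the-chain-passes-a-height loop several times. As an illustration only (the helper name and placement are hypothetical, not part of this change), the pattern can be captured using the calls the tests already make:

// waitForHeight polls the chain head until it passes target, sleeping one
// block time between polls. Hypothetical helper, not part of this diff.
func waitForHeight(ctx context.Context, t *testing.T, client api.FullNode, target abi.ChainEpoch, blocktime time.Duration) {
	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)
		if head.Height() > target {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			return
		}
		build.Clock.Sleep(blocktime)
	}
}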
@ -2,11 +2,9 @@ package build
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/filecoin-project/lotus/lib/addrutil"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
rice "github.com/GeertJohan/go.rice"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@ -17,24 +15,16 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var out []peer.AddrInfo
|
||||
|
||||
b := rice.MustFindBox("bootstrap")
|
||||
err := b.Walk("", func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to walk box: %w", err)
|
||||
|
||||
if BootstrappersFile != "" {
|
||||
spi := b.MustString(BootstrappersFile)
|
||||
if spi == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(path, ".pi") {
|
||||
return nil
|
||||
}
|
||||
spi := b.MustString(path)
|
||||
if spi == "" {
|
||||
return nil
|
||||
}
|
||||
pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
|
||||
out = append(out, pi...)
|
||||
return err
|
||||
})
|
||||
return out, err
|
||||
return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
4
build/bootstrap/calibnet.pi
Normal file
@ -0,0 +1,4 @@
|
||||
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWK1QYsm6iqyhgH7vqsbeoNoKHbT368h1JLHS1qYN36oyc
|
||||
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWKDyJZoPsNak1iYNN1GGmvGnvhyVbWBL6iusYfP3RpgYs
|
||||
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWJRSTnzABB6MYYEBbSTT52phQntVD1PpRTMh1xt9mh6yH
|
||||
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWQLi3kY6HnMYLUtwCe26zWMdNhniFgHVNn1DioQc7NiWv
|
@ -14,7 +14,7 @@ func MaybeGenesis() []byte {
|
||||
log.Warnf("loading built-in genesis: %s", err)
|
||||
return nil
|
||||
}
|
||||
genBytes, err := builtinGen.Bytes("devnet.car")
|
||||
genBytes, err := builtinGen.Bytes(GenesisFile)
|
||||
if err != nil {
|
||||
log.Warnf("loading built-in genesis: %s", err)
|
||||
}
|
||||
|
BIN
build/genesis/calibnet.car
Normal file
Binary file not shown.
9
build/isnearupgrade.go
Normal file
@ -0,0 +1,9 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
)
|
||||
|
||||
func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool {
|
||||
return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality
|
||||
}
|
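A minimal sketch of how a caller might use the helper above (the function and the choice of upgrade are hypothetical); an epoch counts as "near" when it lies within Finality on either side of the upgrade epoch:

// safeToMigrate reports whether long-running work can start at epoch without
// straddling the actors v3 upgrade. Hypothetical example only.
func safeToMigrate(epoch abi.ChainEpoch) bool {
	return !build.IsNearUpgrade(epoch, build.UpgradeActorsV3Height)
}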
@ -3,13 +3,13 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
const BootstrappersFile = ""
|
||||
const GenesisFile = ""
|
||||
|
||||
const UpgradeBreezeHeight = -1
|
||||
const BreezeGasTampingDuration = 0
|
||||
|
||||
@ -18,12 +18,16 @@ const UpgradeIgnitionHeight = -2
|
||||
const UpgradeRefuelHeight = -3
|
||||
const UpgradeTapeHeight = -4
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(10)
|
||||
var UpgradeLiftoffHeight = abi.ChainEpoch(-5)
|
||||
const UpgradeActorsV2Height = 10
|
||||
const UpgradeLiftoffHeight = -5
|
||||
|
||||
const UpgradeKumquatHeight = 15
|
||||
const UpgradeCalicoHeight = 20
|
||||
const UpgradePersianHeight = 25
|
||||
const UpgradeOrangeHeight = 27
|
||||
const UpgradeClausHeight = 30
|
||||
|
||||
const UpgradeActorsV3Height = 35
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
@ -34,11 +38,6 @@ func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
|
||||
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
|
||||
UpgradeActorsV2Height = math.MaxInt64
|
||||
UpgradeLiftoffHeight = 11
|
||||
}
|
||||
|
||||
BuildType |= Build2k
|
||||
}
|
||||
|
||||
|
70
build/params_calibnet.go
Normal file
@ -0,0 +1,70 @@
|
||||
// +build calibnet
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const BootstrappersFile = "calibnet.pi"
|
||||
const GenesisFile = "calibnet.car"
|
||||
|
||||
const UpgradeBreezeHeight = -1
|
||||
const BreezeGasTampingDuration = 120
|
||||
|
||||
const UpgradeSmokeHeight = -2
|
||||
|
||||
const UpgradeIgnitionHeight = -3
|
||||
const UpgradeRefuelHeight = -4
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(30)
|
||||
|
||||
const UpgradeTapeHeight = 60
|
||||
|
||||
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
|
||||
// Miners, clients, developers, custodians all need time to prepare.
|
||||
// We still have upgrades and state changes to do, but can happen after signaling timing here.
|
||||
const UpgradeLiftoffHeight = -5
|
||||
|
||||
const UpgradeKumquatHeight = 90
|
||||
|
||||
const UpgradeCalicoHeight = 92000
|
||||
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
|
||||
|
||||
// 2020-12-17T19:00:00Z
|
||||
const UpgradeClausHeight = 161386
|
||||
|
||||
// 2021-01-17T19:00:00Z
|
||||
const UpgradeOrangeHeight = 250666
|
||||
|
||||
// 2021-01-28T21:00:00Z
|
||||
const UpgradeActorsV3Height = 282586
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 30))
|
||||
policy.SetSupportedProofTypes(
|
||||
abi.RegisteredSealProof_StackedDrg512MiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg32GiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg64GiBV1,
|
||||
)
|
||||
|
||||
SetAddressNetwork(address.Testnet)
|
||||
|
||||
Devnet = true
|
||||
|
||||
BuildType = BuildCalibnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
||||
const PropagationDelaySecs = uint64(6)
|
||||
|
||||
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
|
||||
const BootstrapPeerThreshold = 4
|
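The only derived height above is UpgradePersianHeight. Assuming the 30-second epochs implied by BlockDelaySecs, builtin2.EpochsInHour is 120, so the expression resolves as sketched below (a hypothetical constant, shown only to make the arithmetic explicit):

// UpgradeCalicoHeight plus 60 hours of epochs: 92000 + 120*60 = 99200.
const calibnetPersianHeight = 92000 + 120*60 // equals UpgradePersianHeight above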
@ -1,11 +1,11 @@
|
||||
// +build !debug
|
||||
// +build !2k
|
||||
// +build !testground
|
||||
// +build !calibnet
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -19,7 +19,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
UpgradeSmokeHeight: DrandMainnet,
|
||||
}
|
||||
|
||||
const BootstrappersFile = "mainnet.pi"
|
||||
const GenesisFile = "mainnet.car"
|
||||
|
||||
const UpgradeBreezeHeight = 41280
|
||||
|
||||
const BreezeGasTampingDuration = 120
|
||||
|
||||
const UpgradeSmokeHeight = 51000
|
||||
@ -27,7 +31,7 @@ const UpgradeSmokeHeight = 51000
|
||||
const UpgradeIgnitionHeight = 94000
|
||||
const UpgradeRefuelHeight = 130800
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(138720)
|
||||
const UpgradeActorsV2Height = 138720
|
||||
|
||||
const UpgradeTapeHeight = 140760
|
||||
|
||||
@ -41,6 +45,14 @@ const UpgradeKumquatHeight = 170000
|
||||
const UpgradeCalicoHeight = 265200
|
||||
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
|
||||
|
||||
const UpgradeOrangeHeight = 336458
|
||||
|
||||
// 2020-12-22T02:00:00Z
|
||||
const UpgradeClausHeight = 343200
|
||||
|
||||
// TODO
|
||||
const UpgradeActorsV3Height = 999999999
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
|
||||
|
||||
@ -48,11 +60,9 @@ func init() {
|
||||
SetAddressNetwork(address.Mainnet)
|
||||
}
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
|
||||
UpgradeActorsV2Height = math.MaxInt64
|
||||
}
|
||||
|
||||
Devnet = false
|
||||
|
||||
BuildType = BuildMainnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
|
||||
// Consensus / Network
|
||||
|
||||
const AllowableClockDriftSecs = uint64(1)
|
||||
const NewestNetworkVersion = network.Version8
|
||||
const NewestNetworkVersion = network.Version9
|
||||
const ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
// Epochs
|
||||
|
@ -90,16 +90,22 @@ var (
|
||||
UpgradeKumquatHeight abi.ChainEpoch = -6
|
||||
UpgradeCalicoHeight abi.ChainEpoch = -7
|
||||
UpgradePersianHeight abi.ChainEpoch = -8
|
||||
UpgradeOrangeHeight abi.ChainEpoch = -9
|
||||
UpgradeClausHeight abi.ChainEpoch = -10
|
||||
UpgradeActorsV3Height abi.ChainEpoch = -11
|
||||
|
||||
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
NewestNetworkVersion = network.Version8
|
||||
NewestNetworkVersion = network.Version9
|
||||
ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
Devnet = true
|
||||
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
|
||||
|
||||
BootstrappersFile = ""
|
||||
GenesisFile = ""
|
||||
)
|
||||
|
||||
const BootstrapPeerThreshold = 1
|
||||
|
@ -10,26 +10,32 @@ var CurrentCommit string
|
||||
var BuildType int
|
||||
|
||||
const (
|
||||
BuildDefault = 0
|
||||
Build2k = 0x1
|
||||
BuildDebug = 0x3
|
||||
BuildDefault = 0
|
||||
BuildMainnet = 0x1
|
||||
Build2k = 0x2
|
||||
BuildDebug = 0x3
|
||||
BuildCalibnet = 0x4
|
||||
)
|
||||
|
||||
func buildType() string {
|
||||
switch BuildType {
|
||||
case BuildDefault:
|
||||
return ""
|
||||
case BuildDebug:
|
||||
return "+debug"
|
||||
case BuildMainnet:
|
||||
return "+mainnet"
|
||||
case Build2k:
|
||||
return "+2k"
|
||||
case BuildDebug:
|
||||
return "+debug"
|
||||
case BuildCalibnet:
|
||||
return "+calibnet"
|
||||
default:
|
||||
return "+huh?"
|
||||
}
|
||||
}
|
||||
|
||||
// BuildVersion is the local build version, set by build system
|
||||
const BuildVersion = "1.2.2"
|
||||
const BuildVersion = "1.4.1"
|
||||
|
||||
func UserVersion() string {
|
||||
return BuildVersion + buildType() + CurrentCommit
|
||||
@ -84,7 +90,7 @@ func VersionForType(nodeType NodeType) (Version, error) {
|
||||
// semver versions of the rpc api exposed
|
||||
var (
|
||||
FullAPIVersion = newVer(1, 0, 0)
|
||||
MinerAPIVersion = newVer(1, 0, 0)
|
||||
MinerAPIVersion = newVer(1, 0, 1)
|
||||
WorkerAPIVersion = newVer(1, 0, 0)
|
||||
)
|
||||
|
||||
|
@ -2,16 +2,9 @@ package adt
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
|
||||
)
|
||||
|
||||
type Map interface {
|
||||
@ -24,26 +17,6 @@ type Map interface {
|
||||
ForEach(v cbor.Unmarshaler, fn func(key string) error) error
|
||||
}
|
||||
|
||||
func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) {
|
||||
switch version {
|
||||
case actors.Version0:
|
||||
return adt0.AsMap(store, root)
|
||||
case actors.Version2:
|
||||
return adt2.AsMap(store, root)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown network version: %d", version)
|
||||
}
|
||||
|
||||
func NewMap(store Store, version actors.Version) (Map, error) {
|
||||
switch version {
|
||||
case actors.Version0:
|
||||
return adt0.MakeEmptyMap(store), nil
|
||||
case actors.Version2:
|
||||
return adt2.MakeEmptyMap(store), nil
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown network version: %d", version)
|
||||
}
|
||||
|
||||
type Array interface {
|
||||
Root() (cid.Cid, error)
|
||||
|
||||
@ -54,23 +27,3 @@ type Array interface {
|
||||
|
||||
ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error
|
||||
}
|
||||
|
||||
func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) {
|
||||
switch actors.VersionForNetwork(version) {
|
||||
case actors.Version0:
|
||||
return adt0.AsArray(store, root)
|
||||
case actors.Version2:
|
||||
return adt2.AsArray(store, root)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown network version: %d", version)
|
||||
}
|
||||
|
||||
func NewArray(store Store, version actors.Version) (Array, error) {
|
||||
switch version {
|
||||
case actors.Version0:
|
||||
return adt0.MakeEmptyArray(store), nil
|
||||
case actors.Version2:
|
||||
return adt2.MakeEmptyArray(store), nil
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown network version: %d", version)
|
||||
}
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -22,9 +23,12 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var Methods = builtin2.MethodsAccount
|
||||
var Methods = builtin3.MethodsAccount
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
switch act.Code {
|
||||
@ -32,6 +36,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.AccountActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.AccountActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
30
chain/actors/builtin/account/v3.go
Normal file
@ -0,0 +1,30 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
account3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
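The new v3 loader plugs into the version-dispatching account.Load shown earlier in this file set. A hypothetical caller (store and act would normally come from the state manager) resolves a pubkey address the same way for v0, v2, and v3 state:

// pubkeyFor is a hypothetical helper, not part of this diff.
func pubkeyFor(store adt.Store, act *types.Actor) (address.Address, error) {
	st, err := account.Load(store, act)
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}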
@ -2,12 +2,12 @@ package builtin
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
@ -15,9 +15,12 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||
smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
|
||||
smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
|
||||
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
|
||||
smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||
)
|
||||
|
||||
var SystemActorAddr = builtin0.SystemActorAddr
|
||||
@ -38,11 +41,12 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
MethodSend = builtin2.MethodSend
|
||||
MethodConstructor = builtin2.MethodConstructor
|
||||
MethodSend = builtin3.MethodSend
|
||||
MethodConstructor = builtin3.MethodConstructor
|
||||
)
|
||||
|
||||
// TODO: Why does actors have 2 different versions of this?
|
||||
// These are all just type aliases across actor versions 0, 2, & 3. In the future, that might change
|
||||
// and we might need to do something fancier.
|
||||
type SectorInfo = proof0.SectorInfo
|
||||
type PoStProof = proof0.PoStProof
|
||||
type FilterEstimate = smoothing0.FilterEstimate
|
||||
@ -51,13 +55,17 @@ func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v0)
|
||||
}
|
||||
|
||||
// Doesn't change between actors v0 and v1
|
||||
// Doesn't change between actors v0, v2, and v3.
|
||||
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
|
||||
return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
}
|
||||
|
||||
func FromV2FilterEstimate(v1 smoothing2.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v1)
|
||||
func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v2)
|
||||
}
|
||||
|
||||
func FromV3FilterEstimate(v3 smoothing3.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v3)
|
||||
}
|
||||
|
||||
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
|
||||
@ -82,30 +90,42 @@ func ActorNameByCode(c cid.Cid) string {
|
||||
return builtin0.ActorNameByCode(c)
|
||||
case builtin2.IsBuiltinActor(c):
|
||||
return builtin2.ActorNameByCode(c)
|
||||
case builtin3.IsBuiltinActor(c):
|
||||
return builtin3.ActorNameByCode(c)
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
}
|
||||
|
||||
func IsBuiltinActor(c cid.Cid) bool {
|
||||
return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c)
|
||||
return builtin0.IsBuiltinActor(c) ||
|
||||
builtin2.IsBuiltinActor(c) ||
|
||||
builtin3.IsBuiltinActor(c)
|
||||
}
|
||||
|
||||
func IsAccountActor(c cid.Cid) bool {
|
||||
return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID
|
||||
return c == builtin0.AccountActorCodeID ||
|
||||
c == builtin2.AccountActorCodeID ||
|
||||
c == builtin3.AccountActorCodeID
|
||||
}
|
||||
|
||||
func IsStorageMinerActor(c cid.Cid) bool {
|
||||
return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID
|
||||
return c == builtin0.StorageMinerActorCodeID ||
|
||||
c == builtin2.StorageMinerActorCodeID ||
|
||||
c == builtin3.StorageMinerActorCodeID
|
||||
}
|
||||
|
||||
func IsMultisigActor(c cid.Cid) bool {
|
||||
return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID
|
||||
return c == builtin0.MultisigActorCodeID ||
|
||||
c == builtin2.MultisigActorCodeID ||
|
||||
c == builtin3.MultisigActorCodeID
|
||||
|
||||
}
|
||||
|
||||
func IsPaymentChannelActor(c cid.Cid) bool {
|
||||
return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID
|
||||
return c == builtin0.PaymentChannelActorCodeID ||
|
||||
c == builtin2.PaymentChannelActorCodeID ||
|
||||
c == builtin3.PaymentChannelActorCodeID
|
||||
}
|
||||
|
||||
func makeAddress(addr string) address.Address {
|
||||
|
@ -1,10 +1,10 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
var (
|
||||
Address = builtin2.CronActorAddr
|
||||
Methods = builtin2.MethodsCron
|
||||
Address = builtin3.CronActorAddr
|
||||
Methods = builtin3.MethodsCron
|
||||
)
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -24,11 +25,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.InitActorAddr
|
||||
Methods = builtin2.MethodsInit
|
||||
Address = builtin3.InitActorAddr
|
||||
Methods = builtin3.MethodsInit
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.InitActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.InitActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
86
chain/actors/builtin/init/v3.go
Normal file
@ -0,0 +1,86 @@
|
||||
package init
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
init3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) {
|
||||
return s.State.ResolveAddress(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) {
|
||||
return s.State.MapAddressToNewID(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
|
||||
addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var actorID cbg.CborInt
|
||||
return addrs.ForEach(&actorID, func(key string) error {
|
||||
addr, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(abi.ActorID(actorID), addr)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) NetworkName() (dtypes.NetworkName, error) {
|
||||
return dtypes.NetworkName(s.State.NetworkName), nil
|
||||
}
|
||||
|
||||
func (s *state3) SetNetworkName(name string) error {
|
||||
s.State.NetworkName = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state3) Remove(addrs ...address.Address) (err error) {
|
||||
m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if err = m.Delete(abi.AddrKey(addr)); err != nil {
|
||||
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
|
||||
}
|
||||
}
|
||||
amr, err := m.Root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get address map root: %w", err)
|
||||
}
|
||||
s.State.AddressMap = amr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state3) addressMap() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
}
|
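As a small usage sketch (hypothetical helper, assuming the package's State interface exposes ForEachActor as the v3 state above implements it), the init actor's address map can be enumerated like this:

// dumpIDAddresses prints every ActorID to address mapping in the init actor.
func dumpIDAddresses(st State) error {
	return st.ForEachActor(func(id abi.ActorID, addr address.Address) error {
		fmt.Printf("%d -> %s\n", id, addr)
		return nil
	})
}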
@ -12,6 +12,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -25,11 +26,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.StorageMarketActorAddr
|
||||
Methods = builtin2.MethodsMarket
|
||||
Address = builtin3.StorageMarketActorAddr
|
||||
Methods = builtin3.MethodsMarket
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StorageMarketActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StorageMarketActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
205
chain/actors/builtin/market/v3.go
Normal file
@ -0,0 +1,205 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
market3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) TotalLocked() (abi.TokenAmount, error) {
|
||||
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
|
||||
fml = types.BigAdd(fml, s.TotalClientStorageFee)
|
||||
return fml, nil
|
||||
}
|
||||
|
||||
func (s *state3) BalancesChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.EscrowTable.Equals(otherState2.State.EscrowTable) || !s.State.LockedTable.Equals(otherState2.State.LockedTable), nil
|
||||
}
|
||||
|
||||
func (s *state3) StatesChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the deal states have changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.States.Equals(otherState2.State.States), nil
|
||||
}
|
||||
|
||||
func (s *state3) States() (DealStates, error) {
|
||||
stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealStates3{stateArray}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ProposalsChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the proposals have changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Proposals.Equals(otherState2.State.Proposals), nil
|
||||
}
|
||||
|
||||
func (s *state3) Proposals() (DealProposals, error) {
|
||||
proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealProposals3{proposalArray}, nil
|
||||
}
|
||||
|
||||
func (s *state3) EscrowTable() (BalanceTable, error) {
|
||||
bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable3{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state3) LockedTable() (BalanceTable, error) {
|
||||
bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable3{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state3) VerifyDealsForActivation(
|
||||
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
|
||||
) (weight, verifiedWeight abi.DealWeight, err error) {
|
||||
w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
|
||||
return w, vw, err
|
||||
}
|
||||
|
||||
type balanceTable3 struct {
|
||||
*adt3.BalanceTable
|
||||
}
|
||||
|
||||
func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
|
||||
asMap := (*adt3.Map)(bt.BalanceTable)
|
||||
var ta abi.TokenAmount
|
||||
return asMap.ForEach(&ta, func(key string) error {
|
||||
a, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, ta)
|
||||
})
|
||||
}
|
||||
|
||||
type dealStates3 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) {
|
||||
var deal2 market3.DealState
|
||||
found, err := s.Array.Get(uint64(dealID), &deal2)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
deal := fromV3DealState(deal2)
|
||||
return &deal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
|
||||
var ds1 market3.DealState
|
||||
return s.Array.ForEach(&ds1, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV3DealState(ds1))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) {
|
||||
var ds1 market3.DealState
|
||||
if err := ds1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds := fromV3DealState(ds1)
|
||||
return &ds, nil
|
||||
}
|
||||
|
||||
func (s *dealStates3) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV3DealState(v1 market3.DealState) DealState {
|
||||
return (DealState)(v1)
|
||||
}
|
||||
|
||||
type dealProposals3 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) {
|
||||
var proposal2 market3.DealProposal
|
||||
found, err := s.Array.Get(uint64(dealID), &proposal2)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
proposal := fromV3DealProposal(proposal2)
|
||||
return &proposal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
|
||||
var dp1 market3.DealProposal
|
||||
return s.Array.ForEach(&dp1, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV3DealProposal(dp1))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) {
|
||||
var dp1 market3.DealProposal
|
||||
if err := dp1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dp := fromV3DealProposal(dp1)
|
||||
return &dp, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals3) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV3DealProposal(v1 market3.DealProposal) DealProposal {
|
||||
return (DealProposal)(v1)
|
||||
}
|
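A short, hypothetical consumer of the accessors above (assuming the package's State interface exposes Proposals as the v3 state implements it): load the proposals AMT once, then walk it through the version-neutral DealProposals interface.

// countProposals is a hypothetical helper, not part of this diff.
func countProposals(st State) (int, error) {
	props, err := st.Proposals()
	if err != nil {
		return 0, err
	}
	n := 0
	err = props.ForEach(func(id abi.DealID, dp DealProposal) error {
		n++
		return nil
	})
	return n, err
}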
@ -2,6 +2,7 @@ package miner
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
@ -20,6 +21,9 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -29,11 +33,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var Methods = builtin2.MethodsMiner
|
||||
var Methods = builtin3.MethodsMiner
|
||||
|
||||
// Unchanged between v0 and v2 actors
|
||||
// Unchanged between v0, v2, and v3 actors
|
||||
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
|
||||
var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
|
||||
var WPoStChallengeWindow = miner0.WPoStChallengeWindow
|
||||
@ -42,12 +49,18 @@ var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
|
||||
|
||||
const MinSectorExpiration = miner0.MinSectorExpiration
|
||||
|
||||
// Not used / checked in v0
|
||||
var DeclarationsMax = miner2.DeclarationsMax
|
||||
var AddressedSectorsMax = miner2.AddressedSectorsMax
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
switch act.Code {
|
||||
case builtin0.StorageMinerActorCodeID:
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StorageMinerActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StorageMinerActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -91,9 +104,10 @@ type State interface {
|
||||
type Deadline interface {
|
||||
LoadPartition(idx uint64) (Partition, error)
|
||||
ForEachPartition(cb func(idx uint64, part Partition) error) error
|
||||
PostSubmissions() (bitfield.BitField, error)
|
||||
PartitionsPoSted() (bitfield.BitField, error)
|
||||
|
||||
PartitionsChanged(Deadline) (bool, error)
|
||||
DisputableProofCount() (uint64, error)
|
||||
}
|
||||
|
||||
type Partition interface {
|
||||
@ -137,6 +151,60 @@ type DeclareFaultsParams = miner0.DeclareFaultsParams
|
||||
type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
|
||||
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
|
||||
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
|
||||
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
|
||||
|
||||
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
|
||||
// We added support for the new proofs in network version 7, and removed support for the old
|
||||
// ones in network version 8.
|
||||
if nver < network.Version7 {
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) {
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unknown proof type %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
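A hedged example of the mapping above (helper name hypothetical): resolve the seal proof that corresponds to a miner's window PoSt proof type, here pinned to network version 9 as used elsewhere in this change.

// sealProofFor returns the seal proof matching a window PoSt proof at nv9.
func sealProofFor(wpp abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
	return PreferredSealProofTypeFromWindowPoStType(network.Version9, wpp)
}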
type MinerInfo struct {
|
||||
Owner address.Address // Must be an ID-address.
|
||||
@ -146,7 +214,7 @@ type MinerInfo struct {
|
||||
WorkerChangeEpoch abi.ChainEpoch
|
||||
PeerId *peer.ID
|
||||
Multiaddrs []abi.Multiaddrs
|
||||
SealProofType abi.RegisteredSealProof
|
||||
WindowPoStProofType abi.RegisteredPoStProof
|
||||
SectorSize abi.SectorSize
|
||||
WindowPoStPartitionSectors uint64
|
||||
ConsensusFaultElapsed abi.ChainEpoch
|
||||
|
@ -297,6 +297,11 @@ func (s *state0) Info() (MinerInfo, error) {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
wpp, err := info.SealProofType.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
@ -307,7 +312,7 @@ func (s *state0) Info() (MinerInfo, error) {
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
SealProofType: info.SealProofType,
|
||||
WindowPoStProofType: wpp,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: -1,
|
||||
@ -382,10 +387,15 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) {
|
||||
return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
|
||||
func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PostSubmissions, nil
|
||||
}
|
||||
|
||||
func (d *deadline0) DisputableProofCount() (uint64, error) {
|
||||
// field doesn't exist until v3
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *partition0) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
@ -296,6 +296,11 @@ func (s *state2) Info() (MinerInfo, error) {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
wpp, err := info.SealProofType.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
@ -306,7 +311,7 @@ func (s *state2) Info() (MinerInfo, error) {
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
SealProofType: info.SealProofType,
|
||||
WindowPoStProofType: wpp,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
@ -381,10 +386,15 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) {
|
||||
return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline2) PostSubmissions() (bitfield.BitField, error) {
|
||||
func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PostSubmissions, nil
|
||||
}
|
||||
|
||||
func (d *deadline2) DisputableProofCount() (uint64, error) {
|
||||
// field doesn't exist until v3
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *partition2) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
434
chain/actors/builtin/miner/v3.go
Normal file
@ -0,0 +1,434 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
miner3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type deadline3 struct {
|
||||
miner3.Deadline
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type partition3 struct {
|
||||
miner3.Partition
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = xerrors.Errorf("failed to get available balance: %w", r)
|
||||
available = abi.NewTokenAmount(0)
|
||||
}
|
||||
}()
|
||||
// this panics if the miner doesnt have enough funds to cover their locked pledge
|
||||
available, err = s.GetAvailableBalance(bal)
|
||||
return available, err
|
||||
}
|
||||
|
||||
func (s *state3) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.CheckVestedFunds(s.store, epoch)
|
||||
}
|
||||
|
||||
func (s *state3) LockedFunds() (LockedFunds, error) {
|
||||
return LockedFunds{
|
||||
VestingFunds: s.State.LockedFunds,
|
||||
InitialPledgeRequirement: s.State.InitialPledge,
|
||||
PreCommitDeposits: s.State.PreCommitDeposits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) FeeDebt() (abi.TokenAmount, error) {
|
||||
return s.State.FeeDebt, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialPledge() (abi.TokenAmount, error) {
|
||||
return s.State.InitialPledge, nil
|
||||
}
|
||||
|
||||
func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) {
|
||||
return s.State.PreCommitDeposits, nil
|
||||
}
|
||||
|
||||
func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV3SectorOnChainInfo(*info)
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
|
||||
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SectorLocation{
|
||||
Deadline: dlIdx,
|
||||
Partition: partIdx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) NumLiveSectors() (uint64, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var total uint64
|
||||
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
|
||||
total += dl.LiveSectors
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// GetSectorExpiration returns the effective expiration of the given sector.
|
||||
//
|
||||
// If the sector does not expire early, the Early expiration field is 0.
|
||||
func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: this can be optimized significantly.
|
||||
// 1. If the sector is non-faulty, it will either expire on-time (can be
|
||||
// learned from the sector info), or in the next quantized expiration
|
||||
// epoch (i.e., the first element in the partition's expiration queue.
|
||||
// 2. If it's faulty, it will expire early within the first 14 entries
|
||||
// of the expiration queue.
|
||||
stopErr := errors.New("stop")
|
||||
out := SectorExpiration{}
|
||||
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
|
||||
partitions, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
quant := s.State.QuantSpecForDeadline(dlIdx)
|
||||
var part miner3.Partition
|
||||
return partitions.ForEach(&part, func(partIdx int64) error {
|
||||
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if !found {
|
||||
return nil
|
||||
}
|
||||
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if found {
|
||||
// already terminated
|
||||
return stopErr
|
||||
}
|
||||
|
||||
q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var exp miner3.ExpirationSet
|
||||
return q.ForEach(&exp, func(epoch int64) error {
|
||||
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if early {
|
||||
out.Early = abi.ChainEpoch(epoch)
|
||||
return nil
|
||||
}
|
||||
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if onTime {
|
||||
out.OnTime = abi.ChainEpoch(epoch)
|
||||
return stopErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
})
|
||||
if err == stopErr {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if out.Early == 0 && out.OnTime == 0 {
|
||||
return nil, xerrors.Errorf("failed to find sector %d", num)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV3SectorPreCommitOnChainInfo(*info)
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
|
||||
sectors, err := miner3.LoadSectors(s.store, s.State.Sectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If no sector numbers are specified, load all.
|
||||
if snos == nil {
|
||||
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
|
||||
var info2 miner3.SectorOnChainInfo
|
||||
if err := sectors.ForEach(&info2, func(_ int64) error {
|
||||
info := fromV3SectorOnChainInfo(info2)
|
||||
infos = append(infos, &info)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
// Otherwise, load selected.
|
||||
infos2, err := sectors.Load(*snos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos := make([]*SectorOnChainInfo, len(infos2))
|
||||
for i, info2 := range infos2 {
|
||||
info := fromV3SectorOnChainInfo(*info2)
|
||||
infos[i] = &info
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return allocatedSectors.IsSet(uint64(num))
|
||||
}
|
||||
|
||||
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dl, err := dls.LoadDeadline(s.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &deadline3{*dl, s.store}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error {
|
||||
return cb(i, &deadline3{*dl, s.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) NumDeadlines() (uint64, error) {
|
||||
return miner3.WPoStPeriodDeadlines, nil
|
||||
}
|
||||
|
||||
func (s *state3) DeadlinesChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !s.State.Deadlines.Equals(other2.Deadlines), nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerInfoChanged(other State) (bool, error) {
|
||||
other0, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Info.Equals(other0.State.Info), nil
|
||||
}
|
||||
|
||||
func (s *state3) Info() (MinerInfo, error) {
|
||||
info, err := s.State.GetInfo(s.store)
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
var pid *peer.ID
|
||||
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
ControlAddresses: info.ControlAddresses,
|
||||
|
||||
NewWorker: address.Undef,
|
||||
WorkerChangeEpoch: -1,
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
WindowPoStProofType: info.WindowPoStProofType,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
}
|
||||
|
||||
if info.PendingWorkerKey != nil {
|
||||
mi.NewWorker = info.PendingWorkerKey.NewWorker
|
||||
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
|
||||
}
|
||||
|
||||
return mi, nil
|
||||
}
|
||||
|
||||
func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
|
||||
return s.State.DeadlineInfo(epoch), nil
|
||||
}
|
||||
|
||||
func (s *state3) sectors() (adt.Array, error) {
|
||||
return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
|
||||
var si miner3.SectorOnChainInfo
|
||||
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV3SectorOnChainInfo(si), nil
|
||||
}
|
||||
|
||||
func (s *state3) precommits() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
|
||||
var sp miner3.SectorPreCommitOnChainInfo
|
||||
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorPreCommitOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV3SectorPreCommitOnChainInfo(sp), nil
|
||||
}
|
||||
|
||||
func (d *deadline3) LoadPartition(idx uint64) (Partition, error) {
|
||||
p, err := d.Deadline.LoadPartition(d.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &partition3{*p, d.store}, nil
|
||||
}
|
||||
|
||||
func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error {
|
||||
ps, err := d.Deadline.PartitionsArray(d.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var part miner3.Partition
|
||||
return ps.ForEach(&part, func(i int64) error {
|
||||
return cb(uint64(i), &partition3{part, d.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (d *deadline3) PartitionsChanged(other Deadline) (bool, error) {
|
||||
other2, ok := other.(*deadline3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PartitionsPoSted, nil
|
||||
}
|
||||
|
||||
func (d *deadline3) DisputableProofCount() (uint64, error) {
|
||||
ops, err := d.OptimisticProofsSnapshotArray(d.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return ops.Length(), nil
|
||||
}
|
||||
|
||||
func (p *partition3) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
||||
func (p *partition3) FaultySectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Faults, nil
|
||||
}
|
||||
|
||||
func (p *partition3) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
|
||||
return SectorOnChainInfo{
|
||||
SectorNumber: v3.SectorNumber,
|
||||
SealProof: v3.SealProof,
|
||||
SealedCID: v3.SealedCID,
|
||||
DealIDs: v3.DealIDs,
|
||||
Activation: v3.Activation,
|
||||
Expiration: v3.Expiration,
|
||||
DealWeight: v3.DealWeight,
|
||||
VerifiedDealWeight: v3.VerifiedDealWeight,
|
||||
InitialPledge: v3.InitialPledge,
|
||||
ExpectedDayReward: v3.ExpectedDayReward,
|
||||
ExpectedStoragePledge: v3.ExpectedStoragePledge,
|
||||
}
|
||||
}
|
||||
|
||||
func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
|
||||
return SectorPreCommitOnChainInfo{
|
||||
Info: (SectorPreCommitInfo)(v3.Info),
|
||||
PreCommitDeposit: v3.PreCommitDeposit,
|
||||
PreCommitEpoch: v3.PreCommitEpoch,
|
||||
DealWeight: v3.DealWeight,
|
||||
VerifiedDealWeight: v3.VerifiedDealWeight,
|
||||
}
|
||||
}
|
@ -9,14 +9,14 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
var Methods = builtin2.MethodsMultisig
|
||||
var Methods = builtin3.MethodsMultisig
|
||||
|
||||
func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
switch version {
|
||||
@ -24,6 +24,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
return message0{from}
|
||||
case actors.Version2:
|
||||
return message2{message0{from}}
|
||||
case actors.Version3:
|
||||
return message3{message0{from}}
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported actors version: %d", version))
|
||||
}
|
||||
@ -47,11 +49,11 @@ type MessageBuilder interface {
|
||||
}
|
||||
|
||||
// this type is the same between v0 and v2
|
||||
type ProposalHashData = multisig2.ProposalHashData
|
||||
type ProposeReturn = multisig2.ProposeReturn
|
||||
type ProposalHashData = multisig3.ProposalHashData
|
||||
type ProposeReturn = multisig3.ProposeReturn
|
||||
|
||||
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
|
||||
params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)}
|
||||
params := multisig3.TxnIDParams{ID: multisig3.TxnID(id)}
|
||||
if data != nil {
|
||||
if data.Requester.Protocol() != address.ID {
|
||||
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
|
||||
|
71
chain/actors/builtin/multisig/message3.go
Normal file
71
chain/actors/builtin/multisig/message3.go
Normal file
@ -0,0 +1,71 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message3 struct{ message0 }
|
||||
|
||||
func (m message3) Create(
|
||||
signers []address.Address, threshold uint64,
|
||||
unlockStart, unlockDuration abi.ChainEpoch,
|
||||
initialAmount abi.TokenAmount,
|
||||
) (*types.Message, error) {
|
||||
|
||||
lenAddrs := uint64(len(signers))
|
||||
|
||||
if lenAddrs < threshold {
|
||||
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
|
||||
}
|
||||
|
||||
if threshold == 0 {
|
||||
threshold = lenAddrs
|
||||
}
|
||||
|
||||
if m.from == address.Undef {
|
||||
return nil, xerrors.Errorf("must provide source address")
|
||||
}
|
||||
|
||||
// Set up constructor parameters for multisig
|
||||
msigParams := &multisig3.ConstructorParams{
|
||||
Signers: signers,
|
||||
NumApprovalsThreshold: threshold,
|
||||
UnlockDuration: unlockDuration,
|
||||
StartEpoch: unlockStart,
|
||||
}
|
||||
|
||||
enc, actErr := actors.SerializeParams(msigParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
// new actors are created by invoking 'exec' on the init actor with the constructor params
|
||||
execParams := &init3.ExecParams{
|
||||
CodeCID: builtin3.MultisigActorCodeID,
|
||||
ConstructorParams: enc,
|
||||
}
|
||||
|
||||
enc, actErr = actors.SerializeParams(execParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Method: builtin3.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
Value: initialAmount,
|
||||
}, nil
|
||||
}
|
@ -12,6 +12,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -25,6 +26,9 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -33,6 +37,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.MultisigActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.MultisigActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
@ -13,8 +13,8 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
multisig0 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
|
||||
)
|
||||
|
||||
var _ State = (*state0)(nil)
|
||||
|
95
chain/actors/builtin/multisig/state3.go
Normal file
95
chain/actors/builtin/multisig/state3.go
Normal file
@ -0,0 +1,95 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
msig3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
|
||||
}
|
||||
|
||||
func (s *state3) StartEpoch() (abi.ChainEpoch, error) {
|
||||
return s.State.StartEpoch, nil
|
||||
}
|
||||
|
||||
func (s *state3) UnlockDuration() (abi.ChainEpoch, error) {
|
||||
return s.State.UnlockDuration, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialBalance() (abi.TokenAmount, error) {
|
||||
return s.State.InitialBalance, nil
|
||||
}
|
||||
|
||||
func (s *state3) Threshold() (uint64, error) {
|
||||
return s.State.NumApprovalsThreshold, nil
|
||||
}
|
||||
|
||||
func (s *state3) Signers() ([]address.Address, error) {
|
||||
return s.State.Signers, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
|
||||
arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var out msig3.Transaction
|
||||
return arr.ForEach(&out, func(key string) error {
|
||||
txid, n := binary.Varint([]byte(key))
|
||||
if n <= 0 {
|
||||
return xerrors.Errorf("invalid pending transaction key: %v", key)
|
||||
}
|
||||
return cb(txid, (Transaction)(out))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) PendingTxnChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.PendingTxns.Equals(other2.PendingTxns), nil
|
||||
}
|
||||
|
||||
func (s *state3) transactions() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
|
||||
var tx msig3.Transaction
|
||||
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Transaction{}, err
|
||||
}
|
||||
return tx, nil
|
||||
}
|
@ -8,10 +8,10 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
var Methods = builtin2.MethodsPaych
|
||||
var Methods = builtin3.MethodsPaych
|
||||
|
||||
func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
switch version {
|
||||
@ -19,6 +19,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
return message0{from}
|
||||
case actors.Version2:
|
||||
return message2{from}
|
||||
case actors.Version3:
|
||||
return message3{from}
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported actors version: %d", version))
|
||||
}
|
||||
|
74
chain/actors/builtin/paych/message3.go
Normal file
74
chain/actors/builtin/paych/message3.go
Normal file
@ -0,0 +1,74 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message3 struct{ from address.Address }
|
||||
|
||||
func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
enc, aerr := actors.SerializeParams(&init3.ExecParams{
|
||||
CodeCID: builtin3.PaymentChannelActorCodeID,
|
||||
ConstructorParams: params,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Value: initialAmount,
|
||||
Method: builtin3.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{
|
||||
Sv: *sv,
|
||||
Secret: secret,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.UpdateChannelState,
|
||||
Params: params,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Settle(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.Settle,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Collect(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.Collect,
|
||||
}, nil
|
||||
}
|
@ -15,6 +15,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -28,6 +29,9 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
// Load returns an abstract copy of payment channel state, irregardless of actor version
|
||||
@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.PaymentChannelActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.PaymentChannelActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
104
chain/actors/builtin/paych/state3.go
Normal file
104
chain/actors/builtin/paych/state3.go
Normal file
@ -0,0 +1,104 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
paych3.State
|
||||
store adt.Store
|
||||
lsAmt *adt3.Array
|
||||
}
|
||||
|
||||
// Channel owner, who has funded the actor
|
||||
func (s *state3) From() (address.Address, error) {
|
||||
return s.State.From, nil
|
||||
}
|
||||
|
||||
// Recipient of payouts from channel
|
||||
func (s *state3) To() (address.Address, error) {
|
||||
return s.State.To, nil
|
||||
}
|
||||
|
||||
// Height at which the channel can be `Collected`
|
||||
func (s *state3) SettlingAt() (abi.ChainEpoch, error) {
|
||||
return s.State.SettlingAt, nil
|
||||
}
|
||||
|
||||
// Amount successfully redeemed through the payment channel, paid out on `Collect()`
|
||||
func (s *state3) ToSend() (abi.TokenAmount, error) {
|
||||
return s.State.ToSend, nil
|
||||
}
|
||||
|
||||
func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) {
|
||||
if s.lsAmt != nil {
|
||||
return s.lsAmt, nil
|
||||
}
|
||||
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := adt3.AsArray(s.store, s.State.LaneStates, paych3.LaneStatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.lsAmt = lsamt
|
||||
return lsamt, nil
|
||||
}
|
||||
|
||||
// Get total number of lanes
|
||||
func (s *state3) LaneCount() (uint64, error) {
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return lsamt.Length(), nil
|
||||
}
|
||||
|
||||
// Iterate lane states
|
||||
func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: we use a map instead of an array to store laneStates because the
|
||||
// client sets the lane ID (the index) and potentially they could use a
|
||||
// very large index.
|
||||
var ls paych3.LaneState
|
||||
return lsamt.ForEach(&ls, func(i int64) error {
|
||||
return cb(uint64(i), &laneState3{ls})
|
||||
})
|
||||
}
|
||||
|
||||
type laneState3 struct {
|
||||
paych3.LaneState
|
||||
}
|
||||
|
||||
func (ls *laneState3) Redeemed() (big.Int, error) {
|
||||
return ls.LaneState.Redeemed, nil
|
||||
}
|
||||
|
||||
func (ls *laneState3) Nonce() (uint64, error) {
|
||||
return ls.LaneState.Nonce, nil
|
||||
}
|
@ -16,6 +16,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -25,11 +26,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.StoragePowerActorAddr
|
||||
Methods = builtin2.MethodsPower
|
||||
Address = builtin3.StoragePowerActorAddr
|
||||
Methods = builtin3.MethodsPower
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StoragePowerActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StoragePowerActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
149
chain/actors/builtin/power/v3.go
Normal file
149
chain/actors/builtin/power/v3.go
Normal file
@ -0,0 +1,149 @@
|
||||
package power
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
power3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) TotalLocked() (abi.TokenAmount, error) {
|
||||
return s.TotalPledgeCollateral, nil
|
||||
}
|
||||
|
||||
func (s *state3) TotalPower() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalRawBytePower,
|
||||
QualityAdjPower: s.TotalQualityAdjPower,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Committed power to the network. Includes miners below the minimum threshold.
|
||||
func (s *state3) TotalCommitted() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalBytesCommitted,
|
||||
QualityAdjPower: s.TotalQABytesCommitted,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
var claim power3.Claim
|
||||
ok, err := claims.Get(abi.AddrKey(addr), &claim)
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
return Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
}, ok, nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
|
||||
return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
|
||||
}
|
||||
|
||||
func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
|
||||
return builtin.FromV3FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerCounts() (uint64, uint64, error) {
|
||||
return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
|
||||
}
|
||||
|
||||
func (s *state3) ListAllMiners() ([]address.Address, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var miners []address.Address
|
||||
err = claims.ForEach(nil, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
miners = append(miners, a)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return miners, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var claim power3.Claim
|
||||
return claims.ForEach(&claim, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) ClaimsChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Claims.Equals(other2.State.Claims), nil
|
||||
}
|
||||
|
||||
func (s *state3) claims() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) {
|
||||
var ci power3.Claim
|
||||
if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Claim{}, err
|
||||
}
|
||||
return fromV3Claim(ci), nil
|
||||
}
|
||||
|
||||
func fromV3Claim(v3 power3.Claim) Claim {
|
||||
return Claim{
|
||||
RawBytePower: v3.RawBytePower,
|
||||
QualityAdjPower: v3.QualityAdjPower,
|
||||
}
|
||||
}
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -22,11 +23,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.RewardActorAddr
|
||||
Methods = builtin2.MethodsReward
|
||||
Address = builtin3.RewardActorAddr
|
||||
Methods = builtin3.MethodsReward
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.RewardActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.RewardActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
86
chain/actors/builtin/reward/v3.go
Normal file
86
chain/actors/builtin/reward/v3.go
Normal file
@ -0,0 +1,86 @@
|
||||
package reward
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward"
|
||||
smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
reward3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochReward() (abi.TokenAmount, error) {
|
||||
return s.State.ThisEpochReward, nil
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
|
||||
return builtin.FilterEstimate{
|
||||
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
|
||||
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) {
|
||||
return s.State.ThisEpochBaselinePower, nil
|
||||
}
|
||||
|
||||
func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) {
|
||||
return s.State.TotalStoragePowerReward, nil
|
||||
}
|
||||
|
||||
func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) {
|
||||
return s.State.EffectiveBaselinePower, nil
|
||||
}
|
||||
|
||||
func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) {
|
||||
return s.State.EffectiveNetworkTime, nil
|
||||
}
|
||||
|
||||
func (s *state3) CumsumBaseline() (reward3.Spacetime, error) {
|
||||
return s.State.CumsumBaseline, nil
|
||||
}
|
||||
|
||||
func (s *state3) CumsumRealized() (reward3.Spacetime, error) {
|
||||
return s.State.CumsumRealized, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
return miner3.InitialPledgeForPower(
|
||||
qaPower,
|
||||
s.State.ThisEpochBaselinePower,
|
||||
s.State.ThisEpochRewardSmoothed,
|
||||
smoothing3.FilterEstimate{
|
||||
PositionEstimate: networkQAPower.PositionEstimate,
|
||||
VelocityEstimate: networkQAPower.VelocityEstimate,
|
||||
},
|
||||
circSupply,
|
||||
), nil
|
||||
}
|
||||
|
||||
func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
|
||||
return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
|
||||
smoothing3.FilterEstimate{
|
||||
PositionEstimate: networkQAPower.PositionEstimate,
|
||||
VelocityEstimate: networkQAPower.VelocityEstimate,
|
||||
},
|
||||
sectorWeight), nil
|
||||
}
|
@ -6,16 +6,21 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) {
|
||||
// taking this as a function instead of asking the caller to call it helps reduce some of the error
|
||||
// checking boilerplate.
|
||||
//
|
||||
// "go made me do it"
|
||||
type rootFunc func() (adt.Map, error)
|
||||
|
||||
// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
|
||||
func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) {
|
||||
if addr.Protocol() != address.ID {
|
||||
return false, big.Zero(), xerrors.Errorf("can only look up ID addresses")
|
||||
}
|
||||
|
||||
vh, err := adt.AsMap(store, root, ver)
|
||||
vh, err := root()
|
||||
if err != nil {
|
||||
return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err)
|
||||
}
|
||||
@ -30,8 +35,9 @@ func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.
|
||||
return true, dcap, nil
|
||||
}
|
||||
|
||||
func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
vh, err := adt.AsMap(store, root, ver)
|
||||
// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
|
||||
func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
vh, err := root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("loading verified clients: %w", err)
|
||||
}
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state0)(nil)
|
||||
@ -32,17 +33,25 @@ func (s *state0) RootKey() (address.Address, error) {
|
||||
}
|
||||
|
||||
func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr)
|
||||
return getDataCap(s.store, actors.Version0, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr)
|
||||
return getDataCap(s.store, actors.Version0, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb)
|
||||
return forEachCap(s.store, actors.Version0, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb)
|
||||
return forEachCap(s.store, actors.Version0, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state0) verifiedClients() (adt.Map, error) {
|
||||
return adt0.AsMap(s.store, s.VerifiedClients)
|
||||
}
|
||||
|
||||
func (s *state0) verifiers() (adt.Map, error) {
|
||||
return adt0.AsMap(s.store, s.Verifiers)
|
||||
}
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
|
||||
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state2)(nil)
|
||||
@ -32,17 +33,25 @@ func (s *state2) RootKey() (address.Address, error) {
|
||||
}
|
||||
|
||||
func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr)
|
||||
return getDataCap(s.store, actors.Version2, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr)
|
||||
return getDataCap(s.store, actors.Version2, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb)
|
||||
return forEachCap(s.store, actors.Version2, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb)
|
||||
return forEachCap(s.store, actors.Version2, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state2) verifiedClients() (adt.Map, error) {
|
||||
return adt2.AsMap(s.store, s.VerifiedClients)
|
||||
}
|
||||
|
||||
func (s *state2) verifiers() (adt.Map, error) {
|
||||
return adt2.AsMap(s.store, s.Verifiers)
|
||||
}
|
||||
|
58
chain/actors/builtin/verifreg/v3.go
Normal file
58
chain/actors/builtin/verifreg/v3.go
Normal file
@ -0,0 +1,58 @@
|
||||
package verifreg
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
verifreg3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) RootKey() (address.Address, error) {
|
||||
return s.State.RootKey, nil
|
||||
}
|
||||
|
||||
func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version3, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version3, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version3, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version3, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state3) verifiedClients() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) verifiers() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
|
||||
}
|
@ -3,6 +3,7 @@ package verifreg
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -22,11 +23,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.VerifiedRegistryActorAddr
|
||||
Methods = builtin2.MethodsVerifiedRegistry
|
||||
Address = builtin3.VerifiedRegistryActorAddr
|
||||
Methods = builtin3.MethodsVerifiedRegistry
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.VerifiedRegistryActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.VerifiedRegistryActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
@ -6,21 +6,28 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
|
||||
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
|
||||
)
|
||||
|
||||
const (
|
||||
ChainFinality = miner0.ChainFinality
|
||||
ChainFinality = miner3.ChainFinality
|
||||
SealRandomnessLookback = ChainFinality
|
||||
PaychSettleDelay = paych2.SettleDelay
|
||||
PaychSettleDelay = paych3.SettleDelay
|
||||
)
|
||||
|
||||
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
||||
@ -31,6 +38,10 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
|
||||
miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
|
||||
miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
AddSupportedProofTypes(types...)
|
||||
}
|
||||
|
||||
@ -49,6 +60,13 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV0[t] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV7[t] = struct{}{}
|
||||
miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
@ -58,6 +76,7 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
|
||||
// Set for all miner versions.
|
||||
miner0.PreCommitChallengeDelay = delay
|
||||
miner2.PreCommitChallengeDelay = delay
|
||||
miner3.PreCommitChallengeDelay = delay
|
||||
}
|
||||
|
||||
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
|
||||
@ -73,6 +92,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
|
||||
for _, policy := range builtin2.SealProofPolicies {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
|
||||
for _, policy := range builtin3.PoStProofPolicies {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
}
|
||||
|
||||
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
|
||||
@ -80,6 +103,7 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
|
||||
func SetMinVerifiedDealSize(size abi.StoragePower) {
|
||||
verifreg0.MinVerifiedDealSize = size
|
||||
verifreg2.MinVerifiedDealSize = size
|
||||
verifreg3.MinVerifiedDealSize = size
|
||||
}
|
||||
|
||||
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
|
||||
@ -88,6 +112,8 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab
|
||||
return miner0.MaxSealDuration[t]
|
||||
case actors.Version2:
|
||||
return miner2.MaxProveCommitDuration[t]
|
||||
case actors.Version3:
|
||||
return miner3.MaxProveCommitDuration[t]
|
||||
default:
|
||||
panic("unsupported actors version")
|
||||
}
|
||||
@ -103,6 +129,8 @@ func DealProviderCollateralBounds(
|
||||
return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
|
||||
case actors.Version2:
|
||||
return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
case actors.Version3:
|
||||
return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
default:
|
||||
panic("unsupported network version")
|
||||
}
|
||||
@ -116,6 +144,12 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
|
||||
|
||||
miner2.WPoStChallengeWindow = period
|
||||
miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines)
|
||||
|
||||
miner3.WPoStChallengeWindow = period
|
||||
miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines)
|
||||
// by default, this is 2x finality which is 30 periods.
|
||||
// scale it if we're scaling the challenge period.
|
||||
miner3.WPoStDisputeWindow = period * 30
|
||||
}
|
||||
|
||||
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
|
||||
@ -132,17 +166,17 @@ func GetMaxSectorExpirationExtension() abi.ChainEpoch {
|
||||
|
||||
// TODO: we'll probably need to abstract over this better in the future.
|
||||
func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
|
||||
sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
|
||||
sectorsPerPart, err := builtin3.PoStProofWindowPoStPartitionSectors(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(miner2.AddressedSectorsMax / sectorsPerPart), nil
|
||||
return int(miner3.AddressedSectorsMax / sectorsPerPart), nil
|
||||
}
|
||||
|
||||
func GetDefaultSectorSize() abi.SectorSize {
|
||||
// supported sector sizes are the same across versions.
|
||||
szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8))
|
||||
for spt := range miner2.PreCommitSealProofTypesV8 {
|
||||
szs := make([]abi.SectorSize, 0, len(miner3.PreCommitSealProofTypesV8))
|
||||
for spt := range miner3.PreCommitSealProofTypesV8 {
|
||||
ss, err := spt.SectorSize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -11,6 +11,7 @@ type Version int
|
||||
const (
|
||||
Version0 Version = 0
|
||||
Version2 Version = 2
|
||||
Version3 Version = 3
|
||||
)
|
||||
|
||||
// Converts a network version into an actors adt version.
|
||||
@ -18,8 +19,10 @@ func VersionForNetwork(version network.Version) Version {
|
||||
switch version {
|
||||
case network.Version0, network.Version1, network.Version2, network.Version3:
|
||||
return Version0
|
||||
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8:
|
||||
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
|
||||
return Version2
|
||||
case network.Version10:
|
||||
return Version3
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported network version %d", version))
}
|
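The hunk above routes network.Version9 into the actors Version2 bucket and maps the new network.Version10 to Version3. A minimal hedged sketch of exercising that mapping as a table-driven test, assumed to live in the same actors package with the usual go-state-types import:

package actors

import (
	"testing"

	"github.com/filecoin-project/go-state-types/network"
)

func TestVersionForNetwork(t *testing.T) {
	cases := map[network.Version]Version{
		network.Version8:  Version2,
		network.Version9:  Version2, // newly added to the Version2 bucket
		network.Version10: Version3, // actors v3
	}
	for nv, want := range cases {
		if got := VersionForNetwork(nv); got != want {
			t.Fatalf("network version %d: got actors version %d, want %d", nv, got, want)
		}
	}
}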
@ -56,7 +56,11 @@ func (s *server) HandleStream(stream inet.Stream) {
|
||||
}
|
||||
|
||||
_ = stream.SetDeadline(time.Now().Add(WriteResDeadline))
|
||||
if err := cborutil.WriteCborRPC(stream, resp); err != nil {
|
||||
buffered := bufio.NewWriter(stream)
|
||||
if err = cborutil.WriteCborRPC(buffered, resp); err == nil {
|
||||
err = buffered.Flush()
|
||||
}
|
||||
if err != nil {
|
||||
_ = stream.SetDeadline(time.Time{})
|
||||
log.Warnw("failed to write back response for handle stream",
|
||||
"err", err, "peer", stream.Conn().RemotePeer())
|
||||
|
@ -14,7 +14,6 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/google/uuid"
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
@ -85,19 +84,6 @@ type ChainGen struct {
|
||||
lr repo.LockedRepo
|
||||
}
|
||||
|
||||
type mybs struct {
|
||||
blockstore.Blockstore
|
||||
}
|
||||
|
||||
func (m mybs) Get(c cid.Cid) (block.Block, error) {
|
||||
b, err := m.Blockstore.Get(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var rootkeyMultisig = genesis.MultisigMeta{
|
||||
Signers: []address.Address{remAccTestKey},
|
||||
Threshold: 1,
|
||||
@ -152,8 +138,6 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
bs = mybs{bs}
|
||||
|
||||
ks, err := lr.KeyStore()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting repo keystore failed: %w", err)
|
||||
@ -465,7 +449,12 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
|
||||
}
|
||||
}
|
||||
|
||||
return store.NewFullTipSet(blks), nil
|
||||
fts := store.NewFullTipSet(blks)
|
||||
if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fts, nil
|
||||
}
|
||||
|
||||
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
|
||||
|
@ -9,10 +9,10 @@ import (
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
)
|
||||
|
||||
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) {
|
||||
@ -140,35 +140,29 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA
|
||||
}
|
||||
|
||||
func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
|
||||
sigsS := make([][]byte, len(sigs))
|
||||
sigsS := make([]ffi.Signature, len(sigs))
|
||||
for i := 0; i < len(sigs); i++ {
|
||||
sigsS[i] = sigs[i].Data
|
||||
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
|
||||
}
|
||||
|
||||
aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS)
|
||||
if aggregator == nil {
|
||||
aggSig := ffi.Aggregate(sigsS)
|
||||
if aggSig == nil {
|
||||
if len(sigs) > 0 {
|
||||
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
|
||||
}
|
||||
|
||||
zeroSig := ffi.CreateZeroSignature()
|
||||
|
||||
// Note: for blst this condition should not happen - nil should not
|
||||
// be returned
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: new(bls.Signature).Compress(),
|
||||
Data: zeroSig[:],
|
||||
}, nil
|
||||
}
|
||||
aggSigAff := aggregator.ToAffine()
|
||||
if aggSigAff == nil {
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: new(bls.Signature).Compress(),
|
||||
}, nil
|
||||
}
|
||||
aggSig := aggSigAff.Compress()
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: aggSig,
|
||||
Data: aggSig[:],
|
||||
}, nil
|
||||
}
|
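The rewrite above replaces the blst bindings with filecoin-ffi's Aggregate and zero-signature helpers. A hedged sketch of the matching verification path, built only from ffi calls that appear elsewhere in this diff (Aggregate, HashVerify); the signatures, messages and public keys are placeholders supplied by the caller:

package example // sketch only, not part of the commit

import (
	ffi "github.com/filecoin-project/filecoin-ffi"
)

// VerifyAggregated aggregates the given BLS signatures and checks the result
// against the corresponding messages and public keys.
func VerifyAggregated(sigs []ffi.Signature, msgs []ffi.Message, pubks []ffi.PublicKey) bool {
	agg := ffi.Aggregate(sigs)
	if agg == nil {
		// with the blst backend this should not happen; treat it as a failure
		return false
	}
	return ffi.HashVerify(agg, msgs, pubks)
}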
@ -3,6 +3,8 @@ package slashfilter
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -26,6 +28,10 @@ func New(dstore ds.Batching) *SlashFilter {
|
||||
}
|
||||
|
||||
func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error {
|
||||
if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) {
|
||||
return nil
|
||||
}
|
||||
|
||||
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
|
||||
{
|
||||
// double-fork mining (2 blocks at one epoch)
|
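The new guard above skips the slash filter near the Orange upgrade boundary. build.IsNearUpgrade itself is not part of this diff; a plausible hedged sketch of such a helper, assuming it compares the epoch against the upgrade height within a finality window:

package example // sketch only; the real build.IsNearUpgrade may differ

import "github.com/filecoin-project/go-state-types/abi"

// isNearUpgrade reports whether epoch falls within +/- finality epochs of the
// upgrade epoch, where finality would be a build-level constant in lotus.
func isNearUpgrade(epoch, upgradeEpoch, finality abi.ChainEpoch) bool {
	return epoch > upgradeEpoch-finality && epoch < upgradeEpoch+finality
}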
@ -2,6 +2,7 @@ package market
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -129,6 +130,11 @@ func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Addres
|
||||
return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt)
|
||||
}
|
||||
|
||||
// GetReserved returns the amount that is currently reserved for the address
|
||||
func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount {
|
||||
return fm.getFundedAddress(addr).getReserved()
|
||||
}
|
||||
|
||||
// FundedAddressState keeps track of the state of an address with funds in the
|
||||
// datastore
|
||||
type FundedAddressState struct {
|
||||
@ -147,7 +153,7 @@ type fundedAddress struct {
|
||||
env *fundManagerEnvironment
|
||||
str *Store
|
||||
|
||||
lk sync.Mutex
|
||||
lk sync.RWMutex
|
||||
state *FundedAddressState
|
||||
|
||||
// Note: These request queues are ephemeral, they are not saved to store
|
||||
@ -183,6 +189,13 @@ func (a *fundedAddress) start() {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *fundedAddress) getReserved() abi.TokenAmount {
|
||||
a.lk.RLock()
|
||||
defer a.lk.RUnlock()
|
||||
|
||||
return a.state.AmtReserved
|
||||
}
|
||||
|
||||
func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
|
||||
return a.requestAndWait(ctx, wallet, amt, &a.reservations)
|
||||
}
|
||||
@ -501,7 +514,13 @@ func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid c
|
||||
// request with an error
|
||||
newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt)
|
||||
if newWithdrawalAmt.GreaterThan(netAvail) {
|
||||
err := xerrors.Errorf("insufficient funds for withdrawal of %d", amt)
|
||||
msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt))
|
||||
msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)",
|
||||
types.FIL(types.BigSub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved))
|
||||
if !withdrawalAmt.IsZero() {
|
||||
msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt))
|
||||
}
|
||||
err := xerrors.Errorf(msg)
|
||||
a.debugf("%s", err)
|
||||
req.Complete(cid.Undef, err)
|
||||
continue
|
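For the accounting behind the new error message: a withdrawal is allowed only while already-queued withdrawals plus the new request stay within available minus reserved funds. A hedged sketch of the same check using go-state-types big arithmetic:

package example // sketch only

import "github.com/filecoin-project/go-state-types/big"

// canWithdraw mirrors the check above: net available = available - reserved,
// and the running withdrawal total must not exceed it.
func canWithdraw(avail, reserved, queued, req big.Int) bool {
	netAvail := big.Sub(avail, reserved)
	return !big.Add(queued, req).GreaterThan(netAvail)
}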
@ -20,6 +20,10 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
states0 "github.com/filecoin-project/specs-actors/actors/states"
|
||||
states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
|
||||
states3 "github.com/filecoin-project/specs-actors/v3/actors/states"
|
||||
)
|
||||
|
||||
var log = logging.Logger("statetree")
|
||||
@ -144,23 +148,12 @@ func VersionForNetwork(ver network.Version) types.StateTreeVersion {
|
||||
return types.StateTreeVersion1
|
||||
}
|
||||
|
||||
func adtForSTVersion(ver types.StateTreeVersion) actors.Version {
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
return actors.Version0
|
||||
case types.StateTreeVersion1:
|
||||
return actors.Version2
|
||||
default:
|
||||
panic("unhandled state tree version")
|
||||
}
|
||||
}
|
||||
|
||||
func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, error) {
|
||||
var info cid.Cid
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
// info is undefined
|
||||
case types.StateTreeVersion1:
|
||||
case types.StateTreeVersion1, types.StateTreeVersion2:
|
||||
var err error
|
||||
info, err = cst.Put(context.TODO(), new(types.StateInfo0))
|
||||
if err != nil {
|
||||
@ -169,13 +162,34 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
|
||||
}
|
||||
root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
store := adt.WrapStore(context.TODO(), cst)
|
||||
var hamt adt.Map
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
tree, err := states0.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
case types.StateTreeVersion1:
|
||||
tree, err := states2.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
case types.StateTreeVersion2:
|
||||
tree, err := states3.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
|
||||
}
|
||||
|
||||
s := &StateTree{
|
||||
root: root,
|
||||
root: hamt,
|
||||
info: info,
|
||||
version: ver,
|
||||
Store: cst,
|
||||
@ -194,30 +208,49 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
|
||||
root.Version = types.StateTreeVersion0
|
||||
}
|
||||
|
||||
switch root.Version {
|
||||
case types.StateTreeVersion0, types.StateTreeVersion1:
|
||||
// Load the actual state-tree HAMT.
|
||||
nd, err := adt.AsMap(
|
||||
adt.WrapStore(context.TODO(), cst), root.Actors,
|
||||
adtForSTVersion(root.Version),
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("loading hamt node %s failed: %s", c, err)
|
||||
return nil, err
|
||||
}
|
||||
store := adt.WrapStore(context.TODO(), cst)
|
||||
|
||||
s := &StateTree{
|
||||
root: nd,
|
||||
info: root.Info,
|
||||
version: root.Version,
|
||||
Store: cst,
|
||||
snaps: newStateSnaps(),
|
||||
var (
|
||||
hamt adt.Map
|
||||
err error
|
||||
)
|
||||
switch root.Version {
|
||||
case types.StateTreeVersion0:
|
||||
var tree *states0.Tree
|
||||
tree, err = states0.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
case types.StateTreeVersion1:
|
||||
var tree *states2.Tree
|
||||
tree, err = states2.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
case types.StateTreeVersion2:
|
||||
var tree *states3.Tree
|
||||
tree, err = states3.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
s.lookupIDFun = s.lookupIDinternal
|
||||
return s, nil
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("failed to load state tree: %s", err)
|
||||
return nil, xerrors.Errorf("failed to load state tree: %w", err)
|
||||
}
|
||||
|
||||
s := &StateTree{
|
||||
root: hamt,
|
||||
info: root.Info,
|
||||
version: root.Version,
|
||||
Store: cst,
|
||||
snaps: newStateSnaps(),
|
||||
}
|
||||
s.lookupIDFun = s.lookupIDinternal
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
|
||||
|
@ -4,7 +4,12 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/rt"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -29,29 +34,95 @@ import (
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
|
||||
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// UpgradeFunc is a migration function run at every upgrade.
|
||||
// MigrationCache can be used to cache information used by a migration. This is primarily useful to
|
||||
// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself.
|
||||
type MigrationCache interface {
|
||||
Write(key string, value cid.Cid) error
|
||||
Read(key string) (bool, cid.Cid, error)
|
||||
Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error)
|
||||
}
|
||||
|
||||
// MigrationFunc is a migration function run at every upgrade.
|
||||
//
|
||||
// - The cache is a per-upgrade cache, pre-populated by pre-migrations.
|
||||
// - The oldState is the state produced by the upgrade epoch.
|
||||
// - The returned newState is the new state that will be used by the next epoch.
|
||||
// - The height is the upgrade epoch height (already executed).
|
||||
// - The tipset is the tipset for the last non-null block before the upgrade. Do
|
||||
// not assume that ts.Height() is the upgrade height.
|
||||
type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error)
|
||||
type MigrationFunc func(
|
||||
ctx context.Context,
|
||||
sm *StateManager, cache MigrationCache,
|
||||
cb ExecCallback, oldState cid.Cid,
|
||||
height abi.ChainEpoch, ts *types.TipSet,
|
||||
) (newState cid.Cid, err error)
|
||||
|
||||
// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network
|
||||
// upgrade and speed it up.
|
||||
type PreMigrationFunc func(
|
||||
ctx context.Context,
|
||||
sm *StateManager, cache MigrationCache,
|
||||
oldState cid.Cid,
|
||||
height abi.ChainEpoch, ts *types.TipSet,
|
||||
) error
|
||||
|
||||
// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations
|
||||
// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times.
|
||||
type PreMigration struct {
|
||||
// PreMigration is the pre-migration function to run at the specified time. This function is
|
||||
// run asynchronously and must abort promptly when canceled.
|
||||
PreMigration PreMigrationFunc
|
||||
|
||||
// StartWithin specifies that this pre-migration should be started at most StartWithin
|
||||
// epochs before the upgrade.
|
||||
StartWithin abi.ChainEpoch
|
||||
|
||||
// DontStartWithin specifies that this pre-migration should not be started DontStartWithin
|
||||
// epochs before the final upgrade epoch.
|
||||
//
|
||||
// This should be set such that the pre-migration is likely to complete before StopWithin.
|
||||
DontStartWithin abi.ChainEpoch
|
||||
|
||||
// StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the
|
||||
// final upgrade epoch.
|
||||
StopWithin abi.ChainEpoch
|
||||
}
|
||||
|
||||
type Upgrade struct {
|
||||
Height abi.ChainEpoch
|
||||
Network network.Version
|
||||
Expensive bool
|
||||
Migration UpgradeFunc
|
||||
Migration MigrationFunc
|
||||
|
||||
// PreMigrations specifies a set of pre-migration functions to run at the indicated epochs.
|
||||
// These functions should fill the given cache with information that can speed up the
|
||||
// eventual full migration at the upgrade epoch.
|
||||
PreMigrations []PreMigration
|
||||
}
|
||||
|
||||
type UpgradeSchedule []Upgrade
||||
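For orientation, a hedged example of how these types compose into a schedule entry, modeled on the actors v3 entry added to DefaultUpgradeSchedule further down; the height is illustrative, not a real network parameter:

// Sketch only: epoch values are illustrative, not mainnet parameters.
var exampleSchedule = UpgradeSchedule{{
	Height:    123456, // hypothetical upgrade epoch
	Network:   network.Version10,
	Expensive: true,
	Migration: UpgradeActorsV3,
	PreMigrations: []PreMigration{{
		PreMigration:    PreUpgradeActorsV3,
		StartWithin:     120, // start at most 120 epochs before the upgrade
		DontStartWithin: 60,  // but not once fewer than 60 epochs remain
		StopWithin:      35,  // cancel when within 35 epochs of the upgrade
	}},
}}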
type migrationLogger struct{}
|
||||
|
||||
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
|
||||
switch level {
|
||||
case rt.DEBUG:
|
||||
log.Debugf(msg, args...)
|
||||
case rt.INFO:
|
||||
log.Infof(msg, args...)
|
||||
case rt.WARN:
|
||||
log.Warnf(msg, args...)
|
||||
case rt.ERROR:
|
||||
log.Errorf(msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
var us UpgradeSchedule
|
||||
|
||||
@ -96,32 +167,28 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradePersianHeight,
|
||||
Network: network.Version8,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeOrangeHeight,
|
||||
Network: network.Version9,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeActorsV3Height,
|
||||
Network: network.Version10,
|
||||
Migration: UpgradeActorsV3,
|
||||
PreMigrations: []PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV3,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
StopWithin: 35,
|
||||
}, {
|
||||
PreMigration: PreUpgradeActorsV3,
|
||||
StartWithin: 30,
|
||||
DontStartWithin: 15,
|
||||
StopWithin: 5,
|
||||
}},
|
||||
Expensive: true,
|
||||
}}
|
||||
|
||||
if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
|
||||
updates = []Upgrade{{
|
||||
Height: build.UpgradeBreezeHeight,
|
||||
Network: network.Version1,
|
||||
Migration: UpgradeFaucetBurnRecovery,
|
||||
}, {
|
||||
Height: build.UpgradeSmokeHeight,
|
||||
Network: network.Version2,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeIgnitionHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeIgnition,
|
||||
}, {
|
||||
Height: build.UpgradeRefuelHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeRefuel,
|
||||
}, {
|
||||
Height: build.UpgradeLiftoffHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeLiftoff,
|
||||
}}
|
||||
}
|
||||
|
||||
for _, u := range updates {
|
||||
if u.Height < 0 {
|
||||
// upgrade disabled
|
||||
@ -133,14 +200,43 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
}
|
||||
|
||||
func (us UpgradeSchedule) Validate() error {
|
||||
// Make sure we're not trying to upgrade to version 0.
|
||||
// Make sure each upgrade is valid.
|
||||
for _, u := range us {
|
||||
if u.Network <= 0 {
|
||||
return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network)
|
||||
}
|
||||
|
||||
for _, m := range u.PreMigrations {
|
||||
if m.StartWithin <= 0 {
|
||||
return xerrors.Errorf("pre-migration must specify a positive start-within epoch")
|
||||
}
|
||||
|
||||
if m.DontStartWithin < 0 || m.StopWithin < 0 {
|
||||
return xerrors.Errorf("pre-migration must specify non-negative epochs")
|
||||
}
|
||||
|
||||
if m.StartWithin <= m.StopWithin {
|
||||
return xerrors.Errorf("pre-migration start-within must come before stop-within")
|
||||
}
|
||||
|
||||
// If we have a dont-start-within.
|
||||
if m.DontStartWithin != 0 {
|
||||
if m.DontStartWithin < m.StopWithin {
|
||||
return xerrors.Errorf("pre-migration dont-start-within must come before stop-within")
|
||||
}
|
||||
if m.StartWithin <= m.DontStartWithin {
|
||||
return xerrors.Errorf("pre-migration start-within must come after dont-start-within")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool {
|
||||
return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec
|
||||
}) {
|
||||
return xerrors.Errorf("pre-migrations must be sorted by start epoch")
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all the upgrades make sense.
|
||||
// Make sure the upgrade order makes sense.
|
||||
for i := 1; i < len(us); i++ {
|
||||
prev := &us[i-1]
|
||||
curr := &us[i]
|
||||
@ -162,12 +258,26 @@ func (us UpgradeSchedule) Validate() error {
|
||||
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
|
||||
retCid := root
|
||||
var err error
|
||||
f, ok := sm.stateMigrations[height]
|
||||
if ok {
|
||||
retCid, err = f(ctx, sm, cb, root, height, ts)
|
||||
u := sm.stateMigrations[height]
|
||||
if u != nil && u.upgrade != nil {
|
||||
startTime := time.Now()
|
||||
log.Warnw("STARTING migration", "height", height)
|
||||
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
|
||||
// have to migrate multiple times.
|
||||
tmpCache := u.cache.Clone()
|
||||
retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
|
||||
if err != nil {
|
||||
log.Errorw("FAILED migration", "height", height, "error", err)
|
||||
return cid.Undef, err
|
||||
}
|
||||
// Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This
|
||||
// can save us a _lot_ of time because very few actors will have changed if we
|
||||
// do a small revert then need to re-run the migration.
|
||||
u.cache.Update(tmpCache)
|
||||
log.Warnw("COMPLETED migration",
|
||||
"height", height,
|
||||
"duration", time.Since(startTime),
|
||||
)
|
||||
}
|
||||
|
||||
return retCid, nil
||||
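The clone-then-commit pattern above (and again in runPreMigration below) can be read as a small helper; a hedged sketch assuming the Clone and Update methods of nv10.MemMigrationCache behave as they are used in this diff:

// runWithCache runs work against a clone of the shared cache and only folds
// the result back in on success, so a failed run never pollutes the cache.
func runWithCache(shared *nv10.MemMigrationCache, work func(*nv10.MemMigrationCache) error) error {
	tmp := shared.Clone()
	if err := work(tmp); err != nil {
		return err
	}
	shared.Update(tmp)
	return nil
}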
@ -178,6 +288,109 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp
|
||||
return ok
|
||||
}
|
||||
|
||||
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) {
|
||||
height := ts.Height()
|
||||
parent := ts.ParentState()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
log.Warn("STARTING pre-migration")
|
||||
// Clone the cache so we don't actually _update_ it
|
||||
// till we're done. Otherwise, if we fail, the next
|
||||
// migration to use the cache may assume that
|
||||
// certain blocks exist, even if they don't.
|
||||
tmpCache := cache.Clone()
|
||||
err := fn(ctx, sm, tmpCache, parent, height, ts)
|
||||
if err != nil {
|
||||
log.Errorw("FAILED pre-migration", "error", err)
|
||||
return
|
||||
}
|
||||
// Finally, if everything worked, update the cache.
|
||||
cache.Update(tmpCache)
|
||||
log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime))
|
||||
}
|
||||
|
||||
func (sm *StateManager) preMigrationWorker(ctx context.Context) {
|
||||
defer close(sm.shutdown)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
type op struct {
|
||||
after abi.ChainEpoch
|
||||
notAfter abi.ChainEpoch
|
||||
run func(ts *types.TipSet)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
// Turn each pre-migration into an operation in a schedule.
|
||||
var schedule []op
|
||||
for upgradeEpoch, migration := range sm.stateMigrations {
|
||||
cache := migration.cache
|
||||
for _, prem := range migration.preMigrations {
|
||||
preCtx, preCancel := context.WithCancel(ctx)
|
||||
migrationFunc := prem.PreMigration
|
||||
|
||||
afterEpoch := upgradeEpoch - prem.StartWithin
|
||||
notAfterEpoch := upgradeEpoch - prem.DontStartWithin
|
||||
stopEpoch := upgradeEpoch - prem.StopWithin
|
||||
// We can't start after we stop.
|
||||
if notAfterEpoch > stopEpoch {
|
||||
notAfterEpoch = stopEpoch - 1
|
||||
}
|
||||
|
||||
// Add an op to start a pre-migration.
|
||||
schedule = append(schedule, op{
|
||||
after: afterEpoch,
|
||||
notAfter: notAfterEpoch,
|
||||
|
||||
// TODO: are these values correct?
|
||||
run: func(ts *types.TipSet) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
runPreMigration(preCtx, sm, migrationFunc, cache, ts)
|
||||
}()
|
||||
},
|
||||
})
|
||||
|
||||
// Add an op to cancel the pre-migration if it's still running.
|
||||
schedule = append(schedule, op{
|
||||
after: stopEpoch,
|
||||
notAfter: -1,
|
||||
run: func(ts *types.TipSet) { preCancel() },
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Then sort by epoch.
|
||||
sort.Slice(schedule, func(i, j int) bool {
|
||||
return schedule[i].after < schedule[j].after
|
||||
})
|
||||
|
||||
// Finally, when the head changes, see if there's anything we need to do.
|
||||
//
|
||||
// We're intentionally ignoring reorgs as they don't matter for our purposes.
|
||||
for change := range sm.cs.SubHeadChanges(ctx) {
|
||||
for _, head := range change {
|
||||
for len(schedule) > 0 {
|
||||
op := &schedule[0]
|
||||
if head.Val.Height() < op.after {
|
||||
break
|
||||
}
|
||||
|
||||
// If we haven't passed the pre-migration height...
|
||||
if op.notAfter < 0 || head.Val.Height() < op.notAfter {
|
||||
op.run(head.Val)
|
||||
}
|
||||
schedule = schedule[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
|
||||
fromAct, err := tree.GetActor(from)
|
||||
if err != nil {
|
||||
@ -231,7 +444,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
|
||||
return nil
|
||||
}
|
||||
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Some initial parameters
|
||||
FundsForMiners := types.FromFil(1_000_000)
|
||||
LookbackEpoch := abi.ChainEpoch(32000)
|
||||
@ -517,7 +730,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.cs.Store(ctx)
|
||||
|
||||
if build.UpgradeLiftoffHeight <= epoch {
|
||||
@ -572,7 +785,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
|
||||
store := sm.cs.Store(ctx)
|
||||
tree, err := sm.StateTree(root)
|
||||
@ -598,7 +811,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
@ -644,7 +857,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
tree, err := sm.StateTree(root)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
|
||||
@ -658,7 +871,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.cs.Store(ctx)
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
@ -700,6 +913,98 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
workerCount = 1
|
||||
}
|
||||
|
||||
config := nv10.Config{MaxWorkers: uint(workerCount)}
|
||||
newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
|
||||
}
|
||||
|
||||
// perform some basic sanity checks to make sure everything still works.
|
||||
store := store.ActorStore(ctx, sm.ChainStore().Blockstore())
|
||||
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
|
||||
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
|
||||
} else if newRoot2 != newRoot {
|
||||
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
|
||||
} else if _, err := newSm.GetActor(init_.Address); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
workerCount = 1
|
||||
} else {
|
||||
workerCount /= 2
|
||||
}
|
||||
config := nv10.Config{MaxWorkers: uint(workerCount)}
|
||||
_, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
return err
|
||||
}
|
||||
|
||||
func upgradeActorsV3Common(
|
||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv10.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
|
||||
}
|
||||
|
||||
if stateRoot.Version != types.StateTreeVersion1 {
|
||||
return cid.Undef, xerrors.Errorf(
|
||||
"expected state root version 1 for actors v3 upgrade, got %d",
|
||||
stateRoot.Version,
|
||||
)
|
||||
}
|
||||
|
||||
// Perform the migration
|
||||
newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
|
||||
}
|
||||
|
||||
// Persist the result.
|
||||
newRoot, err := store.Put(ctx, &types.StateRoot{
|
||||
Version: types.StateTreeVersion2,
|
||||
Actors: newHamtRoot,
|
||||
Info: stateRoot.Info,
|
||||
})
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
|
||||
}
|
||||
|
||||
// Persist the new tree.
|
||||
|
||||
{
|
||||
from := buf
|
||||
to := buf.Read()
|
||||
|
||||
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
|
||||
ia, err := tree.GetActor(builtin0.InitActorAddr)
|
||||
if err != nil {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -122,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) {
|
||||
cg.ChainStore(), UpgradeSchedule{{
|
||||
Network: 1,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore())
|
||||
|
||||
@ -252,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) {
|
||||
Network: 1,
|
||||
Expensive: true,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
return root, nil
|
||||
}}})
|
||||
@ -317,3 +318,166 @@ func TestForkRefuseCall(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestForkPreMigration(t *testing.T) {
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fooCid, err := abi.CidBuilder.Sum([]byte("foo"))
|
||||
require.NoError(t, err)
|
||||
|
||||
barCid, err := abi.CidBuilder.Sum([]byte("bar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
failCid, err := abi.CidBuilder.Sum([]byte("fail"))
|
||||
require.NoError(t, err)
|
||||
|
||||
var wait20 sync.WaitGroup
|
||||
wait20.Add(3)
|
||||
|
||||
wasCanceled := make(chan struct{})
|
||||
|
||||
checkCache := func(t *testing.T, cache MigrationCache) {
|
||||
found, value, err := cache.Read("foo")
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, fooCid, value)
|
||||
|
||||
found, value, err = cache.Read("bar")
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, barCid, value)
|
||||
|
||||
found, _, err = cache.Read("fail")
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
}
|
||||
|
||||
counter := make(chan struct{}, 10)
|
||||
|
||||
sm, err := NewStateManagerWithUpgradeSchedule(
|
||||
cg.ChainStore(), UpgradeSchedule{{
|
||||
Network: 1,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
|
||||
// Make sure the test that should be canceled, is canceled.
|
||||
select {
|
||||
case <-wasCanceled:
|
||||
case <-ctx.Done():
|
||||
return cid.Undef, ctx.Err()
|
||||
}
|
||||
|
||||
// the cache should be setup correctly.
|
||||
checkCache(t, cache)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return root, nil
|
||||
},
|
||||
PreMigrations: []PreMigration{{
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("foo", fooCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("bar", barCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("fail", failCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
// Fail this migration. The cached entry should not be persisted.
|
||||
return fmt.Errorf("failed")
|
||||
},
|
||||
}, {
|
||||
StartWithin: 15,
|
||||
StopWithin: 5,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
|
||||
<-ctx.Done()
|
||||
close(wasCanceled)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 10,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
|
||||
checkCache(t, cache)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}}},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, sm.Start(context.Background()))
|
||||
defer func() {
|
||||
require.NoError(t, sm.Stop(context.Background()))
|
||||
}()
|
||||
|
||||
inv := vm.NewActorRegistry()
|
||||
inv.Register(nil, testActor{})
|
||||
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||
nvm, err := vm.NewVM(ctx, vmopt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nvm.SetInvoker(inv)
|
||||
return nvm, nil
|
||||
})
|
||||
|
||||
cg.SetStateManager(sm)
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
_, err := cg.NextTipSet()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
// We have 5 pre-migration steps, and the migration. They should all have written something
|
||||
// to this channel.
|
||||
require.Equal(t, 6, len(counter))
|
||||
}
|
||||
|
@ -20,6 +20,10 @@ import (
|
||||
|
||||
// Used for genesis.
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
|
||||
|
||||
// we use the same adt for all receipts
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
@ -42,6 +46,7 @@ import (
|
||||
)
|
||||
|
||||
const LookbackNoLimit = abi.ChainEpoch(-1)
|
||||
const ReceiptAmtBitwidth = 3
|
||||
|
||||
var log = logging.Logger("statemgr")
|
||||
|
||||
@ -58,15 +63,24 @@ type versionSpec struct {
|
||||
atOrBelow abi.ChainEpoch
|
||||
}
|
||||
|
||||
type migration struct {
|
||||
upgrade MigrationFunc
|
||||
preMigrations []PreMigration
|
||||
cache *nv10.MemMigrationCache
|
||||
}
|
||||
|
||||
type StateManager struct {
|
||||
cs *store.ChainStore
|
||||
|
||||
cancel context.CancelFunc
|
||||
shutdown chan struct{}
|
||||
|
||||
// Determines the network version at any given epoch.
|
||||
networkVersions []versionSpec
|
||||
latestVersion network.Version
|
||||
|
||||
// Maps chain epochs to upgrade functions.
|
||||
stateMigrations map[abi.ChainEpoch]UpgradeFunc
|
||||
// Maps chain epochs to migrations.
|
||||
stateMigrations map[abi.ChainEpoch]*migration
|
||||
// A set of potentially expensive/time consuming upgrades. Explicit
|
||||
// calls for, e.g., gas estimation fail against this epoch with
|
||||
// ErrExpensiveFork.
|
||||
@ -99,7 +113,7 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us))
|
||||
stateMigrations := make(map[abi.ChainEpoch]*migration, len(us))
|
||||
expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us))
|
||||
var networkVersions []versionSpec
|
||||
lastVersion := network.Version0
|
||||
@ -107,8 +121,13 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
|
||||
// If we have any upgrades, process them and create a version
|
||||
// schedule.
|
||||
for _, upgrade := range us {
|
||||
if upgrade.Migration != nil {
|
||||
stateMigrations[upgrade.Height] = upgrade.Migration
|
||||
if upgrade.Migration != nil || upgrade.PreMigrations != nil {
|
||||
migration := &migration{
|
||||
upgrade: upgrade.Migration,
|
||||
preMigrations: upgrade.PreMigrations,
|
||||
cache: nv10.NewMemMigrationCache(),
|
||||
}
|
||||
stateMigrations[upgrade.Height] = migration
|
||||
}
|
||||
if upgrade.Expensive {
|
||||
expensiveUpgrades[upgrade.Height] = struct{}{}
|
||||
@ -144,6 +163,33 @@ func cidsToKey(cids []cid.Cid) string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Start starts the state manager's optional background processes. At the moment, this schedules
|
||||
// pre-migration functions to run ahead of network upgrades.
|
||||
//
|
||||
// This method is not safe to invoke from multiple threads or concurrently with Stop.
|
||||
func (sm *StateManager) Start(context.Context) error {
|
||||
var ctx context.Context
|
||||
ctx, sm.cancel = context.WithCancel(context.Background())
|
||||
sm.shutdown = make(chan struct{})
|
||||
go sm.preMigrationWorker(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the state manager's background processes.
|
||||
//
|
||||
// This method is not safe to invoke concurrently with Start.
|
||||
func (sm *StateManager) Stop(ctx context.Context) error {
|
||||
if sm.cancel != nil {
|
||||
sm.cancel()
|
||||
select {
|
||||
case <-sm.shutdown:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
||||
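A hedged usage sketch of the new lifecycle hooks, mirroring how the pre-migration test later in this diff drives them: start the background worker once, and make sure it is stopped on shutdown.

// Sketch only: sm is a *StateManager constructed elsewhere.
func runStateManager(ctx context.Context, sm *StateManager) error {
	if err := sm.Start(ctx); err != nil {
		return err
	}
	defer func() {
		if err := sm.Stop(ctx); err != nil {
			log.Warnf("stopping state manager: %s", err)
		}
	}()
	// ... normal operation ...
	return nil
}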
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "tipSetState")
|
||||
defer span.End()
|
||||
@ -384,11 +430,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
|
||||
return cid.Cid{}, cid.Cid{}, err
|
||||
}
|
||||
|
||||
// XXX: Is the height correct? Or should it be epoch-1?
|
||||
rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch)))
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err)
|
||||
}
|
||||
rectarr := blockadt.MakeEmptyArray(sm.cs.Store(ctx))
|
||||
for i, receipt := range receipts {
|
||||
if err := rectarr.Set(uint64(i), receipt); err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
||||
@ -473,13 +515,26 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad
|
||||
ts = sm.cs.GetHeaviestTipSet()
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.Blockstore())
|
||||
|
||||
// First try to resolve the actor in the parent state, so we don't have to compute anything.
|
||||
tree, err := state.LoadStateTree(cst, ts.ParentState())
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err)
|
||||
}
|
||||
|
||||
resolved, err := vm.ResolveToKeyAddr(tree, cst, addr)
|
||||
if err == nil {
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
// If that fails, compute the tip-set and try again.
|
||||
st, _, err := sm.TipSetState(ctx, ts)
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err)
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.Blockstore())
|
||||
tree, err := state.LoadStateTree(cst, st)
|
||||
tree, err = state.LoadStateTree(cst, st)
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("failed to load state tree")
|
||||
}
|
||||
@ -639,7 +694,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
|
||||
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
|
||||
msg, err := sm.cs.GetCMessage(mcid)
|
||||
if err != nil {
|
||||
return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
|
||||
@ -656,7 +711,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty
|
||||
return head, r, foundMsg, nil
|
||||
}
|
||||
|
||||
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, LookbackNoLimit)
|
||||
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit)
|
||||
|
||||
if err != nil {
|
||||
log.Warnf("failed to look back through chain for message %s", mcid)
|
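Callers of SearchForMessage now choose how far back to search; a hedged sketch of preserving the old unbounded behaviour with the LookbackNoLimit constant declared earlier in this file:

// Sketch only: find a message receipt without bounding the lookback.
func findReceipt(ctx context.Context, sm *StateManager, mcid cid.Cid) (*types.MessageReceipt, error) {
	_, receipt, _, err := sm.SearchForMessage(ctx, mcid, LookbackNoLimit)
	return receipt, err
}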
@ -25,6 +25,7 @@ import (
|
||||
|
||||
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
|
||||
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
|
||||
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -207,17 +208,17 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
|
||||
return nil, xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
wpt, err := info.SealProofType.RegisteredWinningPoStProof()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting window proof type: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(maddr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting miner ID: %w", err)
|
||||
}
|
||||
|
||||
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect)
|
||||
proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("determining winning post proof type: %w", err)
|
||||
}
|
||||
|
||||
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
|
||||
}
|
||||
@ -560,6 +561,7 @@ func init() {
|
||||
var actors []rt.VMActor
|
||||
actors = append(actors, exported0.BuiltinActors()...)
|
||||
actors = append(actors, exported2.BuiltinActors()...)
|
||||
actors = append(actors, exported3.BuiltinActors()...)
|
||||
|
||||
for _, actor := range actors {
|
||||
exports := actor.Exports()
|
||||
|
@ -363,7 +363,7 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
|
||||
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
|
||||
// internal state as our new head, if and only if it is heavier than the current
|
||||
// head.
|
||||
// head and does not exceed the maximum fork length.
|
||||
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
cs.heaviestLk.Lock()
|
||||
defer cs.heaviestLk.Unlock()
|
||||
@ -380,6 +380,15 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
||||
// TODO: don't do this for initial sync. Now that we don't have a
|
||||
// difference between 'bootstrap sync' and 'caught up' sync, we need
|
||||
// some other heuristic.
|
||||
|
||||
exceeds, err := cs.exceedsForkLength(cs.heaviest, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exceeds {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cs.takeHeaviestTipSet(ctx, ts)
|
||||
} else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
|
||||
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
|
||||
@ -387,6 +396,67 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
|
||||
// `synced` is the head of the chain we are currently synced to and `external`
|
||||
// is the incoming tipset potentially belonging to a forked chain. It assumes
|
||||
// the external chain has already been validated and is available in the ChainStore.
|
||||
// The "fast forward" case is covered in this logic as a valid fork of length 0.
|
||||
//
|
||||
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
|
||||
// `syncFork()` counts the length on both sides of the fork at the moment (we
|
||||
// need to settle on that) but here we just enforce it on the `synced` side.
|
||||
func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) {
|
||||
if synced == nil || external == nil {
|
||||
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
|
||||
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
|
||||
// functions having to handle the nil case on their own).
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
// `forkLength`: number of tipsets we need to walk back from our `synced`
|
||||
// chain to the common ancestor with the new `external` head in order to
|
||||
// adopt the fork.
|
||||
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
|
||||
// First walk back as many tipsets in the external chain as needed to match the
|
||||
// `synced` height to compare them. If we go past the `synced` height
|
||||
// the subsequent match will fail but it will still be useful to get
|
||||
// closer to the `synced` head parent's height in the next loop.
|
||||
for external.Height() > synced.Height() {
|
||||
if external.Height() == 0 {
|
||||
// We reached the genesis of the external chain without a match;
|
||||
// this is considered a fork outside the allowed limit (of "infinite"
|
||||
// length).
|
||||
return true, nil
|
||||
}
|
||||
external, err = cs.LoadTipSet(external.Parents())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Now check if we arrived at the common ancestor.
|
||||
if synced.Equals(external) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If we didn't, go back *one* tipset on the `synced` side (incrementing
|
||||
// the `forkLength`).
|
||||
if synced.Height() == 0 {
|
||||
// Same check as the `external` side, if we reach the start (genesis)
|
||||
// there is no common ancestor.
|
||||
return true, nil
|
||||
}
|
||||
synced, err = cs.LoadTipSet(synced.Parents())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// We traversed the fork length allowed without finding a common ancestor.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// ForceHeadSilent forces a chain head tipset without triggering a reorg
|
||||
// operation.
|
||||
//
|
||||
@ -524,9 +594,13 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
|
||||
// FlushValidationCache removes all results of block validation from the
|
||||
// chain metadata store. Usually the first step after a new chain import.
|
||||
func (cs *ChainStore) FlushValidationCache() error {
|
||||
return FlushValidationCache(cs.ds)
|
||||
}
|
||||
|
||||
func FlushValidationCache(ds datastore.Batching) error {
|
||||
log.Infof("clearing block validation cache...")
|
||||
|
||||
dsWalk, err := cs.ds.Query(query.Query{
|
||||
dsWalk, err := ds.Query(query.Query{
|
||||
// Potential TODO: the validation cache is not a namespace on its own
|
||||
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
|
||||
// in turn does not work with the filter, which can match only on `foo/bar`
|
||||
@ -546,7 +620,7 @@ func (cs *ChainStore) FlushValidationCache() error {
|
||||
return xerrors.Errorf("failed to run key listing query: %w", err)
|
||||
}
|
||||
|
||||
batch, err := cs.ds.Batch()
|
||||
batch, err := ds.Batch()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open a DS batch: %w", err)
|
||||
}
|
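With this refactor the validation cache can be cleared either through a ChainStore or directly against a raw datastore; a hedged sketch of the latter, e.g. right after an offline chain import:

// Sketch only: ds is any datastore.Batching holding the chain metadata.
func clearValidationCache(ds datastore.Batching) error {
	if err := FlushValidationCache(ds); err != nil {
		return xerrors.Errorf("flushing validation cache: %w", err)
	}
	return nil
}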
@ -34,7 +34,8 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
blst "github.com/supranational/blst/bindings/go"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
|
||||
// named msgarray here to make it clear that these are the types used by
|
||||
// messages, regardless of specs-actors version.
|
||||
@ -55,7 +56,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
bstore "github.com/filecoin-project/lotus/lib/blockstore"
|
||||
"github.com/filecoin-project/lotus/lib/sigs"
|
||||
"github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
)
|
||||
|
||||
@ -250,18 +250,6 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
|
||||
|
||||
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
|
||||
|
||||
if from == syncer.self {
|
||||
// TODO: this is kindof a hack...
|
||||
log.Debug("got block from ourselves")
|
||||
|
||||
if err := syncer.Sync(ctx, fts.TipSet()); err != nil {
|
||||
log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
|
||||
// the blockstore
|
||||
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
|
||||
@ -688,6 +676,10 @@ func blockSanityChecks(h *types.BlockHeader) error {
|
||||
return xerrors.Errorf("block had nil bls aggregate signature")
|
||||
}
|
||||
|
||||
if h.Miner.Protocol() != address.ID {
|
||||
return xerrors.Errorf("block had non-ID miner address")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1190,17 +1182,21 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat
|
||||
trace.Int64Attribute("msgCount", int64(len(msgs))),
|
||||
)
|
||||
|
||||
msgsS := make([]blst.Message, len(msgs))
|
||||
msgsS := make([]ffi.Message, len(msgs))
|
||||
pubksS := make([]ffi.PublicKey, len(msgs))
|
||||
for i := 0; i < len(msgs); i++ {
|
||||
msgsS[i] = msgs[i].Bytes()
|
||||
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
|
||||
}
|
||||
|
||||
sigS := new(ffi.Signature)
|
||||
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
|
||||
|
||||
if len(msgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks,
|
||||
msgsS, []byte(bls.DST))
|
||||
valid := ffi.HashVerify(sigS, msgsS, pubksS)
|
||||
if !valid {
|
||||
return xerrors.New("bls aggregate signature failed to verify")
|
||||
}
|
||||
@ -1449,7 +1445,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
|
||||
return nil, ErrForkCheckpoint
|
||||
}
|
||||
|
||||
// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
|
||||
// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes.
|
||||
// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
|
||||
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
|
||||
if err != nil {
|
||||
@ -1460,6 +1456,10 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
|
||||
}
|
||||
// Track the fork length on our side of the synced chain to enforce
|
||||
// `ForkLengthThreshold`. Initialized to 1 because we already walked back
|
||||
// one tipset from `known` (our synced head).
|
||||
forkLengthInHead := 1
|
||||
|
||||
for cur := 0; cur < len(tips); {
|
||||
if nts.Height() == 0 {
|
||||
@ -1476,6 +1476,13 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
|
||||
if nts.Height() < tips[cur].Height() {
|
||||
cur++
|
||||
} else {
|
||||
// Walk back one block in our synced chain to try to meet the fork's
|
||||
// height.
|
||||
forkLengthInHead++
|
||||
if forkLengthInHead > int(build.ForkLengthThreshold) {
|
||||
return nil, ErrForkTooLong
|
||||
}
|
||||
|
||||
// We will be forking away from nts, check that it isn't checkpointed
|
||||
if nts.Key() == chkpt {
|
||||
return nil, ErrForkCheckpoint
|
||||
|
@ -117,6 +117,13 @@ func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
|
||||
// get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2)
|
||||
bop = <-stc
|
||||
if bop.ts.Equals(c2) {
|
||||
// there's a small race and we might get c2 first.
|
||||
// But we should still end on c1.
|
||||
bop.done()
|
||||
bop = <-stc
|
||||
}
|
||||
|
||||
if !bop.ts.Equals(c1) {
|
||||
t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts)
|
||||
}
|
||||
@ -143,8 +150,11 @@ func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
t.Fatalf("Expected tipset %s to sync, but got %s", e1, last)
|
||||
}
|
||||
|
||||
if len(sm.state) != 0 {
|
||||
t.Errorf("active syncs expected empty but got: %d", len(sm.state))
|
||||
sm.mx.Lock()
|
||||
activeSyncs := len(sm.state)
|
||||
sm.mx.Unlock()
|
||||
if activeSyncs != 0 {
|
||||
t.Errorf("active syncs expected empty but got: %d", activeSyncs)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ func (f FIL) Unitless() string {
|
||||
var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"}
|
||||
|
||||
func (f FIL) Short() string {
|
||||
n := BigInt(f)
|
||||
n := BigInt(f).Abs()
|
||||
|
||||
dn := uint64(1)
|
||||
var prefix string
|
||||
@ -70,7 +70,7 @@ func (f FIL) UnmarshalText(text []byte) error {
|
||||
}
|
||||
|
||||
func ParseFIL(s string) (FIL, error) {
|
||||
suffix := strings.TrimLeft(s, ".1234567890")
|
||||
suffix := strings.TrimLeft(s, "-.1234567890")
|
||||
s = s[:len(s)-len(suffix)]
|
||||
var attofil bool
|
||||
if suffix != "" {
|
||||
|
@ -57,6 +57,52 @@ func TestFilShort(t *testing.T) {
|
||||
{fil: "0.000221234", expect: "221.234 μFIL"},
|
||||
{fil: "0.0002212344", expect: "221.234 μFIL"},
|
||||
{fil: "0.00022123444", expect: "221.234 μFIL"},
|
||||
|
||||
{fil: "-1", expect: "-1 FIL"},
|
||||
{fil: "-1.1", expect: "-1.1 FIL"},
|
||||
{fil: "-12", expect: "-12 FIL"},
|
||||
{fil: "-123", expect: "-123 FIL"},
|
||||
{fil: "-123456", expect: "-123456 FIL"},
|
||||
{fil: "-123.23", expect: "-123.23 FIL"},
|
||||
{fil: "-123456.234", expect: "-123456.234 FIL"},
|
||||
{fil: "-123456.2341234", expect: "-123456.234 FIL"},
|
||||
{fil: "-123456.234123445", expect: "-123456.234 FIL"},
|
||||
|
||||
{fil: "-0.1", expect: "-100 mFIL"},
|
||||
{fil: "-0.01", expect: "-10 mFIL"},
|
||||
{fil: "-0.001", expect: "-1 mFIL"},
|
||||
|
||||
{fil: "-0.0001", expect: "-100 μFIL"},
|
||||
{fil: "-0.00001", expect: "-10 μFIL"},
|
||||
{fil: "-0.000001", expect: "-1 μFIL"},
|
||||
|
||||
{fil: "-0.0000001", expect: "-100 nFIL"},
|
||||
{fil: "-0.00000001", expect: "-10 nFIL"},
|
||||
{fil: "-0.000000001", expect: "-1 nFIL"},
|
||||
|
||||
{fil: "-0.0000000001", expect: "-100 pFIL"},
|
||||
{fil: "-0.00000000001", expect: "-10 pFIL"},
|
||||
{fil: "-0.000000000001", expect: "-1 pFIL"},
|
||||
|
||||
{fil: "-0.0000000000001", expect: "-100 fFIL"},
|
||||
{fil: "-0.00000000000001", expect: "-10 fFIL"},
|
||||
{fil: "-0.000000000000001", expect: "-1 fFIL"},
|
||||
|
||||
{fil: "-0.0000000000000001", expect: "-100 aFIL"},
|
||||
{fil: "-0.00000000000000001", expect: "-10 aFIL"},
|
||||
{fil: "-0.000000000000000001", expect: "-1 aFIL"},
|
||||
|
||||
{fil: "-0.0000012", expect: "-1.2 μFIL"},
|
||||
{fil: "-0.00000123", expect: "-1.23 μFIL"},
|
||||
{fil: "-0.000001234", expect: "-1.234 μFIL"},
|
||||
{fil: "-0.0000012344", expect: "-1.234 μFIL"},
|
||||
{fil: "-0.00000123444", expect: "-1.234 μFIL"},
|
||||
|
||||
{fil: "-0.0002212", expect: "-221.2 μFIL"},
|
||||
{fil: "-0.00022123", expect: "-221.23 μFIL"},
|
||||
{fil: "-0.000221234", expect: "-221.234 μFIL"},
|
||||
{fil: "-0.0002212344", expect: "-221.234 μFIL"},
|
||||
{fil: "-0.00022123444", expect: "-221.234 μFIL"},
|
||||
} {
|
||||
s := s
|
||||
t.Run(s.fil, func(t *testing.T) {
|
||||
|
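The two changes above (taking Abs() in Short() and adding '-' to the TrimLeft set in ParseFIL) are what make the negative cases in this table pass. A minimal round-trip sketch, assuming only types.ParseFIL and FIL.Short as shown in these hunks:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	// Negative amounts now parse (the leading '-' is no longer treated as a
	// unit suffix) and Short() picks the unit prefix from the absolute value.
	for _, s := range []string{"-1.1", "-0.000001", "-0.0002212344"} {
		f, err := types.ParseFIL(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", s, f.Short())
	}
	// Per the table above: -1.1 FIL, -1 μFIL, -221.234 μFIL
}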
@ -9,8 +9,10 @@ type StateTreeVersion uint64
|
||||
const (
|
||||
// StateTreeVersion0 corresponds to actors < v2.
|
||||
StateTreeVersion0 StateTreeVersion = iota
|
||||
// StateTreeVersion1 corresponds to actors >= v2.
|
||||
// StateTreeVersion1 corresponds to actors v2
|
||||
StateTreeVersion1
|
||||
// StateTreeVersion2 corresponds to actors >= v3.
|
||||
StateTreeVersion2
|
||||
)
|
||||
|
||||
type StateRoot struct {
|
||||
|
@ -67,7 +67,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
|
||||
return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64()
|
||||
}
|
||||
|
||||
func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs {
|
||||
func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount, chargeNetworkFee bool) GasOutputs {
|
||||
gasUsedBig := big.NewInt(gasUsed)
|
||||
out := ZeroGasOutputs()
|
||||
|
||||
@ -76,7 +76,12 @@ func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.
|
||||
baseFeeToPay = feeCap
|
||||
out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig)
|
||||
}
|
||||
out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig)
|
||||
|
||||
// If chargeNetworkFee is disabled, just skip computing the BaseFeeBurn. However,
|
||||
// we charge all the other fees regardless.
|
||||
if chargeNetworkFee {
|
||||
out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig)
|
||||
}
|
||||
|
||||
minerTip := gasPremium
|
||||
if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 {
|
||||
|
@ -63,7 +63,7 @@ func TestGasOutputs(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
|
||||
output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium))
|
||||
output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium), true)
|
||||
i2s := func(i uint64) string {
|
||||
return fmt.Sprintf("%d", i)
|
||||
}
|
||||
|
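A hedged sketch of what the new chargeNetworkFee flag changes: only the base-fee burn is skipped, every other output is computed as before. BaseFeeBurn and MinerPenalty are field names taken from the hunks above; the numeric inputs below are hypothetical.

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

func main() {
	baseFee := types.NewInt(10)
	feeCap := types.NewInt(20)
	premium := types.NewInt(1)

	// Same message, once with the network fee charged and once without
	// (the decision ShouldBurn now makes for successful window posts).
	withBurn := vm.ComputeGasOutputs(1000, 2000, baseFee, feeCap, premium, true)
	noBurn := vm.ComputeGasOutputs(1000, 2000, baseFee, feeCap, premium, false)

	// Only BaseFeeBurn differs between the two calls.
	fmt.Println("base fee burn (charged):", withBurn.BaseFeeBurn)
	fmt.Println("base fee burn (skipped):", noBurn.BaseFeeBurn)
	fmt.Println("miner penalty (same):   ", withBurn.MinerPenalty, noBurn.MinerPenalty)
}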
@ -17,6 +17,7 @@ import (
|
||||
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
|
||||
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
|
||||
vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime"
|
||||
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
@ -62,6 +63,7 @@ func NewActorRegistry() *ActorRegistry {
|
||||
// add builtInCode using: register(cid, singleton)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
|
||||
|
||||
return inv
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||
@ -91,6 +92,8 @@ func newAccountActor(ver actors.Version) *types.Actor {
|
||||
code = builtin0.AccountActorCodeID
|
||||
case actors.Version2:
|
||||
code = builtin2.AccountActorCodeID
|
||||
case actors.Version3:
|
||||
code = builtin3.AccountActorCodeID
|
||||
default:
|
||||
panic("unsupported actors version")
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
@ -107,11 +108,18 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
|
||||
return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr)
|
||||
}
|
||||
|
||||
// workaround chain halt
|
||||
if build.IsNearUpgrade(blockA.Height, build.UpgradeOrangeHeight) {
|
||||
return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange")
|
||||
}
|
||||
if build.IsNearUpgrade(blockB.Height, build.UpgradeOrangeHeight) {
|
||||
return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange")
|
||||
}
|
||||
|
||||
// are blocks the same?
|
||||
if blockA.Cid().Equals(blockB.Cid()) {
|
||||
return nil, fmt.Errorf("no consensus fault: submitted blocks are the same")
|
||||
}
|
||||
|
||||
// (1) check conditions necessary to any consensus fault
|
||||
|
||||
// were blocks mined by same miner?
|
||||
|
@ -32,6 +32,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/account"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -42,9 +43,11 @@ import (
|
||||
|
||||
const MaxCallDepth = 4096
|
||||
|
||||
var log = logging.Logger("vm")
|
||||
var actorLog = logging.Logger("actors")
|
||||
var gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
|
||||
var (
|
||||
log = logging.Logger("vm")
|
||||
actorLog = logging.Logger("actors")
|
||||
gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
|
||||
)
|
||||
|
||||
// stat counters
|
||||
var (
|
||||
@ -71,8 +74,10 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad
|
||||
return aast.PubkeyAddress()
|
||||
}
|
||||
|
||||
var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
|
||||
var _ blockstore.Viewer = (*gasChargingBlocks)(nil)
|
||||
var (
|
||||
_ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
|
||||
_ blockstore.Viewer = (*gasChargingBlocks)(nil)
|
||||
)
|
||||
|
||||
type gasChargingBlocks struct {
|
||||
chargeGas func(GasCharge)
|
||||
@ -193,9 +198,11 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim
|
||||
return vm.VM.makeRuntime(ctx, msg, nil)
|
||||
}
|
||||
|
||||
type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
|
||||
type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
|
||||
type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
|
||||
type (
|
||||
CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
|
||||
NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
|
||||
LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
|
||||
)
|
||||
|
||||
type VM struct {
|
||||
cstate *state.StateTree
|
||||
@ -264,7 +271,6 @@ type ApplyRet struct {
|
||||
|
||||
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
||||
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
|
||||
|
||||
defer atomic.AddUint64(&StatSends, 1)
|
||||
|
||||
st := vm.cstate
|
||||
@ -561,7 +567,13 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
if gasUsed < 0 {
|
||||
gasUsed = 0
|
||||
}
|
||||
gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium)
|
||||
|
||||
burn, err := vm.ShouldBurn(st, msg, errcode)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("deciding whether should burn failed: %w", err)
|
||||
}
|
||||
|
||||
gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium, burn)
|
||||
|
||||
if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder,
|
||||
gasOutputs.BaseFeeBurn); err != nil {
|
||||
@ -599,6 +611,29 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (vm *VM) ShouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
|
||||
// Check to see if we should burn funds. We avoid burning on successful
|
||||
// window post. This won't catch _indirect_ window post calls, but this
|
||||
// is the best we can get for now.
|
||||
if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt {
|
||||
// Ok, we've checked the _method_, but we still need to check
|
||||
// the target actor. It would be nice if we could just look at
|
||||
// the trace, but I'm not sure if that's safe?
|
||||
if toActor, err := st.GetActor(msg.To); err != nil {
|
||||
// If the actor wasn't found, we probably deleted it or something. Move on.
|
||||
if !xerrors.Is(err, types.ErrActorNotFound) {
|
||||
// Otherwise, this should never fail and something is very wrong.
|
||||
return false, xerrors.Errorf("failed to lookup target actor: %w", err)
|
||||
}
|
||||
} else if builtin.IsStorageMinerActor(toActor.Code) {
|
||||
// Ok, this is a storage miner and we've processed a window post. Remove the burn.
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) {
|
||||
act, err := vm.cstate.GetActor(addr)
|
||||
if err != nil {
|
||||
@ -707,7 +742,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err
|
||||
close(freeBufs)
|
||||
}()
|
||||
|
||||
var batch = <-freeBufs
|
||||
batch := <-freeBufs
|
||||
batchCp := func(blk block.Block) error {
|
||||
numBlocks++
|
||||
totalCopySize += len(blk.RawData())
|
||||
|
125
cli/chain.go
@ -10,6 +10,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -56,6 +57,8 @@ var chainCmd = &cli.Command{
|
||||
chainGasPriceCmd,
|
||||
chainInspectUsage,
|
||||
chainDecodeCmd,
|
||||
chainEncodeCmd,
|
||||
chainDisputeSetCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1106,8 +1109,8 @@ var slashConsensusFault = &cli.Command{
|
||||
ArgsUsage: "[blockCid1 blockCid2]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "miner",
|
||||
Usage: "Miner address",
|
||||
Name: "from",
|
||||
Usage: "optionally specify the account to report consensus from",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "extra",
|
||||
@ -1142,9 +1145,25 @@ var slashConsensusFault = &cli.Command{
|
||||
return xerrors.Errorf("getting block 2: %w", err)
|
||||
}
|
||||
|
||||
def, err := api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
if b1.Miner != b2.Miner {
|
||||
return xerrors.Errorf("block1.miner:%s block2.miner:%s", b1.Miner, b2.Miner)
|
||||
}
|
||||
|
||||
var fromAddr address.Address
|
||||
if from := cctx.String("from"); from == "" {
|
||||
defaddr, err := api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr = defaddr
|
||||
} else {
|
||||
addr, err := address.NewFromString(from)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr = addr
|
||||
}
|
||||
|
||||
bh1, err := cborutil.Dump(b1)
|
||||
@ -1186,18 +1205,9 @@ var slashConsensusFault = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
if cctx.String("miner") == "" {
|
||||
return xerrors.Errorf("--miner flag is required")
|
||||
}
|
||||
|
||||
maddr, err := address.NewFromString(cctx.String("miner"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
From: def,
|
||||
To: b2.Miner,
|
||||
From: fromAddr,
|
||||
Value: types.NewInt(0),
|
||||
Method: builtin.MethodsMiner.ReportConsensusFault,
|
||||
Params: enc,
|
||||
@ -1320,3 +1330,86 @@ var chainDecodeParamsCmd = &cli.Command{
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var chainEncodeCmd = &cli.Command{
|
||||
Name: "encode",
|
||||
Usage: "encode various types",
|
||||
Subcommands: []*cli.Command{
|
||||
chainEncodeParamsCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var chainEncodeParamsCmd = &cli.Command{
|
||||
Name: "params",
|
||||
Usage: "Encodes the given JSON params",
|
||||
ArgsUsage: "[toAddr method params]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "tipset",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "encoding",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
if cctx.Args().Len() != 3 {
|
||||
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
|
||||
}
|
||||
|
||||
to, err := address.NewFromString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing toAddr: %w", err)
|
||||
}
|
||||
|
||||
method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing method id: %w", err)
|
||||
}
|
||||
|
||||
ts, err := LoadTipSet(ctx, cctx, api)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
act, err := api.StateGetActor(ctx, to, ts.Key())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting actor: %w", err)
|
||||
}
|
||||
|
||||
methodMeta, found := stmgr.MethodsMap[act.Code][abi.MethodNum(method)]
|
||||
if !found {
|
||||
return fmt.Errorf("method %d not found on actor %s", method, act.Code)
|
||||
}
|
||||
|
||||
p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
|
||||
|
||||
if err := json.Unmarshal([]byte(cctx.Args().Get(2)), p); err != nil {
|
||||
return fmt.Errorf("unmarshaling input into params type: %w", err)
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if err := p.MarshalCBOR(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch cctx.String("encoding") {
|
||||
case "base64":
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))
|
||||
case "hex":
|
||||
fmt.Println(hex.EncodeToString(buf.Bytes()))
|
||||
default:
|
||||
return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
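The core of `chain encode params` is a JSON-to-CBOR round trip through the actor method's params struct. A minimal sketch, using miner3.DisputeWindowedPoStParams purely as a convenient cbor-gen CBORMarshaler; the real command resolves the params type at runtime via stmgr.MethodsMap and the target actor's code CID.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"

	miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
)

func main() {
	// Unmarshal the user-supplied JSON into the method's params type...
	var p miner3.DisputeWindowedPoStParams
	if err := json.Unmarshal([]byte(`{"Deadline": 3, "PoStIndex": 0}`), &p); err != nil {
		panic(err)
	}

	// ...then re-encode it as CBOR and print it in the requested encoding.
	buf := new(bytes.Buffer)
	if err := p.MarshalCBOR(buf); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))
}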
@ -91,7 +91,7 @@ var clientCmd = &cli.Command{
|
||||
WithCategory("retrieval", clientRetrieveCmd),
|
||||
WithCategory("util", clientCommPCmd),
|
||||
WithCategory("util", clientCarGenCmd),
|
||||
WithCategory("util", clientInfoCmd),
|
||||
WithCategory("util", clientBalancesCmd),
|
||||
WithCategory("util", clientListTransfers),
|
||||
WithCategory("util", clientRestartTransfer),
|
||||
WithCategory("util", clientCancelTransfer),
|
||||
@ -1732,9 +1732,9 @@ var clientGetDealCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var clientInfoCmd = &cli.Command{
|
||||
Name: "info",
|
||||
Usage: "Print storage market client information",
|
||||
var clientBalancesCmd = &cli.Command{
|
||||
Name: "balances",
|
||||
Usage: "Print storage market client balances",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "client",
|
||||
@ -1751,7 +1751,7 @@ var clientInfoCmd = &cli.Command{
|
||||
|
||||
var addr address.Address
|
||||
if clientFlag := cctx.String("client"); clientFlag != "" {
|
||||
ca, err := address.NewFromString("client")
|
||||
ca, err := address.NewFromString(clientFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1770,10 +1770,22 @@ var clientInfoCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Client Market Info:\n")
|
||||
reserved, err := api.MarketGetReserved(ctx, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Locked Funds:\t%s\n", types.FIL(balance.Locked))
|
||||
fmt.Printf("Escrowed Funds:\t%s\n", types.FIL(balance.Escrow))
|
||||
avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved)
|
||||
if avail.LessThan(big.Zero()) {
|
||||
avail = big.Zero()
|
||||
}
|
||||
|
||||
fmt.Printf("Client Market Balance for address %s:\n", addr)
|
||||
|
||||
fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow))
|
||||
fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked))
|
||||
fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved))
|
||||
fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail))
|
||||
|
||||
return nil
|
||||
},
|
||||
@ -1943,6 +1955,11 @@ var clientListTransfers = &cli.Command{
|
||||
Name: "list-transfers",
|
||||
Usage: "List ongoing data transfers for deals",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Aliases: []string{"v"},
|
||||
Usage: "print verbose transfer details",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
@ -1974,6 +1991,7 @@ var clientListTransfers = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
verbose := cctx.Bool("verbose")
|
||||
completed := cctx.Bool("completed")
|
||||
color := cctx.Bool("color")
|
||||
watch := cctx.Bool("watch")
|
||||
@ -1989,7 +2007,7 @@ var clientListTransfers = &cli.Command{
|
||||
|
||||
tm.MoveCursor(1, 1)
|
||||
|
||||
OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
|
||||
OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed)
|
||||
|
||||
tm.Flush()
|
||||
|
||||
@ -2014,13 +2032,13 @@ var clientListTransfers = &cli.Command{
|
||||
}
|
||||
}
|
||||
}
|
||||
OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
|
||||
OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// OutputDataTransferChannels generates table output for a list of channels
|
||||
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) {
|
||||
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) {
|
||||
sort.Slice(channels, func(i, j int) bool {
|
||||
return channels[i].TransferID < channels[j].TransferID
|
||||
})
|
||||
@ -2050,7 +2068,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
||||
tablewriter.Col("Voucher"),
|
||||
tablewriter.NewLineCol("Message"))
|
||||
for _, channel := range sendingChannels {
|
||||
w.Write(toChannelOutput(color, "Sending To", channel))
|
||||
w.Write(toChannelOutput(color, "Sending To", channel, verbose))
|
||||
}
|
||||
w.Flush(out) //nolint:errcheck
|
||||
|
||||
@ -2064,7 +2082,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
||||
tablewriter.Col("Voucher"),
|
||||
tablewriter.NewLineCol("Message"))
|
||||
for _, channel := range receivingChannels {
|
||||
w.Write(toChannelOutput(color, "Receiving From", channel))
|
||||
w.Write(toChannelOutput(color, "Receiving From", channel, verbose))
|
||||
}
|
||||
w.Flush(out) //nolint:errcheck
|
||||
}
|
||||
@ -2085,9 +2103,13 @@ func channelStatusString(useColor bool, status datatransfer.Status) string {
|
||||
}
|
||||
}
|
||||
|
||||
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
|
||||
rootCid := ellipsis(channel.BaseCID.String(), 8)
|
||||
otherParty := ellipsis(channel.OtherPeer.String(), 8)
|
||||
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} {
|
||||
rootCid := channel.BaseCID.String()
|
||||
otherParty := channel.OtherPeer.String()
|
||||
if !verbose {
|
||||
rootCid = ellipsis(rootCid, 8)
|
||||
otherParty = ellipsis(otherParty, 8)
|
||||
}
|
||||
|
||||
initiated := "N"
|
||||
if channel.IsInitiator {
|
||||
@ -2095,7 +2117,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
|
||||
}
|
||||
|
||||
voucher := channel.Voucher
|
||||
if len(voucher) > 40 {
|
||||
if len(voucher) > 40 && !verbose {
|
||||
voucher = ellipsis(voucher, 37)
|
||||
}
|
||||
|
||||
|
429
cli/disputer.go
Normal file
@ -0,0 +1,429 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const Confidence = 10
|
||||
|
||||
type minerDeadline struct {
|
||||
miner address.Address
|
||||
index uint64
|
||||
}
|
||||
|
||||
var chainDisputeSetCmd = &cli.Command{
|
||||
Name: "disputer",
|
||||
Usage: "interact with the window post disputer",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "max-fee",
|
||||
Usage: "Spend up to X FIL per DisputeWindowedPoSt message",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "optionally specify the account to send messages from",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
disputerStartCmd,
|
||||
disputerMsgCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var disputerMsgCmd = &cli.Command{
|
||||
Name: "dispute",
|
||||
Usage: "Send a specific DisputeWindowedPoSt message",
|
||||
ArgsUsage: "[minerAddress index postIndex]",
|
||||
Flags: []cli.Flag{},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 3 {
|
||||
fmt.Println("Usage: dispute [minerAddress index postIndex]")
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
toa, err := address.NewFromString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("given 'miner' address %q was invalid: %w", cctx.Args().First(), err)
|
||||
}
|
||||
|
||||
deadline, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
postIndex, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr, err := getSender(ctx, api, cctx.String("from"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
|
||||
Deadline: deadline,
|
||||
PoStIndex: postIndex,
|
||||
})
|
||||
|
||||
if aerr != nil {
|
||||
return xerrors.Errorf("failed to serailize params: %w", aerr)
|
||||
}
|
||||
|
||||
dmsg := &types.Message{
|
||||
To: toa,
|
||||
From: fromAddr,
|
||||
Value: big.Zero(),
|
||||
Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
|
||||
Params: dpp,
|
||||
}
|
||||
|
||||
rslt, err := api.StateCall(ctx, dmsg, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to simulate dispute: %w", err)
|
||||
}
|
||||
|
||||
if rslt.MsgRct.ExitCode == 0 {
|
||||
mss, err := getMaxFee(cctx.String("max-fee"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sm, err := api.MpoolPushMessage(ctx, dmsg, mss)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("dispute message ", sm.Cid())
|
||||
} else {
|
||||
fmt.Println("dispute is unsuccessful")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var disputerStartCmd = &cli.Command{
|
||||
Name: "start",
|
||||
Usage: "Start the window post disputer",
|
||||
ArgsUsage: "[minerAddress]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.Uint64Flag{
|
||||
Name: "start-epoch",
|
||||
Usage: "only start disputing PoSts after this epoch ",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
fromAddr, err := getSender(ctx, api, cctx.String("from"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mss, err := getMaxFee(cctx.String("max-fee"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startEpoch := abi.ChainEpoch(0)
|
||||
if cctx.IsSet("height") {
|
||||
startEpoch = abi.ChainEpoch(cctx.Uint64("start-epoch"))
|
||||
}
|
||||
|
||||
fmt.Println("checking sync status")
|
||||
|
||||
if err := SyncWait(ctx, api, false); err != nil {
|
||||
return xerrors.Errorf("sync wait: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("setting up window post disputer")
|
||||
|
||||
// subscribe to head changes and validate the current value
|
||||
|
||||
headChanges, err := api.ChainNotify(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
head, ok := <-headChanges
|
||||
if !ok {
|
||||
return xerrors.Errorf("Notify stream was invalid")
|
||||
}
|
||||
|
||||
if len(head) != 1 {
|
||||
return xerrors.Errorf("Notify first entry should have been one item")
|
||||
}
|
||||
|
||||
if head[0].Type != store.HCCurrent {
|
||||
return xerrors.Errorf("expected current head on Notify stream (got %s)", head[0].Type)
|
||||
}
|
||||
|
||||
lastEpoch := head[0].Val.Height()
|
||||
lastStatusCheckEpoch := lastEpoch
|
||||
|
||||
// build initial deadlineMap
|
||||
|
||||
minerList, err := api.StateListMiners(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
knownMiners := make(map[address.Address]struct{})
|
||||
deadlineMap := make(map[abi.ChainEpoch][]minerDeadline)
|
||||
for _, miner := range minerList {
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
|
||||
knownMiners[miner] = struct{}{}
|
||||
}
|
||||
|
||||
// when this fires, check for newly created miners, and purge any "missed" epochs from deadlineMap
|
||||
statusCheckTicker := time.NewTicker(time.Hour)
|
||||
defer statusCheckTicker.Stop()
|
||||
|
||||
fmt.Println("starting up window post disputer")
|
||||
|
||||
applyTsk := func(tsk types.TipSetKey) error {
|
||||
log.Infof("last checked height: %d", lastEpoch)
|
||||
dls, ok := deadlineMap[lastEpoch]
|
||||
delete(deadlineMap, lastEpoch)
|
||||
if !ok || startEpoch >= lastEpoch {
|
||||
// no deadlines closed at this epoch - Confidence, or we haven't reached the start cutoff yet
|
||||
return nil
|
||||
}
|
||||
|
||||
dpmsgs := make([]*types.Message, 0)
|
||||
|
||||
// TODO: Parallelizable
|
||||
for _, dl := range dls {
|
||||
fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load deadlines: %w", err)
|
||||
}
|
||||
|
||||
if int(dl.index) >= len(fullDeadlines) {
|
||||
return xerrors.Errorf("deadline index %d not found in deadlines", dl.index)
|
||||
}
|
||||
|
||||
ms, err := makeDisputeWindowedPosts(ctx, api, dl, fullDeadlines[dl.index].DisputableProofCount, fromAddr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to check for disputes: %w", err)
|
||||
}
|
||||
|
||||
dpmsgs = append(dpmsgs, ms...)
|
||||
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, dl.miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
}
|
||||
|
||||
// TODO: Parallelizable / can be integrated into the previous deadline-iterating for loop
|
||||
for _, dpmsg := range dpmsgs {
|
||||
log.Infof("disputing a PoSt from miner %s", dpmsg.To)
|
||||
m, err := api.MpoolPushMessage(ctx, dpmsg, mss)
|
||||
if err != nil {
|
||||
log.Infof("failed to dispute post message: %s", err.Error())
|
||||
} else {
|
||||
log.Infof("disputed a PoSt in message: %s", m.Cid())
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
disputeLoop := func() error {
|
||||
select {
|
||||
case notif, ok := <-headChanges:
|
||||
if !ok {
|
||||
return xerrors.Errorf("head change channel errored")
|
||||
}
|
||||
|
||||
for _, val := range notif {
|
||||
switch val.Type {
|
||||
case store.HCApply:
|
||||
for ; lastEpoch <= val.Val.Height(); lastEpoch++ {
|
||||
err := applyTsk(val.Val.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case store.HCRevert:
|
||||
// do nothing
|
||||
default:
|
||||
return xerrors.Errorf("unexpected head change type %s", val.Type)
|
||||
}
|
||||
}
|
||||
case <-statusCheckTicker.C:
|
||||
log.Infof("Running status check: ")
|
||||
|
||||
minerList, err = api.StateListMiners(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner list: %w", err)
|
||||
}
|
||||
|
||||
for _, m := range minerList {
|
||||
_, ok := knownMiners[m]
|
||||
if !ok {
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, m)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
|
||||
knownMiners[m] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for ; lastStatusCheckEpoch < lastEpoch; lastStatusCheckEpoch++ {
|
||||
// if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever
|
||||
_, ok := deadlineMap[lastStatusCheckEpoch]
|
||||
if ok {
|
||||
log.Infof("epoch %d was skipped during execution, deleting it from deadlineMap")
|
||||
delete(deadlineMap, lastStatusCheckEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Status check complete")
|
||||
case <-ctx.Done():
|
||||
return xerrors.Errorf("context cancelled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
err := disputeLoop()
|
||||
if err != nil {
|
||||
fmt.Println("disputer shutting down: ", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// for a given miner and deadline index, tries to dispute posts from 0...postsSnapshotted-1
|
||||
// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent
|
||||
func makeDisputeWindowedPosts(ctx context.Context, api lapi.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) {
|
||||
disputes := make([]*types.Message, 0)
|
||||
|
||||
for i := uint64(0); i < postsSnapshotted; i++ {
|
||||
|
||||
dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
|
||||
Deadline: dl.index,
|
||||
PoStIndex: i,
|
||||
})
|
||||
|
||||
if aerr != nil {
|
||||
return nil, xerrors.Errorf("failed to serailize params: %w", aerr)
|
||||
}
|
||||
|
||||
dispute := &types.Message{
|
||||
To: dl.miner,
|
||||
From: sender,
|
||||
Value: big.Zero(),
|
||||
Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
|
||||
Params: dpp,
|
||||
}
|
||||
|
||||
rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK)
|
||||
if err == nil && rslt.MsgRct.ExitCode == 0 {
|
||||
disputes = append(disputes, dispute)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return disputes, nil
|
||||
}
|
||||
|
||||
func makeMinerDeadline(ctx context.Context, api lapi.FullNode, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) {
|
||||
dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return -1, nil, xerrors.Errorf("getting proving index list: %w", err)
|
||||
}
|
||||
|
||||
return dl.Close, &minerDeadline{
|
||||
miner: mAddr,
|
||||
index: dl.Index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSender(ctx context.Context, api lapi.FullNode, fromStr string) (address.Address, error) {
|
||||
if fromStr == "" {
|
||||
return api.WalletDefaultAddress(ctx)
|
||||
}
|
||||
|
||||
addr, err := address.NewFromString(fromStr)
|
||||
if err != nil {
|
||||
return address.Undef, err
|
||||
}
|
||||
|
||||
has, err := api.WalletHas(ctx, addr)
|
||||
if err != nil {
|
||||
return address.Undef, err
|
||||
}
|
||||
|
||||
if !has {
|
||||
return address.Undef, xerrors.Errorf("wallet doesn't contain: %s ", addr)
|
||||
}
|
||||
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
func getMaxFee(maxStr string) (*lapi.MessageSendSpec, error) {
|
||||
if maxStr != "" {
|
||||
maxFee, err := types.ParseFIL(maxStr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing max-fee: %w", err)
|
||||
}
|
||||
return &lapi.MessageSendSpec{
|
||||
MaxFee: types.BigInt(maxFee),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
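The --max-fee handling above boils down to an optional MessageSendSpec. A standalone sketch of the same logic as getMaxFee; the helper name and the "0.1 FIL" value are ours, while ParseFIL, BigInt and MessageSendSpec.MaxFee come from the code above.

package main

import (
	"fmt"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// maxFeeSpec mirrors getMaxFee: an empty string means "no cap" (a nil spec
// leaves the fee cap to the node); otherwise the value is parsed as FIL and
// becomes the per-message MaxFee.
func maxFeeSpec(maxStr string) (*lapi.MessageSendSpec, error) {
	if maxStr == "" {
		return nil, nil
	}
	maxFee, err := types.ParseFIL(maxStr)
	if err != nil {
		return nil, fmt.Errorf("parsing max-fee: %w", err)
	}
	return &lapi.MessageSendSpec{MaxFee: types.BigInt(maxFee)}, nil
}

func main() {
	spec, err := maxFeeSpec("0.1 FIL")
	if err != nil {
		panic(err)
	}
	fmt.Println("per-message fee cap (attoFIL):", spec.MaxFee)
}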
@ -362,15 +362,15 @@ var mpoolReplaceCmd = &cli.Command{
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "gas-feecap",
|
||||
Usage: "gas feecap for new message",
|
||||
Usage: "gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "gas-premium",
|
||||
Usage: "gas price for new message",
|
||||
Usage: "gas price for new message (pay to miner, attoFIL/GasUnit)",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "gas-limit",
|
||||
Usage: "gas price for new message",
|
||||
Usage: "gas limit for new message (GasUnit)",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auto",
|
||||
@ -378,7 +378,7 @@ var mpoolReplaceCmd = &cli.Command{
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "max-fee",
|
||||
Usage: "Spend up to X FIL for this message (applicable for auto mode)",
|
||||
Usage: "Spend up to X attoFIL for this message (applicable for auto mode)",
|
||||
},
|
||||
},
|
||||
ArgsUsage: "<from nonce> | <message-cid>",
|
||||
|
@ -473,12 +473,12 @@ var msigApproveCmd = &cli.Command{
|
||||
return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID"))
|
||||
}
|
||||
|
||||
if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value> [ <method> <params> ]"))
|
||||
if cctx.Args().Len() > 2 && cctx.Args().Len() < 5 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value>"))
|
||||
}
|
||||
|
||||
if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value>"))
|
||||
if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value> [ <method> <params> ]"))
|
||||
}
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
@ -1178,7 +1178,7 @@ var msigLockProposeCmd = &cli.Command{
|
||||
params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{
|
||||
StartEpoch: abi.ChainEpoch(start),
|
||||
UnlockDuration: abi.ChainEpoch(duration),
|
||||
Amount: abi.NewTokenAmount(amount.Int64()),
|
||||
Amount: big.Int(amount),
|
||||
})
|
||||
|
||||
if actErr != nil {
|
||||
|
21
cli/send.go
@ -15,6 +15,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
@ -51,7 +52,7 @@ var sendCmd = &cli.Command{
|
||||
&cli.Uint64Flag{
|
||||
Name: "method",
|
||||
Usage: "specify method to invoke",
|
||||
Value: 0,
|
||||
Value: uint64(builtin.MethodSend),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "params-json",
|
||||
@ -61,6 +62,10 @@ var sendCmd = &cli.Command{
|
||||
Name: "params-hex",
|
||||
Usage: "specify invocation parameters in hex",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "force",
|
||||
Usage: "must be specified for the action to take effect if maybe SysErrInsufficientFunds etc",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 2 {
|
||||
@ -143,6 +148,20 @@ var sendCmd = &cli.Command{
|
||||
Params: params,
|
||||
}
|
||||
|
||||
if !cctx.Bool("force") {
|
||||
// Funds insufficient check
|
||||
fromBalance, err := api.WalletBalance(ctx, msg.From)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value)
|
||||
|
||||
if fromBalance.LessThan(totalCost) {
|
||||
fmt.Printf("WARNING: From balance %s less than total cost %s\n", types.FIL(fromBalance), types.FIL(totalCost))
|
||||
return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned")
|
||||
}
|
||||
}
|
||||
|
||||
if cctx.IsSet("nonce") {
|
||||
msg.Nonce = cctx.Uint64("nonce")
|
||||
sm, err := api.WalletSignMessage(ctx, fromAddr, msg)
|
||||
|
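The new --force guard compares the sender's balance against a worst-case cost of GasFeeCap × GasLimit + Value. A small worked example with hypothetical numbers (all values in attoFIL; none of these figures come from the change):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	gasFeeCap := types.NewInt(1000)      // attoFIL per gas unit
	gasLimit := types.NewInt(10_000_000) // gas units
	value := types.NewInt(0)             // amount being sent

	// Worst case the message can cost: GasFeeCap*GasLimit + Value.
	totalCost := types.BigAdd(types.BigMul(gasFeeCap, gasLimit), value)

	fromBalance := types.NewInt(5_000_000_000)
	if fromBalance.LessThan(totalCost) {
		fmt.Printf("WARNING: balance %s < worst-case cost %s; pass --force to send anyway\n",
			types.FIL(fromBalance), types.FIL(totalCost))
	}
}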
@ -1617,7 +1617,7 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
|
||||
return nil, fmt.Errorf("unknown method %d for actor %s", method, act)
|
||||
}
|
||||
|
||||
paramObj := methodMeta.Params
|
||||
paramObj := methodMeta.Params.Elem()
|
||||
if paramObj.NumField() != len(args) {
|
||||
return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField())
|
||||
}
|
||||
|
161
cli/wallet.go
@ -509,6 +509,7 @@ var walletMarket = &cli.Command{
|
||||
Usage: "Interact with market balances",
|
||||
Subcommands: []*cli.Command{
|
||||
walletMarketWithdraw,
|
||||
walletMarketAdd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -518,13 +519,13 @@ var walletMarketWithdraw = &cli.Command{
|
||||
ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"f"},
|
||||
Name: "wallet",
|
||||
Usage: "Specify address to withdraw funds to, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"w"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)",
|
||||
Usage: "Market address to withdraw from (account or miner actor address, defaults to --wallet address)",
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
},
|
||||
@ -536,6 +537,123 @@ var walletMarketWithdraw = &cli.Command{
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
var wallet address.Address
|
||||
if cctx.String("wallet") != "" {
|
||||
wallet, err = address.NewFromString(cctx.String("wallet"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing from address: %w", err)
|
||||
}
|
||||
} else {
|
||||
wallet, err = api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting default wallet address: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
addr := wallet
|
||||
if cctx.String("address") != "" {
|
||||
addr, err = address.NewFromString(cctx.String("address"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing market address: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Work out if there are enough unreserved, unlocked funds to withdraw
|
||||
bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err)
|
||||
}
|
||||
|
||||
reserved, err := api.MarketGetReserved(ctx, addr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market reserved amount for address %s: %w", addr.String(), err)
|
||||
}
|
||||
|
||||
avail := big.Subtract(big.Subtract(bal.Escrow, bal.Locked), reserved)
|
||||
|
||||
notEnoughErr := func(msg string) error {
|
||||
return xerrors.Errorf("%s; "+
|
||||
"available (%s) = escrow (%s) - locked (%s) - reserved (%s)",
|
||||
msg, types.FIL(avail), types.FIL(bal.Escrow), types.FIL(bal.Locked), types.FIL(reserved))
|
||||
}
|
||||
|
||||
if avail.IsZero() || avail.LessThan(big.Zero()) {
|
||||
avail = big.Zero()
|
||||
return notEnoughErr("no funds available to withdraw")
|
||||
}
|
||||
|
||||
// Default to withdrawing all available funds
|
||||
amt := avail
|
||||
|
||||
// If there was an amount argument, only withdraw that amount
|
||||
if cctx.Args().Present() {
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt = abi.TokenAmount(f)
|
||||
}
|
||||
|
||||
// Check the amount is positive
|
||||
if amt.IsZero() || amt.LessThan(big.Zero()) {
|
||||
return xerrors.Errorf("amount must be > 0")
|
||||
}
|
||||
|
||||
// Check there are enough available funds
|
||||
if amt.GreaterThan(avail) {
|
||||
msg := fmt.Sprintf("can't withdraw more funds than available; requested: %s", types.FIL(amt))
|
||||
return notEnoughErr(msg)
|
||||
}
|
||||
|
||||
fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), wallet.String())
|
||||
smsg, err := api.MarketWithdraw(ctx, wallet, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fund manager withdraw error: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var walletMarketAdd = &cli.Command{
|
||||
Name: "add",
|
||||
Usage: "Add funds to the Storage Market Actor",
|
||||
ArgsUsage: "<amount>",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "Specify address to move funds from, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"f"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Market address to move funds to (account or miner actor address, defaults to --from address)",
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting node API: %w", err)
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
// Get amount param
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass amount to add")
|
||||
}
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt := abi.TokenAmount(f)
|
||||
|
||||
// Get from param
|
||||
var from address.Address
|
||||
if cctx.String("from") != "" {
|
||||
from, err = address.NewFromString(cctx.String("from"))
|
||||
@ -549,6 +667,7 @@ var walletMarketWithdraw = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
// Get address param
|
||||
addr := from
|
||||
if cctx.String("address") != "" {
|
||||
addr, err = address.NewFromString(cctx.String("address"))
|
||||
@ -557,38 +676,14 @@ var walletMarketWithdraw = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
|
||||
// Add balance to market actor
|
||||
fmt.Printf("Submitting Add Balance message for amount %s for address %s\n", types.FIL(amt), addr)
|
||||
smsg, err := api.MarketAddBalance(ctx, from, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err)
|
||||
return xerrors.Errorf("add balance error: %w", err)
|
||||
}
|
||||
|
||||
avail := big.Subtract(bal.Escrow, bal.Locked)
|
||||
amt := avail
|
||||
|
||||
if cctx.Args().Present() {
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt = abi.TokenAmount(f)
|
||||
}
|
||||
|
||||
if amt.GreaterThan(avail) {
|
||||
return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail))
|
||||
}
|
||||
|
||||
if avail.IsZero() {
|
||||
return xerrors.Errorf("zero unlocked funds available to withdraw")
|
||||
}
|
||||
|
||||
fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String())
|
||||
smsg, err := api.MarketWithdraw(ctx, from, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fund manager withdraw error: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
|
||||
fmt.Printf("AddBalance message cid: %s\n", smsg)
|
||||
|
||||
return nil
|
||||
},
|
||||
|
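The withdraw command above now computes the withdrawable market balance as escrow minus locked minus reserved, clamped at zero. A worked sketch of that arithmetic with hypothetical balances (big.Subtract, big.Zero and types.FIL are the calls used above; the amounts are made up):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	// Hypothetical balances, in attoFIL.
	escrow := big.NewInt(5_000_000_000_000_000_000) // 5 FIL in escrow
	locked := big.NewInt(1_000_000_000_000_000_000) // 1 FIL locked in deals
	reserved := big.NewInt(500_000_000_000_000_000) // 0.5 FIL reserved locally

	// available = escrow - locked - reserved, never below zero.
	avail := big.Subtract(big.Subtract(escrow, locked), reserved)
	if avail.LessThan(big.Zero()) {
		avail = big.Zero()
	}

	fmt.Println("available to withdraw:", types.FIL(avail)) // 3.5 FIL
}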
@ -27,6 +27,16 @@ func main() {
|
||||
Hidden: true,
|
||||
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "limit",
|
||||
Usage: "spam transaction count limit, <= 0 is no limit",
|
||||
Value: 0,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "rate",
|
||||
Usage: "spam transaction rate, count per second",
|
||||
Value: 5,
|
||||
},
|
||||
},
|
||||
Commands: []*cli.Command{runCmd},
|
||||
}
|
||||
@ -52,11 +62,17 @@ var runCmd = &cli.Command{
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return sendSmallFundsTxs(ctx, api, addr, 5)
|
||||
rate := cctx.Int("rate")
|
||||
if rate <= 0 {
|
||||
rate = 5
|
||||
}
|
||||
limit := cctx.Int("limit")
|
||||
|
||||
return sendSmallFundsTxs(ctx, api, addr, rate, limit)
|
||||
},
|
||||
}
|
||||
|
||||
func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error {
|
||||
func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate, limit int) error {
|
||||
var sendSet []address.Address
|
||||
for i := 0; i < 20; i++ {
|
||||
naddr, err := api.WalletNew(ctx, types.KTSecp256k1)
|
||||
@ -66,9 +82,14 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
|
||||
|
||||
sendSet = append(sendSet, naddr)
|
||||
}
|
||||
count := limit
|
||||
|
||||
tick := build.Clock.Ticker(time.Second / time.Duration(rate))
|
||||
for {
|
||||
if count <= 0 && limit > 0 {
|
||||
fmt.Printf("%d messages sent.\n", limit)
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-tick.C:
|
||||
msg := &types.Message{
|
||||
@ -81,6 +102,7 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count--
|
||||
fmt.Println("Message sent: ", smsg.Cid())
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
|
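The fountain change above paces sends with a ticker and stops after an optional message limit. A self-contained sketch of that loop, using a plain time.Ticker instead of build.Clock and a caller-supplied send callback in place of the real MpoolPushMessage call:

package main

import (
	"context"
	"fmt"
	"time"
)

// sendLoop mirrors the rate/limit handling: a ticker paces the sends and a
// decrementing counter stops the loop once `limit` messages have gone out
// (limit <= 0 means "no limit").
func sendLoop(ctx context.Context, rate, limit int, send func() error) error {
	if rate <= 0 {
		rate = 5
	}
	count := limit

	tick := time.NewTicker(time.Second / time.Duration(rate))
	defer tick.Stop()

	for {
		if count <= 0 && limit > 0 {
			fmt.Printf("%d messages sent.\n", limit)
			return nil
		}
		select {
		case <-tick.C:
			if err := send(); err != nil {
				return err
			}
			count--
		case <-ctx.Done():
			return nil
		}
	}
}

func main() {
	// Send 10 dummy "messages" at 5 per second.
	_ = sendLoop(context.Background(), 5, 10, func() error {
		fmt.Println("message sent")
		return nil
	})
}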
@ -40,6 +40,8 @@ import (
|
||||
var log = logging.Logger("lotus-bench")
|
||||
|
||||
type BenchResults struct {
|
||||
EnvVar map[string]string
|
||||
|
||||
SectorSize abi.SectorSize
|
||||
SectorNumber int
|
||||
|
||||
@ -446,6 +448,15 @@ var sealBenchCmd = &cli.Command{
|
||||
bo.VerifyWindowPostHot = verifyWindowpost2.Sub(verifyWindowpost1)
|
||||
}
|
||||
|
||||
bo.EnvVar = make(map[string]string)
|
||||
for _, envKey := range []string{"BELLMAN_NO_GPU", "FIL_PROOFS_MAXIMIZE_CACHING", "FIL_PROOFS_USE_GPU_COLUMN_BUILDER",
|
||||
"FIL_PROOFS_USE_GPU_TREE_BUILDER", "FIL_PROOFS_USE_MULTICORE_SDR", "BELLMAN_CUSTOM_GPU"} {
|
||||
envValue, found := os.LookupEnv(envKey)
|
||||
if found {
|
||||
bo.EnvVar[envKey] = envValue
|
||||
}
|
||||
}
|
||||
|
||||
if c.Bool("json-out") {
|
||||
data, err := json.MarshalIndent(bo, "", " ")
|
||||
if err != nil {
|
||||
@ -454,6 +465,10 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
fmt.Println(string(data))
|
||||
} else {
|
||||
fmt.Println("environment variable list:")
|
||||
for envKey, envValue := range bo.EnvVar {
|
||||
fmt.Printf("%s=%s\n", envKey, envValue)
|
||||
}
|
||||
fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber)
|
||||
if robench == "" {
|
||||
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece))
|
||||
|
@ -57,6 +57,7 @@ type gatewayDepsAPI interface {
|
||||
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
|
||||
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
||||
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error)
|
||||
StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error)
|
||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error)
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
|
||||
@ -299,6 +300,10 @@ func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKe
|
||||
return a.api.StateNetworkVersion(ctx, tsk)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
|
||||
return a.api.StateSearchMsgLimited(ctx, msg, a.stateWaitLookbackLimit)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
|
||||
return a.api.StateWaitMsgLimited(ctx, msg, confidence, a.stateWaitLookbackLimit)
|
||||
}
|
||||
|
@ -245,7 +245,7 @@ func startNodes(
|
||||
|
||||
// Create a gateway server in front of the full node
|
||||
gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit)
|
||||
_, addr, err := builder.CreateRPCServer(gapiImpl)
|
||||
_, addr, err := builder.CreateRPCServer(t, gapiImpl)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a gateway client API that connects to the gateway server
|
||||
|
70
cmd/lotus-shed/blockmsgid.go
Normal file
@ -0,0 +1,70 @@
package main

import (
	"encoding/base64"
	"fmt"

	blake2b "github.com/minio/blake2b-simd"
	"github.com/urfave/cli/v2"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
)

var blockmsgidCmd = &cli.Command{
	Name:      "blockmsgid",
	Usage:     "Print a block's pubsub message ID",
	ArgsUsage: "<blockCid> ...",
	Action: func(cctx *cli.Context) error {
		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}

		defer closer()
		ctx := lcli.ReqContext(cctx)

		for _, arg := range cctx.Args().Slice() {
			blkcid, err := cid.Decode(arg)
			if err != nil {
				return fmt.Errorf("error decoding block cid: %w", err)
			}

			blkhdr, err := api.ChainGetBlock(ctx, blkcid)
			if err != nil {
				return fmt.Errorf("error retrieving block header: %w", err)
			}

			blkmsgs, err := api.ChainGetBlockMessages(ctx, blkcid)
			if err != nil {
				return fmt.Errorf("error retrieving block messages: %w", err)
			}

			blkmsg := &types.BlockMsg{
				Header: blkhdr,
			}

			for _, m := range blkmsgs.BlsMessages {
				blkmsg.BlsMessages = append(blkmsg.BlsMessages, m.Cid())
			}

			for _, m := range blkmsgs.SecpkMessages {
				blkmsg.SecpkMessages = append(blkmsg.SecpkMessages, m.Cid())
			}

			bytes, err := blkmsg.Serialize()
			if err != nil {
				return fmt.Errorf("error serializing BlockMsg: %w", err)
			}

			msgId := blake2b.Sum256(bytes)
			msgId64 := base64.StdEncoding.EncodeToString(msgId[:])

			fmt.Println(msgId64)
		}

		return nil
	},
}
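The ID printed by this new command is the base64 encoding of the blake2b-256 digest of the serialized block envelope, which makes it easy to match a block CID against pubsub logs. A standalone sketch of just that hashing step is below; the payload is made up, whereas in the command above it comes from blkmsg.Serialize().

package main

import (
    "encoding/base64"
    "fmt"

    blake2b "github.com/minio/blake2b-simd"
)

func main() {
    // Stand-in for blkmsg.Serialize() output; any serialized block envelope works here.
    payload := []byte("example-serialized-blockmsg")

    digest := blake2b.Sum256(payload)
    fmt.Println(base64.StdEncoding.EncodeToString(digest[:]))
}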
@ -120,7 +120,7 @@ var datastoreGetCmd = &cli.Command{
	},
	ArgsUsage: "[namespace key]",
	Action: func(cctx *cli.Context) error {
		logging.SetLogLevel("badger", "ERROR") // nolint:errchec
		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck

		r, err := repo.NewFS(cctx.String("repo"))
		if err != nil {
@ -50,6 +50,7 @@ func main() {
		electionCmd,
		rpcCmd,
		cidCmd,
		blockmsgidCmd,
	}

	app := &cli.App{
@ -622,8 +622,8 @@ var actorControlSet = &cli.Command{

var actorSetOwnerCmd = &cli.Command{
	Name:      "set-owner",
	Usage:     "Set owner address",
	ArgsUsage: "[address]",
	Usage:     "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)",
	ArgsUsage: "[newOwnerAddress senderAddress]",
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name: "really-do-it",
@ -637,8 +637,8 @@ var actorSetOwnerCmd = &cli.Command{
			return nil
		}

		if !cctx.Args().Present() {
			return fmt.Errorf("must pass address of new owner address")
		if cctx.NArg() != 2 {
			return fmt.Errorf("must pass new owner address and sender address")
		}

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
@ -660,7 +660,17 @@ var actorSetOwnerCmd = &cli.Command{
			return err
		}

		newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
		newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK)
		if err != nil {
			return err
		}

		fa, err := address.NewFromString(cctx.Args().Get(1))
		if err != nil {
			return err
		}

		fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK)
		if err != nil {
			return err
		}
@ -675,13 +685,17 @@ var actorSetOwnerCmd = &cli.Command{
			return err
		}

		sp, err := actors.SerializeParams(&newAddr)
		if fromAddrId != mi.Owner && fromAddrId != newAddrId {
			return xerrors.New("from address must either be the old owner or the new owner")
		}

		sp, err := actors.SerializeParams(&newAddrId)
		if err != nil {
			return xerrors.Errorf("serializing params: %w", err)
		}

		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
			From:   mi.Owner,
			From:   fromAddrId,
			To:     maddr,
			Method: miner.Methods.ChangeOwnerAddress,
			Value:  big.Zero(),
@ -691,7 +705,7 @@ var actorSetOwnerCmd = &cli.Command{
			return xerrors.Errorf("mpool push: %w", err)
		}

		fmt.Println("Propose Message CID:", smsg.Cid())
		fmt.Println("Message CID:", smsg.Cid())

		// wait for it to get mined into a block
		wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
@ -701,34 +715,11 @@ var actorSetOwnerCmd = &cli.Command{

		// check it executed successfully
		if wait.Receipt.ExitCode != 0 {
			fmt.Println("Propose owner change failed!")
			fmt.Println("owner change failed!")
			return err
		}

		smsg, err = api.MpoolPushMessage(ctx, &types.Message{
			From:   newAddr,
			To:     maddr,
			Method: miner.Methods.ChangeOwnerAddress,
			Value:  big.Zero(),
			Params: sp,
		}, nil)
		if err != nil {
			return xerrors.Errorf("mpool push: %w", err)
		}

		fmt.Println("Approve Message CID:", smsg.Cid())

		// wait for it to get mined into a block
		wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
		if err != nil {
			return err
		}

		// check it executed successfully
		if wait.Receipt.ExitCode != 0 {
			fmt.Println("Approve owner change failed!")
			return err
		}
		fmt.Println("message succeeded!")

		return nil
	},
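Design note on the reworked set-owner flow: instead of pushing both the propose and approve messages itself, the command now sends a single ChangeOwnerAddress message from whichever address is supplied as senderAddress, so the operator runs it once as the current owner and once again as the new owner. A toy sketch of that two-step handshake follows; the state machine below is illustrative only, and the real checks live in the miner actor, not in the CLI.

package main

import (
    "errors"
    "fmt"
)

// ownerState is a made-up miniature of the miner actor's owner bookkeeping,
// used only to show why set-owner must now be invoked twice.
type ownerState struct {
    owner    string
    proposed string
}

func (s *ownerState) changeOwner(from, newOwner string) error {
    switch {
    case from == s.owner: // first invocation: current owner proposes
        s.proposed = newOwner
        return nil
    case from == newOwner && s.proposed == newOwner: // second invocation: new owner approves
        s.owner = newOwner
        s.proposed = ""
        return nil
    default:
        return errors.New("sender must be the current owner or the proposed new owner")
    }
}

func main() {
    s := &ownerState{owner: "f3old"}
    fmt.Println(s.changeOwner("f3old", "f3new")) // propose: <nil>
    fmt.Println(s.changeOwner("f3new", "f3new")) // approve: <nil>
    fmt.Println(s.owner)                         // f3new
}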
Some files were not shown because too many files have changed in this diff.