diff --git a/.circleci/config.yml b/.circleci/config.yml index e51c153c6..81b18b231 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,7 @@ version: 2.1 orbs: go: gotest/tools@0.0.13 + aws-cli: circleci/aws-cli@1.3.2 executors: golang: @@ -200,6 +201,10 @@ jobs: <<: *test test-window-post: <<: *test + test-window-post-dispute: + <<: *test + test-terminate: + <<: *test test-conformance: description: | Run tests using a corpus of interoperable test vectors for Filecoin @@ -262,6 +267,16 @@ jobs: path: /tmp/test-reports - store_artifacts: path: /tmp/test-artifacts/conformance-coverage.html + build-ntwk-calibration: + description: | + Compile lotus binaries for the calibration network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make calibnet build-lotus-soup: description: | Compile `lotus-soup` Testground test plan @@ -448,6 +463,114 @@ jobs: name: Publish release command: ./scripts/publish-release.sh + build-and-push-image: + description: build and push docker images to public AWS ECR registry + executor: aws-cli/default + parameters: + profile-name: + type: string + default: "default" + description: AWS profile name to be configured. + + aws-access-key-id: + type: env_var_name + default: AWS_ACCESS_KEY_ID + description: > + AWS access key id for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_ACCESS_KEY. + + aws-secret-access-key: + type: env_var_name + default: AWS_SECRET_ACCESS_KEY + description: > + AWS secret key for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_SECRET_ACCESS_KEY. + + region: + type: env_var_name + default: AWS_REGION + description: > + Name of env var storing your AWS region information, + defaults to AWS_REGION + + account-url: + type: env_var_name + default: AWS_ECR_ACCOUNT_URL + description: > + Env var storing Amazon ECR account URL that maps to an AWS account, + e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com + defaults to AWS_ECR_ACCOUNT_URL + + dockerfile: + type: string + default: Dockerfile + description: Name of dockerfile to use. Defaults to Dockerfile. + + path: + type: string + default: . + description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory). + + extra-build-args: + type: string + default: "" + description: > + Extra flags to pass to docker build. 
For examples, see + https://docs.docker.com/engine/reference/commandline/build + + repo: + type: string + description: Name of an Amazon ECR repository + + tag: + type: string + default: "latest" + description: A comma-separated string containing docker image tags to build and push (default = latest) + + steps: + - aws-cli/setup: + profile-name: <> + aws-access-key-id: <> + aws-secret-access-key: <> + aws-region: <> + + - run: + name: Log into Amazon ECR + command: | + aws ecr-public get-login-password --region $<> --profile <> | docker login --username AWS --password-stdin $<> + + - checkout + + - setup_remote_docker: + version: 19.03.13 + docker_layer_caching: false + + - run: + name: Build docker image + command: | + registry_id=$(echo $<> | sed "s;\..*;;g") + + docker_tag_args="" + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker_tag_args="$docker_tag_args -t $<>/<>:$tag" + done + + docker build \ + <<#parameters.extra-build-args>><><> \ + -f <>/<> \ + $docker_tag_args \ + <> + + - run: + name: Push image to Amazon ECR + command: | + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker push $<>/<>:${tag} + done workflows: version: 2.1 @@ -479,9 +602,20 @@ workflows: test-suite-name: cli packages: "./cli/... ./cmd/... ./api/..." - test-window-post: + codecov-upload: true go-test-flags: "-run=TestWindowedPost" winpost-test: "1" test-suite-name: window-post + - test-window-post-dispute: + codecov-upload: true + go-test-flags: "-run=TestWindowPostDispute" + winpost-test: "1" + test-suite-name: window-post-dispute + - test-terminate: + codecov-upload: true + go-test-flags: "-run=TestTerminate" + winpost-test: "1" + test-suite-name: terminate - test-short: go-test-flags: "--timeout 10m --short" test-suite-name: short @@ -497,6 +631,7 @@ workflows: test-suite-name: conformance-bleeding-edge packages: "./conformance" vectors-branch: master + - build-ntwk-calibration - build-lotus-soup - trigger-testplans: filters: @@ -533,3 +668,8 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+$/ + - build-and-push-image: + dockerfile: Dockerfile.lotus + path: . + repo: lotus-dev + tag: '${CIRCLE_SHA1:0:8}' diff --git a/.codecov.yml b/.codecov.yml index a53081be7..1967f6eca 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -5,5 +5,15 @@ ignore: - "api/test/*" - "gen/**/*" - "gen/*" + - "cmd/lotus-shed/*" + - "cmd/tvx/*" + - "cmd/lotus-pcr/*" + - "cmd/tvx/*" + - "cmd/lotus-chainwatch/*" + - "cmd/lotus-health/*" + - "cmd/lotus-fountain/*" + - "cmd/lotus-townhall/*" + - "cmd/lotus-stats/*" + - "cmd/lotus-pcr/*" github_checks: annotations: false diff --git a/.gitmodules b/.gitmodules index 127386beb..cdee35ce3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,6 +7,3 @@ [submodule "extern/test-vectors"] path = extern/test-vectors url = https://github.com/filecoin-project/test-vectors.git -[submodule "extern/blst"] - path = extern/blst - url = https://github.com/supranational/blst.git diff --git a/CHANGELOG.md b/CHANGELOG.md index cd14f8622..d28432d9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,125 @@ # Lotus changelog +# 1.4.1 / 2021-01-20 + +This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors. 
+ +## Changes + +#### Core Lotus + +- fix(sync): enforce ForkLengthThreshold for synced chain (https://github.com/filecoin-project/lotus/pull/5182) +- introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101) +- Skip bootstrapping if no peers specified (https://github.com/filecoin-project/lotus/pull/5301) +- Chainxchg write response timeout (https://github.com/filecoin-project/lotus/pull/5254) +- update NewestNetworkVersion (https://github.com/filecoin-project/lotus/pull/5277) +- fix(sync): remove checks bypass when we submit the block (https://github.com/filecoin-project/lotus/pull/4192) +- chore: export vm.ShouldBurn (https://github.com/filecoin-project/lotus/pull/5355) +- fix(sync): enforce fork len when changing head (https://github.com/filecoin-project/lotus/pull/5244) +- Use 55th percentile instead of median for gas-price (https://github.com/filecoin-project/lotus/pull/5369) +- update go-libp2p-pubsub to v0.4.1 (https://github.com/filecoin-project/lotus/pull/5329) + +#### Sealing + +- Sector termination support (https://github.com/filecoin-project/lotus/pull/5341) +- update weight canSeal and canStore when attach (https://github.com/filecoin-project/lotus/pull/5242/files) +- sector-storage/mock: improve mocked readpiece (https://github.com/filecoin-project/lotus/pull/5208) +- Fix deadlock in runWorker in sched_worker.go (https://github.com/filecoin-project/lotus/pull/5251) +- Skip checking terminated sectors provable (https://github.com/filecoin-project/lotus/pull/5217) +- storagefsm: Fix unsealedInfoMap.lk init race (https://github.com/filecoin-project/lotus/pull/5319) +- Multicore AddPiece CommP (https://github.com/filecoin-project/lotus/pull/5320) +- storagefsm: Send correct event on ErrExpiredTicket in CommitFailed (https://github.com/filecoin-project/lotus/pull/5366) +- expose StateSearchMessage on gateway (https://github.com/filecoin-project/lotus/pull/5382) +- fix FileSize to return correct disk usage recursively (https://github.com/filecoin-project/lotus/pull/5384) + +#### Dealmaking + +- Better error message when withdrawing funds (https://github.com/filecoin-project/lotus/pull/5293) +- add verbose for list transfers (https://github.com/filecoin-project/lotus/pull/5259) +- cli - rename `client info` to `client balances` (https://github.com/filecoin-project/lotus/pull/5304) +- Better CLI for wallet market withdraw and client info (https://github.com/filecoin-project/lotus/pull/5303) + +#### UX + +- correct flag usages for replace cmd (https://github.com/filecoin-project/lotus/pull/5255) +- lotus state call will panic (https://github.com/filecoin-project/lotus/pull/5275) +- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976) +- feat: lotus wallet market add (adds funds to storage market actor) (https://github.com/filecoin-project/lotus/pull/5300) +- Fix client flag parsing in client balances cli (https://github.com/filecoin-project/lotus/pull/5312) +- delete slash-consensus miner (https://github.com/filecoin-project/lotus/pull/4577) +- add fund sufficient check in send (https://github.com/filecoin-project/lotus/pull/5252) +- enable parse and shorten negative FIL values (https://github.com/filecoin-project/lotus/pull/5315) +- add limit and rate for chain noise (https://github.com/filecoin-project/lotus/pull/5223) +- add bench env print (https://github.com/filecoin-project/lotus/pull/5222) +- Implement full-node restore option (https://github.com/filecoin-project/lotus/pull/5362) +- add color for token amount 
(https://github.com/filecoin-project/lotus/pull/5352) +- correct log in maybeUseAddress (https://github.com/filecoin-project/lotus/pull/5359) +- add slash-consensus from flag (https://github.com/filecoin-project/lotus/pull/5378) + +#### Testing + +- tvx extract: more tipset extraction goodness (https://github.com/filecoin-project/lotus/pull/5258) +- Fix race in blockstore test suite (https://github.com/filecoin-project/lotus/pull/5297) + + +#### Build & Networks + +- Remove LOTUS_DISABLE_V2_ACTOR_MIGRATION envvar (https://github.com/filecoin-project/lotus/pull/5289) +- Create a calibnet build option (https://github.com/filecoin-project/lotus/pull/5288) +- Calibnet: Set Orange epoch (https://github.com/filecoin-project/lotus/pull/5325) + +#### Management + +- Update SECURITY.md (https://github.com/filecoin-project/lotus/pull/5246) +- README: Contribute section (https://github.com/filecoin-project/lotus/pull/5330) +- README: refine Contribute section (https://github.com/filecoin-project/lotus/pull/5331) +- Add misc tooling to codecov ignore list (https://github.com/filecoin-project/lotus/pull/5347) + +# 1.4.0 / 2020-12-19 + +This is a MANDATORY hotfix release of Lotus that resolves a chain halt at height 336,459 caused by nondeterminism in specs-actors. The fix is to update actors to 2.3.3 in order to incorporate this fix https://github.com/filecoin-project/specs-actors/pull/1334. + +# 1.3.0 / 2020-12-16 + +This is a mandatory release of Lotus that introduces the third post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 343200, before which time all nodes must have updated to this release (or later). The change that breaks consensus is an implementation of FIP-0009(https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md). + +## Changes + +- Disable gas burning for window post messages (https://github.com/filecoin-project/lotus/pull/5200) +- fix lock propose (https://github.com/filecoin-project/lotus/pull/5197) + +# 1.2.3 / 2020-12-15 + +This is an optional Lotus release that introduces many performance improvements, bugfixes, and UX improvements. 
+ +## Changes + +- When waiting for deal commit messages, ignore unsuccessful messages (https://github.com/filecoin-project/lotus/pull/5189) +- Bigger copy buffer size for stores (https://github.com/filecoin-project/lotus/pull/5177) +- Print MinPieceSize when querying ask (https://github.com/filecoin-project/lotus/pull/5178) +- Optimize miner info & sectors list loading (https://github.com/filecoin-project/lotus/pull/5176) +- Allow miners to filter (un)verified deals (https://github.com/filecoin-project/lotus/pull/5094) +- Fix curSealing out of MaxSealingSectors limit (https://github.com/filecoin-project/lotus/pull/5166) +- Add mpool pending from / to filter (https://github.com/filecoin-project/lotus/pull/5169) +- Add metrics for delayed blocks (https://github.com/filecoin-project/lotus/pull/5171) +- Fix PushUntrusted publishing -- the message is local (https://github.com/filecoin-project/lotus/pull/5173) +- Avoid potential hang in events API when starting event listener (https://github.com/filecoin-project/lotus/pull/5159) +- Show data transfer ID in list-deals (https://github.com/filecoin-project/lotus/pull/5150) +- Fix events API mutex locking (https://github.com/filecoin-project/lotus/pull/5160) +- Message pool refactors (https://github.com/filecoin-project/lotus/pull/5162) +- Fix lotus-shed cid output (https://github.com/filecoin-project/lotus/pull/5072) +- Use FundManager to withdraw funds, add MarketWithdraw API (https://github.com/filecoin-project/lotus/pull/5112) +- Add keygen outfile (https://github.com/filecoin-project/lotus/pull/5118) +- Update sr2 stat aggregation (https://github.com/filecoin-project/lotus/pull/5114) +- Fix miner control address lookup (https://github.com/filecoin-project/lotus/pull/5119) +- Fix send with declared nonce 0 (https://github.com/filecoin-project/lotus/pull/5111) +- Introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101) +- Miner control address config for (pre)commits (https://github.com/filecoin-project/lotus/pull/5103) +- Delete repeated call func (https://github.com/filecoin-project/lotus/pull/5099) +- lotus-shed ledger show command (https://github.com/filecoin-project/lotus/pull/5098) +- Log a message when there aren't enough peers for sync (https://github.com/filecoin-project/lotus/pull/5105) +- Miner code cleanup (https://github.com/filecoin-project/lotus/pull/5107) + # 1.2.2 / 2020-12-03 This is an optional Lotus release that introduces various improvements to the mining logic and deal-making workflow, as well as several new UX features. @@ -88,7 +208,7 @@ This is a very small release of Lotus that fixes an issue users are experiencing # 1.2.0 / 2020-11-18 -This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have update to this release (or later). This release also bumps the required version of Go to 1.15. +This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15. 
The changes that break consensus are: diff --git a/Dockerfile.lotus b/Dockerfile.lotus new file mode 100644 index 000000000..43d8fbc23 --- /dev/null +++ b/Dockerfile.lotus @@ -0,0 +1,74 @@ +FROM golang:1.15.6 AS builder-deps +MAINTAINER Lotus Development Team + +RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev + +ARG RUST_VERSION=nightly +ENV XDG_CACHE_HOME="/tmp" + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \ + chmod +x rustup-init; \ + ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \ + rm rustup-init; \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ + rustup --version; \ + cargo --version; \ + rustc --version; + + +FROM builder-deps AS builder-local +MAINTAINER Lotus Development Team + +COPY ./ /opt/filecoin +WORKDIR /opt/filecoin +RUN make clean deps + + +FROM builder-local AS builder +MAINTAINER Lotus Development Team + +WORKDIR /opt/filecoin + +ARG RUSTFLAGS="" +ARG GOFLAGS="" + +RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats + + +FROM ubuntu:20.04 AS base +MAINTAINER Lotus Development Team + +# Base resources +COPY --from=builder /etc/ssl/certs /etc/ssl/certs +COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ + +RUN useradd -r -u 532 -U fc + + +FROM base AS lotus +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_PATH /var/lib/lotus + +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters + +USER fc + +ENTRYPOINT ["/usr/local/bin/lotus"] + +CMD ["-help"] diff --git a/Makefile b/Makefile index aa7575698..98f9d2f2c 100644 --- a/Makefile +++ b/Makefile @@ -63,6 +63,9 @@ debug: lotus lotus-miner lotus-worker lotus-seed 2k: GOFLAGS+=-tags=2k 2k: lotus lotus-miner lotus-worker lotus-seed +calibnet: GOFLAGS+=-tags=calibnet +calibnet: lotus lotus-miner lotus-worker lotus-seed + lotus: $(BUILD_DEPS) rm -f lotus go build $(GOFLAGS) -o lotus ./cmd/lotus diff --git a/README.md b/README.md index 59927faa3..636c01b44 100644 --- a/README.md +++ b/README.md @@ -24,24 +24,31 @@ For instructions on how to build, install and setup lotus, please visit [https:/ Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details. -## Development +## Related packages -The main branches under development at the moment are: -* [`master`](https://github.com/filecoin-project/lotus): current testnet. -* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes. -* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits. 
+These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation: -### Tracker - -All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch. - -### Packages - -The lotus Filecoin implementation unfolds into the following packages: - -- [This repo](https://github.com/filecoin-project/lotus) - [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board) -- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board) +- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board) + +## Contribute + +Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations: + +1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs). +2. If the change is complex and requires prior discussion, [open an issue](github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted. +3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn. + +When implementing a change: + +1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`. +2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc. +3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go. +4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers. +5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted). +6. Add tests. +7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description. +8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message. 
## License diff --git a/SECURITY.md b/SECURITY.md index 592206bc5..d53c2b920 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,11 +2,11 @@ ## Reporting a Vulnerability -For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md +For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/). Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. -Here are some examples of bugs we would consider 'critical': +Here are some examples of bugs we would consider to be security vulnerabilities: * If you can spend from a `multisig` wallet you do not control the keys for. * If you can cause a miner to be slashed without them actually misbehaving. @@ -16,8 +16,8 @@ Here are some examples of bugs we would consider 'critical': * If you can craft a message that causes a persistent fork in the network. * If you can cause the total amount of Filecoin in the network to no longer be 2 billion. -This is not an exhaustive list, but should provide some idea of what we consider 'critical'. +This is not an exhaustive list, but should provide some idea of what we consider as a security vulnerability, . ## Reporting a non security bug -For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). +For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). diff --git a/api/api_full.go b/api/api_full.go index b91ecd3e7..12b3ecf63 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -391,6 +391,8 @@ type FullNode interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) + // StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*MsgLookup, error) // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the // message arrives on chain, and gets to the indicated confidence depth. StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error) @@ -515,6 +517,10 @@ type FullNode interface { // along with the address removal. 
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) + // MarketAddBalance adds funds to the market actor + MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) + // MarketGetReserved gets the amount of funds that are currently reserved for the address + MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) // MarketReserveFunds reserves funds for a deal MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) // MarketReleaseFunds releases funds reserved by MarketReserveFunds @@ -943,7 +949,8 @@ const ( ) type Deadline struct { - PostSubmissions bitfield.BitField + PostSubmissions bitfield.BitField + DisputableProofCount uint64 } type Partition struct { diff --git a/api/api_gateway.go b/api/api_gateway.go index c76c1672d..2be0e057a 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -39,6 +39,7 @@ type GatewayAPI interface { StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateSearchMsg(ctx context.Context, msg cid.Cid) (*MsgLookup, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error) diff --git a/api/api_storage.go b/api/api_storage.go index 85eb03115..042dad73b 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -65,7 +65,17 @@ type StorageMiner interface { // SectorGetExpectedSealDuration gets the expected time for a sector to seal SectorGetExpectedSealDuration(context.Context) (time.Duration, error) SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error + // SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can + // be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. SectorRemove(context.Context, abi.SectorNumber) error + // SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then + // automatically removes it from storage + SectorTerminate(context.Context, abi.SectorNumber) error + // SectorTerminateFlush immediately sends a terminate message with sectors batched for termination. 
+ // Returns null if message wasn't sent + SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) + // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message + SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) @@ -217,9 +227,12 @@ const ( PreCommitAddr AddrUse = iota CommitAddr PoStAddr + + TerminateSectorsAddr ) type AddressConfig struct { PreCommitControl []address.Address CommitControl []address.Address + TerminateControl []address.Address } diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index 6d84675ef..3da39ef56 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -206,6 +206,7 @@ type FullNodeStruct struct { StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` + StateSearchMsgLimited func(context.Context, cid.Cid, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"` @@ -244,6 +245,8 @@ type FullNodeStruct struct { MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"` + MarketAddBalance func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` + MarketGetReserved func(ctx context.Context, addr address.Address) (types.BigInt, error) `perm:"sign"` MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"` MarketWithdraw func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` @@ -312,6 +315,9 @@ type StorageMinerStruct struct { SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"` SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"admin"` SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"` + SectorTerminate func(context.Context, abi.SectorNumber) error `perm:"admin"` + SectorTerminateFlush func(ctx context.Context) (*cid.Cid, error) `perm:"admin"` + SectorTerminatePending func(ctx context.Context) ([]abi.SectorID, error) `perm:"admin"` SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"` WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm @@ -438,6 +444,7 @@ type GatewayStruct struct { StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) 
StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateSearchMsg func(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) @@ -1008,6 +1015,10 @@ func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api return c.Internal.StateSearchMsg(ctx, msgc) } +func (c *FullNodeStruct) StateSearchMsgLimited(ctx context.Context, msgc cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) { + return c.Internal.StateSearchMsgLimited(ctx, msgc, limit) +} + func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { return c.Internal.StateListMiners(ctx, tsk) } @@ -1148,6 +1159,14 @@ func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Addr return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) } +func (c *FullNodeStruct) MarketAddBalance(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { + return c.Internal.MarketAddBalance(ctx, wallet, addr, amt) +} + +func (c *FullNodeStruct) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) { + return c.Internal.MarketGetReserved(ctx, addr) +} + func (c *FullNodeStruct) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { return c.Internal.MarketReserveFunds(ctx, wallet, addr, amt) } @@ -1300,6 +1319,18 @@ func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.Sector return c.Internal.SectorRemove(ctx, number) } +func (c *StorageMinerStruct) SectorTerminate(ctx context.Context, number abi.SectorNumber) error { + return c.Internal.SectorTerminate(ctx, number) +} + +func (c *StorageMinerStruct) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) { + return c.Internal.SectorTerminateFlush(ctx) +} + +func (c *StorageMinerStruct) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return c.Internal.SectorTerminatePending(ctx) +} + func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error { return c.Internal.SectorMarkForUpgrade(ctx, number) } @@ -1754,6 +1785,10 @@ func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSet return g.Internal.StateNetworkVersion(ctx, tsk) } +func (g GatewayStruct) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { + return g.Internal.StateSearchMsg(ctx, msg) +} + func (g GatewayStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { return g.Internal.StateSectorGetInfo(ctx, maddr, n, tsk) } diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go index eedcec6ca..606b9f22b 100644 --- a/api/test/ccupgrade.go +++ b/api/test/ccupgrade.go @@ -17,9 +17,9 @@ import ( func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { for _, height := range []abi.ChainEpoch{ - 1, // before + 2, // before 162, // while sealing - 520, // after upgrade deal + 530, // after upgrade deal 5000, // after } { height := height // make 
linters happy by copying @@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) { ctx := context.Background() - n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] diff --git a/api/test/deals.go b/api/test/deals.go index 1189f070e..3fa6f2d4b 100644 --- a/api/test/deals.go +++ b/api/test/deals.go @@ -8,103 +8,40 @@ import ( "math/rand" "os" "path/filepath" - "sync/atomic" "testing" "time" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/stretchr/testify/require" - "github.com/ipfs/go-cid" files "github.com/ipfs/go-ipfs-files" "github.com/ipld/go-car" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/node/impl" + ipld "github.com/ipfs/go-ipld-format" dag "github.com/ipfs/go-merkledag" dstest "github.com/ipfs/go-merkledag/test" unixfile "github.com/ipfs/go-unixfs/file" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" - ipld "github.com/ipfs/go-ipld-format" ) func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) { + s := setupOneClientOneMiner(t, b, blocktime) + defer s.blockMiner.Stop() - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - MakeDeal(t, ctx, 6, client, miner, carExport, fastRet, startEpoch) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done + MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch) } func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { + s := setupOneClientOneMiner(t, b, blocktime) + defer s.blockMiner.Stop() - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - MakeDeal(t, ctx, 6, client, miner, false, false, startEpoch) - MakeDeal(t, ctx, 7, client, miner, false, false, startEpoch) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done + MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, 
startEpoch) + MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch) } func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) { @@ -152,95 +89,41 @@ func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api } func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() + s := setupOneClientOneMiner(t, b, blocktime) + defer s.blockMiner.Stop() data := make([]byte, 1600) rand.New(rand.NewSource(int64(8))).Read(data) r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) + fcid, err := s.client.ClientImportLocal(s.ctx, r) if err != nil { t.Fatal(err) } fmt.Println("FILE CID: ", fcid) - deal := startDeal(t, ctx, miner, client, fcid, true, startEpoch) + deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch) - waitDealPublished(t, ctx, miner, deal) + waitDealPublished(t, s.ctx, s.miner, deal) fmt.Println("deal published, retrieving") // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal) + info, err := s.client.ClientGetDealInfo(s.ctx, *deal) require.NoError(t, err) - testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data) - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done + testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data) } func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) { - - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() + s := setupOneClientOneMiner(t, b, blocktime) + defer s.blockMiner.Stop() { data1 := make([]byte, 800) rand.New(rand.NewSource(int64(3))).Read(data1) r := bytes.NewReader(data1) - fcid1, err := client.ClientImportLocal(ctx, r) + fcid1, err := s.client.ClientImportLocal(s.ctx, r) if err != nil { t.Fatal(err) } @@ -249,35 +132,31 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration rand.New(rand.NewSource(int64(9))).Read(data2) r2 := bytes.NewReader(data2) - fcid2, err := client.ClientImportLocal(ctx, r2) + fcid2, err := s.client.ClientImportLocal(s.ctx, r2) if err != nil { t.Fatal(err) } - deal1 := startDeal(t, ctx, miner, client, fcid1, true, 0) + deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0) // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal1, 
true) + waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true) - deal2 := startDeal(t, ctx, miner, client, fcid2, true, 0) + deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0) time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal2, false) + waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false) // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal2) + info, err := s.client.ClientGetDealInfo(s.ctx, *deal2) require.NoError(t, err) - rf, _ := miner.SectorsRefs(ctx) + rf, _ := s.miner.SectorsRefs(s.ctx) fmt.Printf("refs: %+v\n", rf) - testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2) + testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2) } - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done } func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid { @@ -459,3 +338,40 @@ func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath strin } return rdata } + +type dealsScaffold struct { + ctx context.Context + client *impl.FullNodeAPI + miner TestStorageNode + blockMiner *BlockMiner +} + +func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold { + n, sn := b(t, OneFull, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + return connectAndStartMining(t, b, blocktime, client, miner) +} + +func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold { + ctx := context.Background() + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + blockMiner := NewBlockMiner(ctx, t, miner, blocktime) + blockMiner.MineBlocks() + + return &dealsScaffold{ + ctx: ctx, + client: client, + miner: miner, + blockMiner: blockMiner, + } +} diff --git a/api/test/test.go b/api/test/test.go index a1b82c590..7d804e8ae 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "testing" "time" @@ -48,6 +49,7 @@ type TestStorageNode struct { ListenAddr multiaddr.Multiaddr MineOne func(context.Context, miner.MineReq) error + Stop func(context.Context) error } var PresealGenesis = -1 @@ -109,14 +111,19 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} var OneFull = DefaultFullOpts(1) var TwoFull = DefaultFullOpts(2) -var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { +var FullNodeWithActorsV3At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { return FullNodeOpts{ Opts: func(nodes []TestNode) node.Option { return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - // Skip directly to tape height so precommits work. - Network: network.Version5, - Height: upgradeHeight, + // prepare for upgrade. + Network: network.Version9, + Height: 1, Migration: stmgr.UpgradeActorsV2, + }, { + // Skip directly to tape height so precommits work. 
+ Network: network.Version10, + Height: upgradeHeight, + Migration: stmgr.UpgradeActorsV3, }}) }, } @@ -157,7 +164,11 @@ func (ts *testSuite) testVersion(t *testing.T) { if err != nil { t.Fatal(err) } - require.Equal(t, v.Version, build.BuildVersion) + versions := strings.Split(v.Version, "+") + if len(versions) <= 0 { + t.Fatal("empty version") + } + require.Equal(t, versions[0], build.BuildVersion) } func (ts *testSuite) testSearchMsg(t *testing.T) { diff --git a/api/test/window_post.go b/api/test/window_post.go index ff107ae8d..99d480836 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -14,14 +14,20 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/mock" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" bminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/impl" @@ -200,7 +206,7 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { for _, height := range []abi.ChainEpoch{ - 1, // before + 2, // before 162, // while sealing 5000, // while proving } { @@ -211,12 +217,13 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector } } + func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] @@ -428,3 +435,592 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged } + +func TestTerminate(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nSectors := uint64(2) + + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}}) + + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. 
+ return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors)) + + fmt.Printf("Seal a sector\n") + + pledgeSectors(t, ctx, miner, 1, 0, nil) + + fmt.Printf("wait for power\n") + + { + // Wait until proven. + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 + fmt.Printf("End for head.Height > %d\n", waitUntil) + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > waitUntil { + fmt.Printf("Now head.Height = %d\n", head.Height()) + break + } + } + } + + nSectors++ + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors)) + + fmt.Println("Terminate a sector") + + toTerminate := abi.SectorNumber(3) + + err = miner.SectorTerminate(ctx, toTerminate) + require.NoError(t, err) + + msgTriggerred := false +loop: + for { + si, err := miner.SectorsStatus(ctx, toTerminate, false) + require.NoError(t, err) + + fmt.Println("state: ", si.State, msgTriggerred) + + switch sealing.SectorState(si.State) { + case sealing.Terminating: + if !msgTriggerred { + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + require.Len(t, p, 1) + require.Equal(t, abi.SectorNumber(3), p[0].Number) + } + + c, err := miner.SectorTerminateFlush(ctx) + require.NoError(t, err) + if c != nil { + msgTriggerred = true + fmt.Println("terminate message:", c) + + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + require.Len(t, p, 0) + } + } + } + case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed: + break loop + } + + time.Sleep(100 * time.Millisecond) + } + + // check power decreased + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1))) + + // check in terminated set + { + parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + bflen := func(b bitfield.BitField) uint64 { + l, err := b.Count() + require.NoError(t, err) + return l + } + + require.Equal(t, uint64(1), bflen(parts[0].AllSectors)) + require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) + } + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { + fmt.Printf("Now head.Height = %d\n", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + require.NoError(t, err) + fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1))) +} + +func 
TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // First, we configure two miners. After sealing, we're going to turn off the first miner so + // it doesn't submit proofs. + /// + // Then we're going to manually submit bad proofs. + n, sn := b(t, []FullNodeOpts{ + FullNodeWithActorsV3At(2), + }, []StorageMiner{ + {Full: 0, Preseal: PresealGenesis}, + {Full: 0}, + }) + + client := n[0].FullNode.(*impl.FullNodeAPI) + chainMiner := sn[0] + evilMiner := sn[1] + + { + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := chainMiner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + + if err := evilMiner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + } + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + // Mine with the _second_ node (the good one). + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := chainMiner.MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. + return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + // Give the chain miner enough sectors to win every block. + pledgeSectors(t, ctx, chainMiner, 10, 0, nil) + // And the evil one 1 sector. No cookie for you. + pledgeSectors(t, ctx, evilMiner, 1, 0, nil) + + // Let the evil miner's sectors gain power. + evilMinerAddr, err := evilMiner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + fmt.Printf("Running one proving period\n") + fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2) + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 { + fmt.Printf("Now head.Height = %d\n", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + evilSectors, err := evilMiner.SectorsList(ctx) + require.NoError(t, err) + evilSectorNo := evilSectors[0] // only one. + evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) + require.NoError(t, err) + + fmt.Println("evil miner stopping") + + // Now stop the evil miner, and start manually submitting bad proofs. + require.NoError(t, evilMiner.Stop(ctx)) + + fmt.Println("evil miner stopped") + + // Wait until we need to prove our sector. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.NoError(t, err, "evil proof not accepted") + + // Wait until after the proving period. 
+ for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index != evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + fmt.Println("accepted evil proof") + + // Make sure the evil node didn't lose any power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + // OBJECTION! The good miner files a DISPUTE!!!! + { + params := &minerActor.DisputeWindowedPoStParams{ + Deadline: evilSectorLoc.Deadline, + PoStIndex: 0, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DisputeWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: defaultFrom, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + fmt.Println("waiting dispute") + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Objection SUSTAINED! + // Make sure the evil node lost power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // Now we begin the redemption arc. + require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // First, recover the sector. + + { + minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + params := &minerActor.DeclareFaultsRecoveredParams{ + Recoveries: []minerActor.RecoveryDeclaration{{ + Deadline: evilSectorLoc.Deadline, + Partition: evilSectorLoc.Partition, + Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}), + }}, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DeclareFaultsRecovered, + Params: enc, + Value: types.FromFil(30), // repay debt. + From: minerInfo.Owner, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Then wait for the deadline. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + // Now try to be evil again + err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.Error(t, err) + require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt") + + // It didn't work because we're recovering. 
+} + +func submitBadProof( + ctx context.Context, + client api.FullNode, maddr address.Address, + di *dline.Info, dlIdx, partIdx uint64, +) error { + head, err := client.ChainHead(ctx) + if err != nil { + return err + } + + from, err := client.WalletDefaultAddress(ctx) + if err != nil { + return err + } + + minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) + if err != nil { + return err + } + + commEpoch := di.Open + commRand, err := client.ChainGetRandomnessFromTickets( + ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit, + commEpoch, nil, + ) + if err != nil { + return err + } + params := &minerActor.SubmitWindowedPoStParams{ + ChainCommitEpoch: commEpoch, + ChainCommitRand: commRand, + Deadline: dlIdx, + Partitions: []minerActor.PoStPartition{{Index: partIdx}}, + Proofs: []proof3.PoStProof{{ + PoStProof: minerInfo.WindowPoStProofType, + ProofBytes: []byte("I'm soooo very evil."), + }}, + } + + enc, aerr := actors.SerializeParams(params) + if aerr != nil { + return aerr + } + + msg := &types.Message{ + To: maddr, + Method: minerActor.Methods.SubmitWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: from, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence) + if err != nil { + return err + } + if rec.Receipt.ExitCode.IsError() { + return rec.Receipt.ExitCode + } + return nil +} + +func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, OneMiner) + + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + { + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + } + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + // Mine with the _second_ node (the good one). + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := miner.MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. + return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + pledgeSectors(t, ctx, miner, 10, 0, nil) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + fmt.Printf("Running one proving period\n") + fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2) + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 { + fmt.Printf("Now head.Height = %d\n", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + expectedPower := types.NewInt(uint64(ssz) * (GenesisPreseals + 10)) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, expectedPower) + + // Wait until a proof has been submitted. 
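+	// Scan the deadlines until one of them records a PoSt submission, and remember it as the
+	// deadline to dispute.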
+	var targetDeadline uint64
+waitForProof:
+	for {
+		deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
+		require.NoError(t, err)
+		for dlIdx, dl := range deadlines {
+			empty, err := dl.PostSubmissions.IsEmpty()
+			require.NoError(t, err)
+			if !empty {
+				targetDeadline = uint64(dlIdx)
+				break waitForProof
+			}
+		}
+
+		build.Clock.Sleep(blocktime)
+	}
+
+	for {
+		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+		require.NoError(t, err)
+		// wait until the deadline finishes.
+		if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
+			break
+		}
+
+		build.Clock.Sleep(blocktime)
+	}
+
+	// Try to object to the proof. This should fail.
+	{
+		params := &minerActor.DisputeWindowedPoStParams{
+			Deadline:  targetDeadline,
+			PoStIndex: 0,
+		}
+
+		enc, aerr := actors.SerializeParams(params)
+		require.NoError(t, aerr)
+
+		msg := &types.Message{
+			To:     maddr,
+			Method: minerActor.Methods.DisputeWindowedPoSt,
+			Params: enc,
+			Value:  types.NewInt(0),
+			From:   defaultFrom,
+		}
+		_, err := client.MpoolPushMessage(ctx, msg, nil)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
+	}
+}
diff --git a/build/bootstrap.go b/build/bootstrap.go
index 80c1529ff..cd72cfd1b 100644
--- a/build/bootstrap.go
+++ b/build/bootstrap.go
@@ -2,11 +2,9 @@ package build
 
 import (
 	"context"
-	"os"
 	"strings"
 
 	"github.com/filecoin-project/lotus/lib/addrutil"
-	"golang.org/x/xerrors"
 
 	rice "github.com/GeertJohan/go.rice"
 	"github.com/libp2p/go-libp2p-core/peer"
@@ -17,24 +15,16 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) {
 		return nil, nil
 	}
 
-	var out []peer.AddrInfo
-
 	b := rice.MustFindBox("bootstrap")
-	err := b.Walk("", func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return xerrors.Errorf("failed to walk box: %w", err)
+
+	if BootstrappersFile != "" {
+		spi := b.MustString(BootstrappersFile)
+		if spi == "" {
+			return nil, nil
 		}
-		if !strings.HasSuffix(path, ".pi") {
-			return nil
-		}
-		spi := b.MustString(path)
-		if spi == "" {
-			return nil
-		}
-		pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
-		out = append(out, pi...)
- return err - }) - return out, err + return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n")) + } + + return nil, nil } diff --git a/build/bootstrap/calibnet.pi b/build/bootstrap/calibnet.pi new file mode 100644 index 000000000..cb3a2efbd --- /dev/null +++ b/build/bootstrap/calibnet.pi @@ -0,0 +1,4 @@ +/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWK1QYsm6iqyhgH7vqsbeoNoKHbT368h1JLHS1qYN36oyc +/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWKDyJZoPsNak1iYNN1GGmvGnvhyVbWBL6iusYfP3RpgYs +/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWJRSTnzABB6MYYEBbSTT52phQntVD1PpRTMh1xt9mh6yH +/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWQLi3kY6HnMYLUtwCe26zWMdNhniFgHVNn1DioQc7NiWv diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/mainnet.pi similarity index 100% rename from build/bootstrap/bootstrappers.pi rename to build/bootstrap/mainnet.pi diff --git a/build/genesis.go b/build/genesis.go index dc4ded273..812f5a9df 100644 --- a/build/genesis.go +++ b/build/genesis.go @@ -14,7 +14,7 @@ func MaybeGenesis() []byte { log.Warnf("loading built-in genesis: %s", err) return nil } - genBytes, err := builtinGen.Bytes("devnet.car") + genBytes, err := builtinGen.Bytes(GenesisFile) if err != nil { log.Warnf("loading built-in genesis: %s", err) } diff --git a/build/genesis/calibnet.car b/build/genesis/calibnet.car new file mode 100644 index 000000000..f8af39878 Binary files /dev/null and b/build/genesis/calibnet.car differ diff --git a/build/genesis/devnet.car b/build/genesis/mainnet.car similarity index 100% rename from build/genesis/devnet.car rename to build/genesis/mainnet.car diff --git a/build/isnearupgrade.go b/build/isnearupgrade.go new file mode 100644 index 000000000..4273f0e9e --- /dev/null +++ b/build/isnearupgrade.go @@ -0,0 +1,9 @@ +package build + +import ( + "github.com/filecoin-project/go-state-types/abi" +) + +func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool { + return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality +} diff --git a/build/params_2k.go b/build/params_2k.go index c86de7ffa..63f7e70b4 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -3,13 +3,13 @@ package build import ( - "math" - "os" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/policy" ) +const BootstrappersFile = "" +const GenesisFile = "" + const UpgradeBreezeHeight = -1 const BreezeGasTampingDuration = 0 @@ -18,12 +18,16 @@ const UpgradeIgnitionHeight = -2 const UpgradeRefuelHeight = -3 const UpgradeTapeHeight = -4 -var UpgradeActorsV2Height = abi.ChainEpoch(10) -var UpgradeLiftoffHeight = abi.ChainEpoch(-5) +const UpgradeActorsV2Height = 10 +const UpgradeLiftoffHeight = -5 const UpgradeKumquatHeight = 15 const UpgradeCalicoHeight = 20 const UpgradePersianHeight = 25 +const UpgradeOrangeHeight = 27 +const UpgradeClausHeight = 30 + +const UpgradeActorsV3Height = 35 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -34,11 +38,6 @@ func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) - if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { - UpgradeActorsV2Height = math.MaxInt64 - UpgradeLiftoffHeight = 11 - } - BuildType |= Build2k } diff --git a/build/params_calibnet.go b/build/params_calibnet.go new file mode 100644 index 000000000..d925cfb33 --- /dev/null +++ b/build/params_calibnet.go @@ -0,0 +1,70 @@ +// +build 
calibnet + +package build + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +const BootstrappersFile = "calibnet.pi" +const GenesisFile = "calibnet.car" + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 120 + +const UpgradeSmokeHeight = -2 + +const UpgradeIgnitionHeight = -3 +const UpgradeRefuelHeight = -4 + +var UpgradeActorsV2Height = abi.ChainEpoch(30) + +const UpgradeTapeHeight = 60 + +// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. +// Miners, clients, developers, custodians all need time to prepare. +// We still have upgrades and state changes to do, but can happen after signaling timing here. +const UpgradeLiftoffHeight = -5 + +const UpgradeKumquatHeight = 90 + +const UpgradeCalicoHeight = 92000 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60) + +// 2020-12-17T19:00:00Z +const UpgradeClausHeight = 161386 + +// 2021-01-17T19:00:00Z +const UpgradeOrangeHeight = 250666 + +// 2021-01-28T21:00:00Z +const UpgradeActorsV3Height = 282586 + +func init() { + policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 30)) + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + + SetAddressNetwork(address.Testnet) + + Devnet = true + + BuildType = BuildCalibnet +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 4 diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 3d8f5e374..277e0862b 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -1,11 +1,11 @@ // +build !debug // +build !2k // +build !testground +// +build !calibnet package build import ( - "math" "os" "github.com/filecoin-project/go-address" @@ -19,7 +19,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ UpgradeSmokeHeight: DrandMainnet, } +const BootstrappersFile = "mainnet.pi" +const GenesisFile = "mainnet.car" + const UpgradeBreezeHeight = 41280 + const BreezeGasTampingDuration = 120 const UpgradeSmokeHeight = 51000 @@ -27,7 +31,7 @@ const UpgradeSmokeHeight = 51000 const UpgradeIgnitionHeight = 94000 const UpgradeRefuelHeight = 130800 -var UpgradeActorsV2Height = abi.ChainEpoch(138720) +const UpgradeActorsV2Height = 138720 const UpgradeTapeHeight = 140760 @@ -41,6 +45,14 @@ const UpgradeKumquatHeight = 170000 const UpgradeCalicoHeight = 265200 const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60) +const UpgradeOrangeHeight = 336458 + +// 2020-12-22T02:00:00Z +const UpgradeClausHeight = 343200 + +// TODO +const UpgradeActorsV3Height = 999999999 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) @@ -48,11 +60,9 @@ func init() { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { - UpgradeActorsV2Height = math.MaxInt64 - } - Devnet = false + + BuildType = BuildMainnet } const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 994c32934..5e2d18ff1 100644 --- 
a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version8 +const NewestNetworkVersion = network.Version9 const ActorUpgradeNetworkVersion = network.Version4 // Epochs diff --git a/build/params_testground.go b/build/params_testground.go index 0ee986a7c..759b29692 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -90,16 +90,22 @@ var ( UpgradeKumquatHeight abi.ChainEpoch = -6 UpgradeCalicoHeight abi.ChainEpoch = -7 UpgradePersianHeight abi.ChainEpoch = -8 + UpgradeOrangeHeight abi.ChainEpoch = -9 + UpgradeClausHeight abi.ChainEpoch = -10 + UpgradeActorsV3Height abi.ChainEpoch = -11 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } - NewestNetworkVersion = network.Version8 + NewestNetworkVersion = network.Version9 ActorUpgradeNetworkVersion = network.Version4 Devnet = true ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") + + BootstrappersFile = "" + GenesisFile = "" ) const BootstrapPeerThreshold = 1 diff --git a/build/version.go b/build/version.go index 895d92a49..fe9fc07c5 100644 --- a/build/version.go +++ b/build/version.go @@ -10,26 +10,32 @@ var CurrentCommit string var BuildType int const ( - BuildDefault = 0 - Build2k = 0x1 - BuildDebug = 0x3 + BuildDefault = 0 + BuildMainnet = 0x1 + Build2k = 0x2 + BuildDebug = 0x3 + BuildCalibnet = 0x4 ) func buildType() string { switch BuildType { case BuildDefault: return "" - case BuildDebug: - return "+debug" + case BuildMainnet: + return "+mainnet" case Build2k: return "+2k" + case BuildDebug: + return "+debug" + case BuildCalibnet: + return "+calibnet" default: return "+huh?" 
} } // BuildVersion is the local build version, set by build system -const BuildVersion = "1.2.2" +const BuildVersion = "1.4.1" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit @@ -84,7 +90,7 @@ func VersionForType(nodeType NodeType) (Version, error) { // semver versions of the rpc api exposed var ( FullAPIVersion = newVer(1, 0, 0) - MinerAPIVersion = newVer(1, 0, 0) + MinerAPIVersion = newVer(1, 0, 1) WorkerAPIVersion = newVer(1, 0, 0) ) diff --git a/chain/actors/adt/adt.go b/chain/actors/adt/adt.go index 6a454ac26..084471bb8 100644 --- a/chain/actors/adt/adt.go +++ b/chain/actors/adt/adt.go @@ -2,16 +2,9 @@ package adt import ( "github.com/ipfs/go-cid" - "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/chain/actors" - - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" - adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) type Map interface { @@ -24,26 +17,6 @@ type Map interface { ForEach(v cbor.Unmarshaler, fn func(key string) error) error } -func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) { - switch version { - case actors.Version0: - return adt0.AsMap(store, root) - case actors.Version2: - return adt2.AsMap(store, root) - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - -func NewMap(store Store, version actors.Version) (Map, error) { - switch version { - case actors.Version0: - return adt0.MakeEmptyMap(store), nil - case actors.Version2: - return adt2.MakeEmptyMap(store), nil - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - type Array interface { Root() (cid.Cid, error) @@ -54,23 +27,3 @@ type Array interface { ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error } - -func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) { - switch actors.VersionForNetwork(version) { - case actors.Version0: - return adt0.AsArray(store, root) - case actors.Version2: - return adt2.AsArray(store, root) - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - -func NewArray(store Store, version actors.Version) (Array, error) { - switch version { - case actors.Version0: - return adt0.MakeEmptyArray(store), nil - case actors.Version2: - return adt2.MakeEmptyArray(store), nil - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 38ed2654b..53a03e6f3 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -13,6 +13,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" ) func init() { @@ -22,9 +23,12 @@ func init() { builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } -var Methods = builtin2.MethodsAccount +var Methods = builtin3.MethodsAccount func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { @@ -32,6 +36,8 @@ func Load(store adt.Store, act 
*types.Actor) (State, error) { return load0(store, act.Head) case builtin2.AccountActorCodeID: return load2(store, act.Head) + case builtin3.AccountActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/account/v3.go b/chain/actors/builtin/account/v3.go new file mode 100644 index 000000000..16b489a3e --- /dev/null +++ b/chain/actors/builtin/account/v3.go @@ -0,0 +1,30 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + account3.State + store adt.Store +} + +func (s *state3) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index afba8efe8..1e535dca4 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -2,12 +2,12 @@ package builtin import ( "github.com/filecoin-project/go-address" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" "github.com/ipfs/go-cid" "golang.org/x/xerrors" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" @@ -15,9 +15,12 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" + smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" + smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" ) var SystemActorAddr = builtin0.SystemActorAddr @@ -38,11 +41,12 @@ const ( ) const ( - MethodSend = builtin2.MethodSend - MethodConstructor = builtin2.MethodConstructor + MethodSend = builtin3.MethodSend + MethodConstructor = builtin3.MethodConstructor ) -// TODO: Why does actors have 2 different versions of this? +// These are all just type aliases across actor versions 0, 2, & 3. In the future, that might change +// and we might need to do something fancier. type SectorInfo = proof0.SectorInfo type PoStProof = proof0.PoStProof type FilterEstimate = smoothing0.FilterEstimate @@ -51,13 +55,17 @@ func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { return (FilterEstimate)(v0) } -// Doesn't change between actors v0 and v1 +// Doesn't change between actors v0, v2, and v3. 
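+// The v0 implementation is therefore reused for all supported actor versions.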
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) } -func FromV2FilterEstimate(v1 smoothing2.FilterEstimate) FilterEstimate { - return (FilterEstimate)(v1) +func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate { + return (FilterEstimate)(v2) +} + +func FromV3FilterEstimate(v3 smoothing3.FilterEstimate) FilterEstimate { + return (FilterEstimate)(v3) } type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) @@ -82,30 +90,42 @@ func ActorNameByCode(c cid.Cid) string { return builtin0.ActorNameByCode(c) case builtin2.IsBuiltinActor(c): return builtin2.ActorNameByCode(c) + case builtin3.IsBuiltinActor(c): + return builtin3.ActorNameByCode(c) default: return "" } } func IsBuiltinActor(c cid.Cid) bool { - return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c) + return builtin0.IsBuiltinActor(c) || + builtin2.IsBuiltinActor(c) || + builtin3.IsBuiltinActor(c) } func IsAccountActor(c cid.Cid) bool { - return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID + return c == builtin0.AccountActorCodeID || + c == builtin2.AccountActorCodeID || + c == builtin3.AccountActorCodeID } func IsStorageMinerActor(c cid.Cid) bool { - return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID + return c == builtin0.StorageMinerActorCodeID || + c == builtin2.StorageMinerActorCodeID || + c == builtin3.StorageMinerActorCodeID } func IsMultisigActor(c cid.Cid) bool { - return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID + return c == builtin0.MultisigActorCodeID || + c == builtin2.MultisigActorCodeID || + c == builtin3.MultisigActorCodeID } func IsPaymentChannelActor(c cid.Cid) bool { - return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID + return c == builtin0.PaymentChannelActorCodeID || + c == builtin2.PaymentChannelActorCodeID || + c == builtin3.PaymentChannelActorCodeID } func makeAddress(addr string) address.Address { diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 65bfd992f..284aad82e 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -1,10 +1,10 @@ package cron import ( - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" ) var ( - Address = builtin2.CronActorAddr - Methods = builtin2.MethodsCron + Address = builtin3.CronActorAddr + Methods = builtin3.MethodsCron ) diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 466af6a6c..f9e912768 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -15,6 +15,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" ) func init() { @@ -24,11 +25,14 @@ func init() { builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } var ( - Address = builtin2.InitActorAddr - Methods = builtin2.MethodsInit + Address = 
builtin3.InitActorAddr + Methods = builtin3.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return load0(store, act.Head) case builtin2.InitActorCodeID: return load2(store, act.Head) + case builtin3.InitActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/init/v3.go b/chain/actors/builtin/init/v3.go new file mode 100644 index 000000000..e586b3b11 --- /dev/null +++ b/chain/actors/builtin/init/v3.go @@ -0,0 +1,86 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + init3.State + store adt.Store +} + +func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state3) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state3) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state3) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state3) Remove(addrs ...address.Address) (err error) { + m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state3) addressMap() (adt.Map, error) { + return adt3.AsMap(s.store, s.AddressMap, builtin3.DefaultHamtBitwidth) +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 8bb31f2b4..628b27e08 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -12,6 +12,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -25,11 +26,14 @@ func init() { builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } var ( - Address = builtin2.StorageMarketActorAddr - Methods = builtin2.MethodsMarket + Address = builtin3.StorageMarketActorAddr + Methods = builtin3.MethodsMarket ) func Load(store adt.Store, act *types.Actor) (st State, err error) { @@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) { return load0(store, act.Head) case builtin2.StorageMarketActorCodeID: return load2(store, act.Head) + case builtin3.StorageMarketActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go new file mode 100644 index 000000000..3309d37a7 --- /dev/null +++ b/chain/actors/builtin/market/v3.go @@ -0,0 +1,205 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + market3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state3) BalancesChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState2.State.EscrowTable) || !s.State.LockedTable.Equals(otherState2.State.LockedTable), nil +} + +func (s *state3) StatesChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState2.State.States), nil +} + +func (s *state3) States() (DealStates, error) { + stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates3{stateArray}, nil +} + +func (s *state3) ProposalsChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return 
!s.State.Proposals.Equals(otherState2.State.Proposals), nil +} + +func (s *state3) Proposals() (DealProposals, error) { + proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals3{proposalArray}, nil +} + +func (s *state3) EscrowTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) LockedTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +type balanceTable3 struct { + *adt3.BalanceTable +} + +func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt3.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates3 struct { + adt.Array +} + +func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal2 market3.DealState + found, err := s.Array.Get(uint64(dealID), &deal2) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV3DealState(deal2) + return &deal, true, nil +} + +func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds1 market3.DealState + return s.Array.ForEach(&ds1, func(idx int64) error { + return cb(abi.DealID(idx), fromV3DealState(ds1)) + }) +} + +func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { + var ds1 market3.DealState + if err := ds1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV3DealState(ds1) + return &ds, nil +} + +func (s *dealStates3) array() adt.Array { + return s.Array +} + +func fromV3DealState(v1 market3.DealState) DealState { + return (DealState)(v1) +} + +type dealProposals3 struct { + adt.Array +} + +func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal2 market3.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal2) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV3DealProposal(proposal2) + return &proposal, true, nil +} + +func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp1 market3.DealProposal + return s.Array.ForEach(&dp1, func(idx int64) error { + return cb(abi.DealID(idx), fromV3DealProposal(dp1)) + }) +} + +func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp1 market3.DealProposal + if err := dp1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV3DealProposal(dp1) + return &dp, nil +} + +func (s *dealProposals3) array() adt.Array { + return s.Array +} + +func fromV3DealProposal(v1 market3.DealProposal) DealProposal { + return (DealProposal)(v1) +} diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 5821d092b..49a468efb 100644 
--- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -2,6 +2,7 @@ package miner import ( "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" @@ -20,6 +21,9 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" ) func init() { @@ -29,11 +33,14 @@ func init() { builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } -var Methods = builtin2.MethodsMiner +var Methods = builtin3.MethodsMiner -// Unchanged between v0 and v2 actors +// Unchanged between v0, v2, and v3 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines var WPoStChallengeWindow = miner0.WPoStChallengeWindow @@ -42,12 +49,18 @@ var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff const MinSectorExpiration = miner0.MinSectorExpiration +// Not used / checked in v0 +var DeclarationsMax = miner2.DeclarationsMax +var AddressedSectorsMax = miner2.AddressedSectorsMax + func Load(store adt.Store, act *types.Actor) (st State, err error) { switch act.Code { case builtin0.StorageMinerActorCodeID: return load0(store, act.Head) case builtin2.StorageMinerActorCodeID: return load2(store, act.Head) + case builtin3.StorageMinerActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -91,9 +104,10 @@ type State interface { type Deadline interface { LoadPartition(idx uint64) (Partition, error) ForEachPartition(cb func(idx uint64, part Partition) error) error - PostSubmissions() (bitfield.BitField, error) + PartitionsPoSted() (bitfield.BitField, error) PartitionsChanged(Deadline) (bool, error) + DisputableProofCount() (uint64, error) } type Partition interface { @@ -137,6 +151,60 @@ type DeclareFaultsParams = miner0.DeclareFaultsParams type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams +type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams + +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + // We added support for the new proofs in network version 7, and removed support for the old + // ones in network version 8. 
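+	// Below network version 7, map window PoSt proof types to the original V1 seal proof types;
+	// from version 7 onwards, map them to the corresponding V1_1 types.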
+ if nver < network.Version7 { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } + } + + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } +} + +func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil + default: + return -1, xerrors.Errorf("unknown proof type %d", proof) + } +} type MinerInfo struct { Owner address.Address // Must be an ID-address. 
@@ -146,7 +214,7 @@ type MinerInfo struct { WorkerChangeEpoch abi.ChainEpoch PeerId *peer.ID Multiaddrs []abi.Multiaddrs - SealProofType abi.RegisteredSealProof + WindowPoStProofType abi.RegisteredPoStProof SectorSize abi.SectorSize WindowPoStPartitionSectors uint64 ConsensusFaultElapsed abi.ChainEpoch diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 69160e4e0..ebe5cf085 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -297,6 +297,11 @@ func (s *state0) Info() (MinerInfo, error) { pid = &peerID } + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + mi := MinerInfo{ Owner: info.Owner, Worker: info.Worker, @@ -307,7 +312,7 @@ func (s *state0) Info() (MinerInfo, error) { PeerId: pid, Multiaddrs: info.Multiaddrs, - SealProofType: info.SealProofType, + WindowPoStProofType: wpp, SectorSize: info.SectorSize, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, ConsensusFaultElapsed: -1, @@ -382,10 +387,15 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) { return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil } -func (d *deadline0) PostSubmissions() (bitfield.BitField, error) { +func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) { return d.Deadline.PostSubmissions, nil } +func (d *deadline0) DisputableProofCount() (uint64, error) { + // field doesn't exist until v3 + return 0, nil +} + func (p *partition0) AllSectors() (bitfield.BitField, error) { return p.Partition.Sectors, nil } diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 2c67484ea..79f984213 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -296,6 +296,11 @@ func (s *state2) Info() (MinerInfo, error) { pid = &peerID } + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + mi := MinerInfo{ Owner: info.Owner, Worker: info.Worker, @@ -306,7 +311,7 @@ func (s *state2) Info() (MinerInfo, error) { PeerId: pid, Multiaddrs: info.Multiaddrs, - SealProofType: info.SealProofType, + WindowPoStProofType: wpp, SectorSize: info.SectorSize, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, ConsensusFaultElapsed: info.ConsensusFaultElapsed, @@ -381,10 +386,15 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) { return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil } -func (d *deadline2) PostSubmissions() (bitfield.BitField, error) { +func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) { return d.Deadline.PostSubmissions, nil } +func (d *deadline2) DisputableProofCount() (uint64, error) { + // field doesn't exist until v3 + return 0, nil +} + func (p *partition2) AllSectors() (bitfield.BitField, error) { return p.Partition.Sectors, nil } diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go new file mode 100644 index 000000000..3379e720e --- /dev/null +++ b/chain/actors/builtin/miner/v3.go @@ -0,0 +1,434 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + miner3.State + store adt.Store +} + +type deadline3 struct { + miner3.Deadline + store adt.Store +} + +type partition3 struct { + miner3.Partition + store adt.Store +} + +func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state3) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state3) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state3) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state3) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state3) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. 
+ stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner3.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner3.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner3.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info2 miner3.SectorOnChainInfo + if err := sectors.ForEach(&info2, func(_ int64) error { + info := fromV3SectorOnChainInfo(info2) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos2, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos2)) + for i, info2 := range infos2 { + info := fromV3SectorOnChainInfo(*info2) + infos[i] = &info + } + return infos, nil +} + +func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline3{*dl, s.store}, nil +} + +func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error { + return cb(i, &deadline3{*dl, s.store}) + }) +} + +func (s *state3) NumDeadlines() (uint64, error) { + return miner3.WPoStPeriodDeadlines, nil +} + +func (s *state3) DeadlinesChanged(other State) (bool, error) { + other2, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other2.Deadlines), nil +} + +func (s *state3) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state3) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func (s *state3) sectors() (adt.Array, error) { + return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth) +} + +func (s *state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner3.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV3SectorOnChainInfo(si), nil +} + +func (s *state3) precommits() (adt.Map, error) { + return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner3.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV3SectorPreCommitOnChainInfo(sp), nil +} + +func (d *deadline3) LoadPartition(idx uint64) (Partition, error) { + p, err := 
d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition3{*p, d.store}, nil +} + +func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner3.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition3{part, d.store}) + }) +} + +func (d *deadline3) PartitionsChanged(other Deadline) (bool, error) { + other2, ok := other.(*deadline3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil +} + +func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline3) DisputableProofCount() (uint64, error) { + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil +} + +func (p *partition3) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition3) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition3) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { + return SectorOnChainInfo{ + SectorNumber: v3.SectorNumber, + SealProof: v3.SealProof, + SealedCID: v3.SealedCID, + DealIDs: v3.DealIDs, + Activation: v3.Activation, + Expiration: v3.Expiration, + DealWeight: v3.DealWeight, + VerifiedDealWeight: v3.VerifiedDealWeight, + InitialPledge: v3.InitialPledge, + ExpectedDayReward: v3.ExpectedDayReward, + ExpectedStoragePledge: v3.ExpectedStoragePledge, + } +} + +func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v3.Info), + PreCommitDeposit: v3.PreCommitDeposit, + PreCommitEpoch: v3.PreCommitEpoch, + DealWeight: v3.DealWeight, + VerifiedDealWeight: v3.VerifiedDealWeight, + } +} diff --git a/chain/actors/builtin/multisig/message.go b/chain/actors/builtin/multisig/message.go index 3d2c66e6b..a43b919aa 100644 --- a/chain/actors/builtin/multisig/message.go +++ b/chain/actors/builtin/multisig/message.go @@ -9,14 +9,14 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin2.MethodsMultisig +var Methods = builtin3.MethodsMultisig func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -24,6 +24,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder { return message0{from} case actors.Version2: return message2{message0{from}} + case actors.Version3: + return message3{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -47,11 +49,11 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = multisig2.ProposalHashData -type ProposeReturn = 
multisig2.ProposeReturn +type ProposalHashData = multisig3.ProposalHashData +type ProposeReturn = multisig3.ProposeReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)} + params := multisig3.TxnIDParams{ID: multisig3.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) diff --git a/chain/actors/builtin/multisig/message3.go b/chain/actors/builtin/multisig/message3.go new file mode 100644 index 000000000..f5f6d8cdf --- /dev/null +++ b/chain/actors/builtin/multisig/message3.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message3 struct{ message0 } + +func (m message3) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig3.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init3.ExecParams{ + CodeCID: builtin3.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin3.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/state.go b/chain/actors/builtin/multisig/state.go index fea42ba5f..5f9fb6a52 100644 --- a/chain/actors/builtin/multisig/state.go +++ b/chain/actors/builtin/multisig/state.go @@ -12,6 +12,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -25,6 +26,9 @@ func init() { builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } func Load(store adt.Store, 
act *types.Actor) (State, error) { @@ -33,6 +37,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return load0(store, act.Head) case builtin2.MultisigActorCodeID: return load2(store, act.Head) + case builtin3.MultisigActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/multisig/state0.go b/chain/actors/builtin/multisig/state0.go index e6f9a9c36..5548b0a5c 100644 --- a/chain/actors/builtin/multisig/state0.go +++ b/chain/actors/builtin/multisig/state0.go @@ -13,8 +13,8 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" - multisig0 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" ) var _ State = (*state0)(nil) diff --git a/chain/actors/builtin/multisig/state3.go b/chain/actors/builtin/multisig/state3.go new file mode 100644 index 000000000..7ade2ab64 --- /dev/null +++ b/chain/actors/builtin/multisig/state3.go @@ -0,0 +1,95 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + msig3.State + store adt.Store +} + +func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state3) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state3) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state3) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state3) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state3) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig3.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) + }) +} + +func (s *state3) PendingTxnChanged(other State) (bool, error) { + other2, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other2.PendingTxns), nil +} + +func (s *state3) transactions() (adt.Map, error) { + return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeTransaction(val 
*cbg.Deferred) (Transaction, error) { + var tx msig3.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} diff --git a/chain/actors/builtin/paych/message.go b/chain/actors/builtin/paych/message.go index 5709d4b23..39c091d45 100644 --- a/chain/actors/builtin/paych/message.go +++ b/chain/actors/builtin/paych/message.go @@ -8,10 +8,10 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" ) -var Methods = builtin2.MethodsPaych +var Methods = builtin3.MethodsPaych func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -19,6 +19,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder { return message0{from} case actors.Version2: return message2{from} + case actors.Version3: + return message3{from} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } diff --git a/chain/actors/builtin/paych/message3.go b/chain/actors/builtin/paych/message3.go new file mode 100644 index 000000000..50503a140 --- /dev/null +++ b/chain/actors/builtin/paych/message3.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message3 struct{ from address.Address } + +func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init3.ExecParams{ + CodeCID: builtin3.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin3.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message3) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Settle, + }, nil +} + +func (m message3) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/state.go b/chain/actors/builtin/paych/state.go index 20c7a74b7..accb96244 100644 --- a/chain/actors/builtin/paych/state.go +++ b/chain/actors/builtin/paych/state.go @@ -15,6 
+15,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -28,6 +29,9 @@ func init() { builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } // Load returns an abstract copy of payment channel state, irregardless of actor version @@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return load0(store, act.Head) case builtin2.PaymentChannelActorCodeID: return load2(store, act.Head) + case builtin3.PaymentChannelActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/paych/state3.go b/chain/actors/builtin/paych/state3.go new file mode 100644 index 000000000..14bb4cb61 --- /dev/null +++ b/chain/actors/builtin/paych/state3.go @@ -0,0 +1,104 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + paych3.State + store adt.Store + lsAmt *adt3.Array +} + +// Channel owner, who has funded the actor +func (s *state3) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state3) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state3) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state3) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt3.AsArray(s.store, s.State.LaneStates, paych3.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state3) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +// Iterate lane states +func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very 
large index. + var ls paych3.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState3{ls}) + }) +} + +type laneState3 struct { + paych3.LaneState +} + +func (ls *laneState3) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState3) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index e0cf0d700..712fb0b98 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -16,6 +16,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" ) func init() { @@ -25,11 +26,14 @@ func init() { builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } var ( - Address = builtin2.StoragePowerActorAddr - Methods = builtin2.MethodsPower + Address = builtin3.StoragePowerActorAddr + Methods = builtin3.MethodsPower ) func Load(store adt.Store, act *types.Actor) (st State, err error) { @@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) { return load0(store, act.Head) case builtin2.StoragePowerActorCodeID: return load2(store, act.Head) + case builtin3.StoragePowerActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/power/v3.go b/chain/actors/builtin/power/v3.go new file mode 100644 index 000000000..fd161dda5 --- /dev/null +++ b/chain/actors/builtin/power/v3.go @@ -0,0 +1,149 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + power3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state3) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state3) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power3.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV3FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state3) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state3) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power3.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state3) ClaimsChanged(other State) (bool, error) { + other2, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other2.State.Claims), nil +} + +func (s *state3) claims() (adt.Map, error) { + return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power3.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV3Claim(ci), nil +} + +func fromV3Claim(v3 power3.Claim) Claim { + return Claim{ + RawBytePower: v3.RawBytePower, + QualityAdjPower: v3.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 952ca270b..156b3ec55 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/cbor" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -22,11 +23,14 @@ func init() { builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } var ( - Address = builtin2.RewardActorAddr - 
Methods = builtin2.MethodsReward + Address = builtin3.RewardActorAddr + Methods = builtin3.MethodsReward ) func Load(store adt.Store, act *types.Actor) (st State, err error) { @@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) { return load0(store, act.Head) case builtin2.RewardActorCodeID: return load2(store, act.Head) + case builtin3.RewardActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/builtin/reward/v3.go b/chain/actors/builtin/reward/v3.go new file mode 100644 index 000000000..18bd58f8e --- /dev/null +++ b/chain/actors/builtin/reward/v3.go @@ -0,0 +1,86 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward" + smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + reward3.State + store adt.Store +} + +func (s *state3) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil +} + +func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state3) CumsumBaseline() (reward3.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state3) CumsumRealized() (reward3.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner3.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go index 4136c0c30..16e50c50a 100644 --- a/chain/actors/builtin/verifreg/util.go +++ 
b/chain/actors/builtin/verifreg/util.go @@ -6,16 +6,21 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" ) -func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) { +// taking this as a function instead of asking the caller to call it helps reduce some of the error +// checking boilerplate. +// +// "go made me do it" +type rootFunc func() (adt.Map, error) + +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) { if addr.Protocol() != address.ID { return false, big.Zero(), xerrors.Errorf("can only look up ID addresses") } - - vh, err := adt.AsMap(store, root, ver) + vh, err := root() if err != nil { return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err) } @@ -30,8 +35,9 @@ func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address. return true, dcap, nil } -func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error { - vh, err := adt.AsMap(store, root, ver) +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error { + vh, err := root() if err != nil { return xerrors.Errorf("loading verified clients: %w", err) } diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 64def4706..0dc4696f4 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" ) var _ State = (*state0)(nil) @@ -32,17 +33,25 @@ func (s *state0) RootKey() (address.Address, error) { } func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr) + return getDataCap(s.store, actors.Version0, s.verifiedClients, addr) } func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr) + return getDataCap(s.store, actors.Version0, s.verifiers, addr) } func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb) + return forEachCap(s.store, actors.Version0, s.verifiers, cb) } func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb) + return forEachCap(s.store, actors.Version0, s.verifiedClients, cb) +} + +func (s *state0) verifiedClients() (adt.Map, error) { + return adt0.AsMap(s.store, s.VerifiedClients) +} + +func (s *state0) verifiers() (adt.Map, error) { + return adt0.AsMap(s.store, s.Verifiers) } diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 5ee3bad05..a5ef84532 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -9,6 
+9,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) var _ State = (*state2)(nil) @@ -32,17 +33,25 @@ func (s *state2) RootKey() (address.Address, error) { } func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr) + return getDataCap(s.store, actors.Version2, s.verifiedClients, addr) } func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr) + return getDataCap(s.store, actors.Version2, s.verifiers, addr) } func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb) + return forEachCap(s.store, actors.Version2, s.verifiers, cb) } func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb) + return forEachCap(s.store, actors.Version2, s.verifiedClients, cb) +} + +func (s *state2) verifiedClients() (adt.Map, error) { + return adt2.AsMap(s.store, s.VerifiedClients) +} + +func (s *state2) verifiers() (adt.Map, error) { + return adt2.AsMap(s.store, s.Verifiers) } diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go new file mode 100644 index 000000000..fb0c46d0c --- /dev/null +++ b/chain/actors/builtin/verifreg/v3.go @@ -0,0 +1,58 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +type state3 struct { + verifreg3.State + store adt.Store +} + +func (s *state3) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version3, s.verifiedClients, addr) +} + +func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version3, s.verifiers, addr) +} + +func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version3, s.verifiers, cb) +} + +func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version3, s.verifiedClients, cb) +} + +func (s *state3) verifiedClients() (adt.Map, error) { + return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) verifiers() (adt.Map, error) { + return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) +} diff --git 
a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index a4468d8a0..4e3f3559b 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -3,6 +3,7 @@ package verifreg import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -22,11 +23,14 @@ func init() { builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + builtin.RegisterActorState(builtin3.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) } var ( - Address = builtin2.VerifiedRegistryActorAddr - Methods = builtin2.MethodsVerifiedRegistry + Address = builtin3.VerifiedRegistryActorAddr + Methods = builtin3.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return load0(store, act.Head) case builtin2.VerifiedRegistryActorCodeID: return load2(store, act.Head) + case builtin3.VerifiedRegistryActorCodeID: + return load3(store, act.Head) } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 31d83cd3d..e32b36743 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -6,21 +6,28 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" ) const ( - ChainFinality = miner0.ChainFinality + ChainFinality = miner3.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych2.SettleDelay + PaychSettleDelay = paych3.SettleDelay ) // SetSupportedProofTypes sets supported proof types, across all actor versions. 
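For orientation, a minimal usage sketch of the version-dispatched policy helpers touched in this file (illustrative only, not part of the patch; GetMaxProveCommitDuration and VersionForNetwork both appear later in this diff, and the 32GiB proof constant is assumed to come from go-state-types):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/policy"
)

func main() {
	// After this change, network.Version10 maps to actors.Version3.
	ver := actors.VersionForNetwork(network.Version10)

	// Look up the maximum PreCommit -> ProveCommit window for a 32GiB v1_1 sector
	// under that actors version (backed by miner3.MaxProveCommitDuration for v3).
	d := policy.GetMaxProveCommitDuration(ver, abi.RegisteredSealProof_StackedDrg32GiBV1_1)
	fmt.Println("max ProveCommit duration (epochs):", d)
}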
@@ -31,6 +38,10 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + AddSupportedProofTypes(types...) } @@ -49,6 +60,13 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner3.PreCommitSealProofTypesV0[t] = struct{}{} + + miner3.PreCommitSealProofTypesV7[t] = struct{}{} + miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} } } @@ -58,6 +76,7 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { // Set for all miner versions. miner0.PreCommitChallengeDelay = delay miner2.PreCommitChallengeDelay = delay + miner3.PreCommitChallengeDelay = delay } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. @@ -73,6 +92,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { for _, policy := range builtin2.SealProofPolicies { policy.ConsensusMinerMinPower = p } + + for _, policy := range builtin3.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } } // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should @@ -80,6 +103,7 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg0.MinVerifiedDealSize = size verifreg2.MinVerifiedDealSize = size + verifreg3.MinVerifiedDealSize = size } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { @@ -88,6 +112,8 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab return miner0.MaxSealDuration[t] case actors.Version2: return miner2.MaxProveCommitDuration[t] + case actors.Version3: + return miner3.MaxProveCommitDuration[t] default: panic("unsupported actors version") } @@ -103,6 +129,8 @@ func DealProviderCollateralBounds( return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) case actors.Version2: return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + case actors.Version3: + return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) default: panic("unsupported network version") } @@ -116,6 +144,12 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { miner2.WPoStChallengeWindow = period miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines) + + miner3.WPoStChallengeWindow = period + miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines) + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner3.WPoStDisputeWindow = period * 30 } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -132,17 +166,17 @@ func GetMaxSectorExpirationExtension() abi.ChainEpoch { // TODO: we'll probably need to abstract over this better in the future. func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin3.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } - return int(miner2.AddressedSectorsMax / sectorsPerPart), nil + return int(miner3.AddressedSectorsMax / sectorsPerPart), nil } func GetDefaultSectorSize() abi.SectorSize { // supported sector sizes are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8)) - for spt := range miner2.PreCommitSealProofTypesV8 { + szs := make([]abi.SectorSize, 0, len(miner3.PreCommitSealProofTypesV8)) + for spt := range miner3.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) diff --git a/chain/actors/version.go b/chain/actors/version.go index 1cafa45c9..763c5a42c 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -11,6 +11,7 @@ type Version int const ( Version0 Version = 0 Version2 Version = 2 + Version3 Version = 3 ) // Converts a network version into an actors adt version. @@ -18,8 +19,10 @@ func VersionForNetwork(version network.Version) Version { switch version { case network.Version0, network.Version1, network.Version2, network.Version3: return Version0 - case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8: + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: return Version2 + case network.Version10: + return Version3 default: panic(fmt.Sprintf("unsupported network version %d", version)) } diff --git a/chain/exchange/server.go b/chain/exchange/server.go index 31eec46ca..7c1624e57 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -56,7 +56,11 @@ func (s *server) HandleStream(stream inet.Stream) { } _ = stream.SetDeadline(time.Now().Add(WriteResDeadline)) - if err := cborutil.WriteCborRPC(stream, resp); err != nil { + buffered := bufio.NewWriter(stream) + if err = cborutil.WriteCborRPC(buffered, resp); err == nil { + err = buffered.Flush() + } + if err != nil { _ = stream.SetDeadline(time.Time{}) log.Warnw("failed to write back response for handle stream", "err", err, "peer", stream.Conn().RemotePeer()) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 8e749095c..1ad8dad6d 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -14,7 +14,6 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/google/uuid" - block "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" offline "github.com/ipfs/go-ipfs-exchange-offline" @@ -85,19 +84,6 @@ type ChainGen struct { lr repo.LockedRepo } -type mybs struct { - blockstore.Blockstore -} - -func (m mybs) Get(c cid.Cid) (block.Block, error) { - b, err := m.Blockstore.Get(c) - if err != nil { - return nil, err - } - - return b, nil -} - var rootkeyMultisig = genesis.MultisigMeta{ Signers: []address.Address{remAccTestKey}, Threshold: 1, @@ -152,8 +138,6 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { } }() - bs = mybs{bs} - ks, err := lr.KeyStore() if err != nil { return nil, xerrors.Errorf("getting repo 
keystore failed: %w", err) @@ -465,7 +449,12 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners } } - return store.NewFullTipSet(blks), nil + fts := store.NewFullTipSet(blks) + if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil { + return nil, err + } + + return fts, nil } func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, diff --git a/chain/gen/mining.go b/chain/gen/mining.go index cca4b6169..5de0fec0e 100644 --- a/chain/gen/mining.go +++ b/chain/gen/mining.go @@ -9,10 +9,10 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs/bls" ) func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) { @@ -140,35 +140,29 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA } func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { - sigsS := make([][]byte, len(sigs)) + sigsS := make([]ffi.Signature, len(sigs)) for i := 0; i < len(sigs); i++ { - sigsS[i] = sigs[i].Data + copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes]) } - aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS) - if aggregator == nil { + aggSig := ffi.Aggregate(sigsS) + if aggSig == nil { if len(sigs) > 0 { return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) } + zeroSig := ffi.CreateZeroSignature() + // Note: for blst this condition should not happen - nil should not // be returned return &crypto.Signature{ Type: crypto.SigTypeBLS, - Data: new(bls.Signature).Compress(), + Data: zeroSig[:], }, nil } - aggSigAff := aggregator.ToAffine() - if aggSigAff == nil { - return &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: new(bls.Signature).Compress(), - }, nil - } - aggSig := aggSigAff.Compress() return &crypto.Signature{ Type: crypto.SigTypeBLS, - Data: aggSig, + Data: aggSig[:], }, nil } diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index ee0435156..5edcd5439 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -3,6 +3,8 @@ package slashfilter import ( "fmt" + "github.com/filecoin-project/lotus/build" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" @@ -26,6 +28,10 @@ func New(dstore ds.Batching) *SlashFilter { } func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { + if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { + return nil + } + epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go index 5df7589fa..99f8b9173 100644 --- a/chain/market/fundmanager.go +++ b/chain/market/fundmanager.go @@ -2,6 +2,7 @@ package market import ( "context" + "fmt" "sync" "github.com/filecoin-project/go-address" @@ -129,6 +130,11 @@ func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Addres return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt) } +// GetReserved returns the amount that is currently reserved for the address +func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount { + return 
fm.getFundedAddress(addr).getReserved() +} + // FundedAddressState keeps track of the state of an address with funds in the // datastore type FundedAddressState struct { @@ -147,7 +153,7 @@ type fundedAddress struct { env *fundManagerEnvironment str *Store - lk sync.Mutex + lk sync.RWMutex state *FundedAddressState // Note: These request queues are ephemeral, they are not saved to store @@ -183,6 +189,13 @@ func (a *fundedAddress) start() { } } +func (a *fundedAddress) getReserved() abi.TokenAmount { + a.lk.RLock() + defer a.lk.RUnlock() + + return a.state.AmtReserved +} + func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { return a.requestAndWait(ctx, wallet, amt, &a.reservations) } @@ -501,7 +514,13 @@ func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid c // request with an error newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt) if newWithdrawalAmt.GreaterThan(netAvail) { - err := xerrors.Errorf("insufficient funds for withdrawal of %d", amt) + msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt)) + msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)", + types.FIL(types.BigSub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved)) + if !withdrawalAmt.IsZero() { + msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt)) + } + err := xerrors.Errorf(msg) a.debugf("%s", err) req.Complete(cid.Undef, err) continue diff --git a/chain/state/statetree.go b/chain/state/statetree.go index 7fa55b31c..46a13ccc6 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -20,6 +20,10 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + + states0 "github.com/filecoin-project/specs-actors/actors/states" + states2 "github.com/filecoin-project/specs-actors/v2/actors/states" + states3 "github.com/filecoin-project/specs-actors/v3/actors/states" ) var log = logging.Logger("statetree") @@ -144,23 +148,12 @@ func VersionForNetwork(ver network.Version) types.StateTreeVersion { return types.StateTreeVersion1 } -func adtForSTVersion(ver types.StateTreeVersion) actors.Version { - switch ver { - case types.StateTreeVersion0: - return actors.Version0 - case types.StateTreeVersion1: - return actors.Version2 - default: - panic("unhandled state tree version") - } -} - func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, error) { var info cid.Cid switch ver { case types.StateTreeVersion0: // info is undefined - case types.StateTreeVersion1: + case types.StateTreeVersion1, types.StateTreeVersion2: var err error info, err = cst.Put(context.TODO(), new(types.StateInfo0)) if err != nil { @@ -169,13 +162,34 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e default: return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } - root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver)) - if err != nil { - return nil, err + + store := adt.WrapStore(context.TODO(), cst) + var hamt adt.Map + switch ver { + case types.StateTreeVersion0: + tree, err := states0.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case types.StateTreeVersion1: + tree, err := states2.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case types.StateTreeVersion2: 
+ tree, err := states3.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + default: + return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } s := &StateTree{ - root: root, + root: hamt, info: info, version: ver, Store: cst, @@ -194,30 +208,49 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { root.Version = types.StateTreeVersion0 } - switch root.Version { - case types.StateTreeVersion0, types.StateTreeVersion1: - // Load the actual state-tree HAMT. - nd, err := adt.AsMap( - adt.WrapStore(context.TODO(), cst), root.Actors, - adtForSTVersion(root.Version), - ) - if err != nil { - log.Errorf("loading hamt node %s failed: %s", c, err) - return nil, err - } + store := adt.WrapStore(context.TODO(), cst) - s := &StateTree{ - root: nd, - info: root.Info, - version: root.Version, - Store: cst, - snaps: newStateSnaps(), + var ( + hamt adt.Map + err error + ) + switch root.Version { + case types.StateTreeVersion0: + var tree *states0.Tree + tree, err = states0.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case types.StateTreeVersion1: + var tree *states2.Tree + tree, err = states2.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case types.StateTreeVersion2: + var tree *states3.Tree + tree, err = states3.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map } - s.lookupIDFun = s.lookupIDinternal - return s, nil default: return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) } + if err != nil { + log.Errorf("failed to load state tree: %s", err) + return nil, xerrors.Errorf("failed to load state tree: %w", err) + } + + s := &StateTree{ + root: hamt, + info: root.Info, + version: root.Version, + Store: cst, + snaps: newStateSnaps(), + } + s.lookupIDFun = s.lookupIDinternal + + return s, nil } func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error { diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 3bdf23f59..b36f2c0bd 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -4,7 +4,12 @@ import ( "bytes" "context" "encoding/binary" - "math" + "runtime" + "sort" + "sync" + "time" + + "github.com/filecoin-project/go-state-types/rt" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -29,29 +34,95 @@ import ( adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "golang.org/x/xerrors" ) -// UpgradeFunc is a migration function run at every upgrade. +// MigrationCache can be used to cache information used by a migration. This is primarily useful to +// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself. +type MigrationCache interface { + Write(key string, value cid.Cid) error + Read(key string) (bool, cid.Cid, error) + Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) +} + +// MigrationFunc is a migration function run at every upgrade. // +// - The cache is a per-upgrade cache, pre-populated by pre-migrations. // - The oldState is the state produced by the upgrade epoch. // - The returned newState is the new state that will be used by the next epoch. 
// - The height is the upgrade epoch height (already executed). // - The tipset is the tipset for the last non-null block before the upgrade. Do // not assume that ts.Height() is the upgrade height. -type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) +type MigrationFunc func( + ctx context.Context, + sm *StateManager, cache MigrationCache, + cb ExecCallback, oldState cid.Cid, + height abi.ChainEpoch, ts *types.TipSet, +) (newState cid.Cid, err error) + +// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network +// upgrade and speed it up. +type PreMigrationFunc func( + ctx context.Context, + sm *StateManager, cache MigrationCache, + oldState cid.Cid, + height abi.ChainEpoch, ts *types.TipSet, +) error + +// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations +// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times. +type PreMigration struct { + // PreMigration is the pre-migration function to run at the specified time. This function is + // run asynchronously and must abort promptly when canceled. + PreMigration PreMigrationFunc + + // StartWithin specifies that this pre-migration should be started at most StartWithin + // epochs before the upgrade. + StartWithin abi.ChainEpoch + + // DontStartWithin specifies that this pre-migration should not be started DontStartWithin + // epochs before the final upgrade epoch. + // + // This should be set such that the pre-migration is likely to complete before StopWithin. + DontStartWithin abi.ChainEpoch + + // StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the + // final upgrade epoch. + StopWithin abi.ChainEpoch +} type Upgrade struct { Height abi.ChainEpoch Network network.Version Expensive bool - Migration UpgradeFunc + Migration MigrationFunc + + // PreMigrations specifies a set of pre-migration functions to run at the indicated epochs. + // These functions should fill the given cache with information that can speed up the + // eventual full migration at the upgrade epoch. + PreMigrations []PreMigration } type UpgradeSchedule []Upgrade +type migrationLogger struct{} + +func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) { + switch level { + case rt.DEBUG: + log.Debugf(msg, args...) + case rt.INFO: + log.Infof(msg, args...) + case rt.WARN: + log.Warnf(msg, args...) + case rt.ERROR: + log.Errorf(msg, args...) 
+ } +} + func DefaultUpgradeSchedule() UpgradeSchedule { var us UpgradeSchedule @@ -96,32 +167,28 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Height: build.UpgradePersianHeight, Network: network.Version8, Migration: nil, + }, { + Height: build.UpgradeOrangeHeight, + Network: network.Version9, + Migration: nil, + }, { + Height: build.UpgradeActorsV3Height, + Network: network.Version10, + Migration: UpgradeActorsV3, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV3, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV3, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, }} - if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade - updates = []Upgrade{{ - Height: build.UpgradeBreezeHeight, - Network: network.Version1, - Migration: UpgradeFaucetBurnRecovery, - }, { - Height: build.UpgradeSmokeHeight, - Network: network.Version2, - Migration: nil, - }, { - Height: build.UpgradeIgnitionHeight, - Network: network.Version3, - Migration: UpgradeIgnition, - }, { - Height: build.UpgradeRefuelHeight, - Network: network.Version3, - Migration: UpgradeRefuel, - }, { - Height: build.UpgradeLiftoffHeight, - Network: network.Version3, - Migration: UpgradeLiftoff, - }} - } - for _, u := range updates { if u.Height < 0 { // upgrade disabled @@ -133,14 +200,43 @@ func DefaultUpgradeSchedule() UpgradeSchedule { } func (us UpgradeSchedule) Validate() error { - // Make sure we're not trying to upgrade to version 0. + // Make sure each upgrade is valid. for _, u := range us { if u.Network <= 0 { return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network) } + + for _, m := range u.PreMigrations { + if m.StartWithin <= 0 { + return xerrors.Errorf("pre-migration must specify a positive start-within epoch") + } + + if m.DontStartWithin < 0 || m.StopWithin < 0 { + return xerrors.Errorf("pre-migration must specify non-negative epochs") + } + + if m.StartWithin <= m.StopWithin { + return xerrors.Errorf("pre-migration start-within must come before stop-within") + } + + // If we have a dont-start-within. + if m.DontStartWithin != 0 { + if m.DontStartWithin < m.StopWithin { + return xerrors.Errorf("pre-migration dont-start-within must come before stop-within") + } + if m.StartWithin <= m.DontStartWithin { + return xerrors.Errorf("pre-migration start-within must come after dont-start-within") + } + } + } + if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool { + return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec + }) { + return xerrors.Errorf("pre-migrations must be sorted by start epoch") + } } - // Make sure all the upgrades make sense. + // Make sure the upgrade order makes sense. for i := 1; i < len(us); i++ { prev := &us[i-1] curr := &us[i] @@ -162,12 +258,26 @@ func (us UpgradeSchedule) Validate() error { func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) { retCid := root var err error - f, ok := sm.stateMigrations[height] - if ok { - retCid, err = f(ctx, sm, cb, root, height, ts) + u := sm.stateMigrations[height] + if u != nil && u.upgrade != nil { + startTime := time.Now() + log.Warnw("STARTING migration", "height", height) + // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may + // have to migrate multiple times. 
+ tmpCache := u.cache.Clone() + retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts) if err != nil { + log.Errorw("FAILED migration", "height", height, "error", err) return cid.Undef, err } + // Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This + // can save us a _lot_ of time because very few actors will have changed if we + // do a small revert then need to re-run the migration. + u.cache.Update(tmpCache) + log.Warnw("COMPLETED migration", + "height", height, + "duration", time.Since(startTime), + ) } return retCid, nil @@ -178,6 +288,109 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp return ok } +func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) { + height := ts.Height() + parent := ts.ParentState() + + startTime := time.Now() + + log.Warn("STARTING pre-migration") + // Clone the cache so we don't actually _update_ it + // till we're done. Otherwise, if we fail, the next + // migration to use the cache may assume that + // certain blocks exist, even if they don't. + tmpCache := cache.Clone() + err := fn(ctx, sm, tmpCache, parent, height, ts) + if err != nil { + log.Errorw("FAILED pre-migration", "error", err) + return + } + // Finally, if everything worked, update the cache. + cache.Update(tmpCache) + log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime)) +} + +func (sm *StateManager) preMigrationWorker(ctx context.Context) { + defer close(sm.shutdown) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + type op struct { + after abi.ChainEpoch + notAfter abi.ChainEpoch + run func(ts *types.TipSet) + } + + var wg sync.WaitGroup + defer wg.Wait() + + // Turn each pre-migration into an operation in a schedule. + var schedule []op + for upgradeEpoch, migration := range sm.stateMigrations { + cache := migration.cache + for _, prem := range migration.preMigrations { + preCtx, preCancel := context.WithCancel(ctx) + migrationFunc := prem.PreMigration + + afterEpoch := upgradeEpoch - prem.StartWithin + notAfterEpoch := upgradeEpoch - prem.DontStartWithin + stopEpoch := upgradeEpoch - prem.StopWithin + // We can't start after we stop. + if notAfterEpoch > stopEpoch { + notAfterEpoch = stopEpoch - 1 + } + + // Add an op to start a pre-migration. + schedule = append(schedule, op{ + after: afterEpoch, + notAfter: notAfterEpoch, + + // TODO: are these values correct? + run: func(ts *types.TipSet) { + wg.Add(1) + go func() { + defer wg.Done() + runPreMigration(preCtx, sm, migrationFunc, cache, ts) + }() + }, + }) + + // Add an op to cancel the pre-migration if it's still running. + schedule = append(schedule, op{ + after: stopEpoch, + notAfter: -1, + run: func(ts *types.TipSet) { preCancel() }, + }) + } + } + + // Then sort by epoch. + sort.Slice(schedule, func(i, j int) bool { + return schedule[i].after < schedule[j].after + }) + + // Finally, when the head changes, see if there's anything we need to do. + // + // We're intentionally ignoring reorgs as they don't matter for our purposes. + for change := range sm.cs.SubHeadChanges(ctx) { + for _, head := range change { + for len(schedule) > 0 { + op := &schedule[0] + if head.Val.Height() < op.after { + break + } + + // If we haven't passed the pre-migration height... 
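+				// (A negative notAfter means "no upper bound"; the cancel ops
+				// scheduled above set it to -1 so they always run once reached.)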
+ if op.notAfter < 0 || head.Val.Height() < op.notAfter { + op.run(head.Val) + } + schedule = schedule[1:] + } + } + } +} + func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error { fromAct, err := tree.GetActor(from) if err != nil { @@ -231,7 +444,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo return nil } -func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Some initial parameters FundsForMiners := types.FromFil(1_000_000) LookbackEpoch := abi.ChainEpoch(32000) @@ -517,7 +730,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return tree.Flush(ctx) } -func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { store := sm.cs.Store(ctx) if build.UpgradeLiftoffHeight <= epoch { @@ -572,7 +785,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo return tree.Flush(ctx) } -func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { store := sm.cs.Store(ctx) tree, err := sm.StateTree(root) @@ -598,7 +811,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root return tree.Flush(ctx) } -func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync()) store := store.ActorStore(ctx, buf) @@ -644,7 +857,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo return newRoot, nil } -func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) @@ -658,7 +871,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root return tree.Flush(ctx) } -func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { store := sm.cs.Store(ctx) var stateRoot types.StateRoot if err := store.Get(ctx, root, &stateRoot); err != nil { @@ 
-700,6 +913,98 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root return newRoot, nil } +func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv10.Config{MaxWorkers: uint(workerCount)} + newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err) + } + + // perform some basic sanity checks to make sure everything still works. + store := store.ActorStore(ctx, sm.ChainStore().Blockstore()) + if newSm, err := state.LoadStateTree(store, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) + } else if newRoot2 != newRoot { + return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, err := newSm.GetActor(init_.Address); err != nil { + return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv10.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV3Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv10.Config, +) (cid.Cid, error) { + buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for actors v3 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion2, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. 
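+	// buf.Read() is the underlying chain blockstore here, so this copy flushes the
+	// migrated tree out of the temporary write buffer into persistent storage.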
+ + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { ia, err := tree.GetActor(builtin0.InitActorAddr) if err != nil { diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index a2b7a179f..95e7ef699 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "sync" "testing" "github.com/ipfs/go-cid" @@ -122,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) { cg.ChainStore(), UpgradeSchedule{{ Network: 1, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore()) @@ -252,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) { Network: 1, Expensive: true, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { return root, nil }}}) @@ -317,3 +318,166 @@ func TestForkRefuseCall(t *testing.T) { } } } + +func TestForkPreMigration(t *testing.T) { + logging.SetAllLoggers(logging.LevelInfo) + + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + fooCid, err := abi.CidBuilder.Sum([]byte("foo")) + require.NoError(t, err) + + barCid, err := abi.CidBuilder.Sum([]byte("bar")) + require.NoError(t, err) + + failCid, err := abi.CidBuilder.Sum([]byte("fail")) + require.NoError(t, err) + + var wait20 sync.WaitGroup + wait20.Add(3) + + wasCanceled := make(chan struct{}) + + checkCache := func(t *testing.T, cache MigrationCache) { + found, value, err := cache.Read("foo") + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fooCid, value) + + found, value, err = cache.Read("bar") + require.NoError(t, err) + require.True(t, found) + require.Equal(t, barCid, value) + + found, _, err = cache.Read("fail") + require.NoError(t, err) + require.False(t, found) + } + + counter := make(chan struct{}, 10) + + sm, err := NewStateManagerWithUpgradeSchedule( + cg.ChainStore(), UpgradeSchedule{{ + Network: 1, + Height: testForkHeight, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, + root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + + // Make sure the test that should be canceled, is canceled. + select { + case <-wasCanceled: + case <-ctx.Done(): + return cid.Undef, ctx.Err() + } + + // the cache should be setup correctly. 
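+				// Both successful pre-migrations must have written "foo" and "bar",
+				// while the entry written by the failing pre-migration must not have
+				// been persisted into the shared cache.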
+ checkCache(t, cache) + + counter <- struct{}{} + + return root, nil + }, + PreMigrations: []PreMigration{{ + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("foo", fooCid) + require.NoError(t, err) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("bar", barCid) + require.NoError(t, err) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("fail", failCid) + require.NoError(t, err) + + counter <- struct{}{} + + // Fail this migration. The cached entry should not be persisted. + return fmt.Errorf("failed") + }, + }, { + StartWithin: 15, + StopWithin: 5, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + + <-ctx.Done() + close(wasCanceled) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 10, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + + checkCache(t, cache) + + counter <- struct{}{} + + return nil + }, + }}}, + }) + if err != nil { + t.Fatal(err) + } + require.NoError(t, sm.Start(context.Background())) + defer func() { + require.NoError(t, sm.Stop(context.Background())) + }() + + inv := vm.NewActorRegistry() + inv.Register(nil, testActor{}) + + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + nvm, err := vm.NewVM(ctx, vmopt) + if err != nil { + return nil, err + } + nvm.SetInvoker(inv) + return nvm, nil + }) + + cg.SetStateManager(sm) + + for i := 0; i < 50; i++ { + _, err := cg.NextTipSet() + if err != nil { + t.Fatal(err) + } + } + // We have 5 pre-migration steps, and the migration. They should all have written something + // to this channel. + require.Equal(t, 6, len(counter)) +} diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 10c71d8dc..62f2ec04c 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -20,6 +20,10 @@ import ( // Used for genesis. msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + + // we use the same adt for all receipts + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -42,6 +46,7 @@ import ( ) const LookbackNoLimit = abi.ChainEpoch(-1) +const ReceiptAmtBitwidth = 3 var log = logging.Logger("statemgr") @@ -58,15 +63,24 @@ type versionSpec struct { atOrBelow abi.ChainEpoch } +type migration struct { + upgrade MigrationFunc + preMigrations []PreMigration + cache *nv10.MemMigrationCache +} + type StateManager struct { cs *store.ChainStore + cancel context.CancelFunc + shutdown chan struct{} + // Determines the network version at any given epoch. networkVersions []versionSpec latestVersion network.Version - // Maps chain epochs to upgrade functions. - stateMigrations map[abi.ChainEpoch]UpgradeFunc + // Maps chain epochs to migrations. 
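+	// Each migration bundles the (optional) full migration function, its
+	// pre-migrations, and the cache they share.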
+ stateMigrations map[abi.ChainEpoch]*migration // A set of potentially expensive/time consuming upgrades. Explicit // calls for, e.g., gas estimation fail against this epoch with // ErrExpensiveFork. @@ -99,7 +113,7 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule return nil, err } - stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us)) + stateMigrations := make(map[abi.ChainEpoch]*migration, len(us)) expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us)) var networkVersions []versionSpec lastVersion := network.Version0 @@ -107,8 +121,13 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule // If we have any upgrades, process them and create a version // schedule. for _, upgrade := range us { - if upgrade.Migration != nil { - stateMigrations[upgrade.Height] = upgrade.Migration + if upgrade.Migration != nil || upgrade.PreMigrations != nil { + migration := &migration{ + upgrade: upgrade.Migration, + preMigrations: upgrade.PreMigrations, + cache: nv10.NewMemMigrationCache(), + } + stateMigrations[upgrade.Height] = migration } if upgrade.Expensive { expensiveUpgrades[upgrade.Height] = struct{}{} @@ -144,6 +163,33 @@ func cidsToKey(cids []cid.Cid) string { return out } +// Start starts the state manager's optional background processes. At the moment, this schedules +// pre-migration functions to run ahead of network upgrades. +// +// This method is not safe to invoke from multiple threads or concurrently with Stop. +func (sm *StateManager) Start(context.Context) error { + var ctx context.Context + ctx, sm.cancel = context.WithCancel(context.Background()) + sm.shutdown = make(chan struct{}) + go sm.preMigrationWorker(ctx) + return nil +} + +// Stop starts the state manager's background processes. +// +// This method is not safe to invoke concurrently with Start. +func (sm *StateManager) Stop(ctx context.Context) error { + if sm.cancel != nil { + sm.cancel() + select { + case <-sm.shutdown: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) { ctx, span := trace.StartSpan(ctx, "tipSetState") defer span.End() @@ -384,11 +430,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Cid{}, cid.Cid{}, err } - // XXX: Is the height correct? Or should it be epoch-1? - rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch))) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err) - } + rectarr := blockadt.MakeEmptyArray(sm.cs.Store(ctx)) for i, receipt := range receipts { if err := rectarr.Set(uint64(i), receipt); err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) @@ -473,13 +515,26 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad ts = sm.cs.GetHeaviestTipSet() } + cst := cbor.NewCborStore(sm.cs.Blockstore()) + + // First try to resolve the actor in the parent state, so we don't have to compute anything. + tree, err := state.LoadStateTree(cst, ts.ParentState()) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err) + } + + resolved, err := vm.ResolveToKeyAddr(tree, cst, addr) + if err == nil { + return resolved, nil + } + + // If that fails, compute the tip-set and try again. 
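+	// (Computing the tipset state below may require executing the tipset's
+	// messages, which is far more expensive than the parent-state lookup above.)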
st, _, err := sm.TipSetState(ctx, ts) if err != nil { return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) - tree, err := state.LoadStateTree(cst, st) + tree, err = state.LoadStateTree(cst, st) if err != nil { return address.Undef, xerrors.Errorf("failed to load state tree") } @@ -639,7 +694,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid } } -func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { msg, err := sm.cs.GetCMessage(mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) @@ -656,7 +711,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty return head, r, foundMsg, nil } - fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, LookbackNoLimit) + fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit) if err != nil { log.Warnf("failed to look back through chain for message %s", mcid) diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 1e29e72d8..1d59b2630 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -25,6 +25,7 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -207,17 +208,17 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra return nil, xerrors.Errorf("getting miner info: %w", err) } - wpt, err := info.SealProofType.RegisteredWinningPoStProof() - if err != nil { - return nil, xerrors.Errorf("getting window proof type: %w", err) - } - mid, err := address.IDFromAddress(maddr) if err != nil { return nil, xerrors.Errorf("getting miner ID: %w", err) } - ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect) + proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType) + if err != nil { + return nil, xerrors.Errorf("determining winning post proof type: %w", err) + } + + ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect) if err != nil { return nil, xerrors.Errorf("generating winning post challenges: %w", err) } @@ -560,6 +561,7 @@ func init() { var actors []rt.VMActor actors = append(actors, exported0.BuiltinActors()...) actors = append(actors, exported2.BuiltinActors()...) + actors = append(actors, exported3.BuiltinActors()...) for _, actor := range actors { exports := actor.Exports() diff --git a/chain/store/store.go b/chain/store/store.go index 2ca09dfe9..ec7714734 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -363,7 +363,7 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { // MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our // internal state as our new head, if and only if it is heavier than the current -// head. +// head and does not exceed the maximum fork length. 
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() @@ -380,6 +380,15 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // TODO: don't do this for initial sync. Now that we don't have a // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. + + exceeds, err := cs.exceedsForkLength(cs.heaviest, ts) + if err != nil { + return err + } + if exceeds { + return nil + } + return cs.takeHeaviestTipSet(ctx, ts) } else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) { log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts) @@ -387,6 +396,67 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS return nil } +// Check if the two tipsets have a fork length above `ForkLengthThreshold`. +// `synced` is the head of the chain we are currently synced to and `external` +// is the incoming tipset potentially belonging to a forked chain. It assumes +// the external chain has already been validated and available in the ChainStore. +// The "fast forward" case is covered in this logic as a valid fork of length 0. +// +// FIXME: We may want to replace some of the logic in `syncFork()` with this. +// `syncFork()` counts the length on both sides of the fork at the moment (we +// need to settle on that) but here we just enforce it on the `synced` side. +func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) { + if synced == nil || external == nil { + // FIXME: If `cs.heaviest` is nil we should just bypass the entire + // `MaybeTakeHeavierTipSet` logic (instead of each of the called + // functions having to handle the nil case on their own). + return false, nil + } + + var err error + // `forkLength`: number of tipsets we need to walk back from the our `synced` + // chain to the common ancestor with the new `external` head in order to + // adopt the fork. + for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ { + // First walk back as many tipsets in the external chain to match the + // `synced` height to compare them. If we go past the `synced` height + // the subsequent match will fail but it will still be useful to get + // closer to the `synced` head parent's height in the next loop. + for external.Height() > synced.Height() { + if external.Height() == 0 { + // We reached the genesis of the external chain without a match; + // this is considered a fork outside the allowed limit (of "infinite" + // length). + return true, nil + } + external, err = cs.LoadTipSet(external.Parents()) + if err != nil { + return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err) + } + } + + // Now check if we arrived at the common ancestor. + if synced.Equals(external) { + return false, nil + } + + // If we didn't, go back *one* tipset on the `synced` side (incrementing + // the `forkLength`). + if synced.Height() == 0 { + // Same check as the `external` side, if we reach the start (genesis) + // there is no common ancestor. + return true, nil + } + synced, err = cs.LoadTipSet(synced.Parents()) + if err != nil { + return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err) + } + } + + // We traversed the fork length allowed without finding a common ancestor. + return true, nil +} + // ForceHeadSilent forces a chain head tipset without triggering a reorg // operation. 
// @@ -524,9 +594,13 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. func (cs *ChainStore) FlushValidationCache() error { + return FlushValidationCache(cs.ds) +} + +func FlushValidationCache(ds datastore.Batching) error { log.Infof("clearing block validation cache...") - dsWalk, err := cs.ds.Query(query.Query{ + dsWalk, err := ds.Query(query.Query{ // Potential TODO: the validation cache is not a namespace on its own // but is rather constructed as prefixed-key `foo:bar` via .Instance(), which // in turn does not work with the filter, which can match only on `foo/bar` @@ -546,7 +620,7 @@ func (cs *ChainStore) FlushValidationCache() error { return xerrors.Errorf("failed to run key listing query: %w", err) } - batch, err := cs.ds.Batch() + batch, err := ds.Batch() if err != nil { return xerrors.Errorf("failed to open a DS batch: %w", err) } diff --git a/chain/sync.go b/chain/sync.go index 61c1d2094..9c1b24806 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -34,7 +34,8 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - blst "github.com/supranational/blst/bindings/go" + + ffi "github.com/filecoin-project/filecoin-ffi" // named msgarray here to make it clear that these are the types used by // messages, regardless of specs-actors version. @@ -55,7 +56,6 @@ import ( "github.com/filecoin-project/lotus/chain/vm" bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/lib/sigs/bls" "github.com/filecoin-project/lotus/metrics" ) @@ -250,18 +250,6 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming) - if from == syncer.self { - // TODO: this is kindof a hack... 
- log.Debug("got block from ourselves") - - if err := syncer.Sync(ctx, fts.TipSet()); err != nil { - log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err) - return false - } - - return true - } - // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { @@ -688,6 +676,10 @@ func blockSanityChecks(h *types.BlockHeader) error { return xerrors.Errorf("block had nil bls aggregate signature") } + if h.Miner.Protocol() != address.ID { + return xerrors.Errorf("block had non-ID miner address") + } + return nil } @@ -1190,17 +1182,21 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat trace.Int64Attribute("msgCount", int64(len(msgs))), ) - msgsS := make([]blst.Message, len(msgs)) + msgsS := make([]ffi.Message, len(msgs)) + pubksS := make([]ffi.PublicKey, len(msgs)) for i := 0; i < len(msgs); i++ { msgsS[i] = msgs[i].Bytes() + copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes]) } + sigS := new(ffi.Signature) + copy(sigS[:], sig.Data[:ffi.SignatureBytes]) + if len(msgs) == 0 { return nil } - valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks, - msgsS, []byte(bls.DST)) + valid := ffi.HashVerify(sigS, msgsS, pubksS) if !valid { return xerrors.New("bls aggregate signature failed to verify") } @@ -1449,7 +1445,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, ErrForkCheckpoint } - // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? + // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes. // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare? tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold)) if err != nil { @@ -1460,6 +1456,10 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } + // Track the fork length on our side of the synced chain to enforce + // `ForkLengthThreshold`. Initialized to 1 because we already walked back + // one tipset from `known` (our synced head). + forkLengthInHead := 1 for cur := 0; cur < len(tips); { if nts.Height() == 0 { @@ -1476,6 +1476,13 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know if nts.Height() < tips[cur].Height() { cur++ } else { + // Walk back one block in our synced chain to try to meet the fork's + // height. + forkLengthInHead++ + if forkLengthInHead > int(build.ForkLengthThreshold) { + return nil, ErrForkTooLong + } + // We will be forking away from nts, check that it isn't checkpointed if nts.Key() == chkpt { return nil, ErrForkCheckpoint diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go index 61985b964..5f23e67c0 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -117,6 +117,13 @@ func TestSyncManagerEdgeCase(t *testing.T) { // get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2) bop = <-stc + if bop.ts.Equals(c2) { + // there's a small race and we might get c2 first. + // But we should still end on c1. 
+ bop.done() + bop = <-stc + } + if !bop.ts.Equals(c1) { t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts) } @@ -143,8 +150,11 @@ func TestSyncManagerEdgeCase(t *testing.T) { t.Fatalf("Expected tipset %s to sync, but got %s", e1, last) } - if len(sm.state) != 0 { - t.Errorf("active syncs expected empty but got: %d", len(sm.state)) + sm.mx.Lock() + activeSyncs := len(sm.state) + sm.mx.Unlock() + if activeSyncs != 0 { + t.Errorf("active syncs expected empty but got: %d", activeSyncs) } }) } diff --git a/chain/types/fil.go b/chain/types/fil.go index 6742dd180..223ed3c50 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -26,7 +26,7 @@ func (f FIL) Unitless() string { var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"} func (f FIL) Short() string { - n := BigInt(f) + n := BigInt(f).Abs() dn := uint64(1) var prefix string @@ -70,7 +70,7 @@ func (f FIL) UnmarshalText(text []byte) error { } func ParseFIL(s string) (FIL, error) { - suffix := strings.TrimLeft(s, ".1234567890") + suffix := strings.TrimLeft(s, "-.1234567890") s = s[:len(s)-len(suffix)] var attofil bool if suffix != "" { diff --git a/chain/types/fil_test.go b/chain/types/fil_test.go index 6cbc44c5f..7bf2a802e 100644 --- a/chain/types/fil_test.go +++ b/chain/types/fil_test.go @@ -57,6 +57,52 @@ func TestFilShort(t *testing.T) { {fil: "0.000221234", expect: "221.234 μFIL"}, {fil: "0.0002212344", expect: "221.234 μFIL"}, {fil: "0.00022123444", expect: "221.234 μFIL"}, + + {fil: "-1", expect: "-1 FIL"}, + {fil: "-1.1", expect: "-1.1 FIL"}, + {fil: "-12", expect: "-12 FIL"}, + {fil: "-123", expect: "-123 FIL"}, + {fil: "-123456", expect: "-123456 FIL"}, + {fil: "-123.23", expect: "-123.23 FIL"}, + {fil: "-123456.234", expect: "-123456.234 FIL"}, + {fil: "-123456.2341234", expect: "-123456.234 FIL"}, + {fil: "-123456.234123445", expect: "-123456.234 FIL"}, + + {fil: "-0.1", expect: "-100 mFIL"}, + {fil: "-0.01", expect: "-10 mFIL"}, + {fil: "-0.001", expect: "-1 mFIL"}, + + {fil: "-0.0001", expect: "-100 μFIL"}, + {fil: "-0.00001", expect: "-10 μFIL"}, + {fil: "-0.000001", expect: "-1 μFIL"}, + + {fil: "-0.0000001", expect: "-100 nFIL"}, + {fil: "-0.00000001", expect: "-10 nFIL"}, + {fil: "-0.000000001", expect: "-1 nFIL"}, + + {fil: "-0.0000000001", expect: "-100 pFIL"}, + {fil: "-0.00000000001", expect: "-10 pFIL"}, + {fil: "-0.000000000001", expect: "-1 pFIL"}, + + {fil: "-0.0000000000001", expect: "-100 fFIL"}, + {fil: "-0.00000000000001", expect: "-10 fFIL"}, + {fil: "-0.000000000000001", expect: "-1 fFIL"}, + + {fil: "-0.0000000000000001", expect: "-100 aFIL"}, + {fil: "-0.00000000000000001", expect: "-10 aFIL"}, + {fil: "-0.000000000000000001", expect: "-1 aFIL"}, + + {fil: "-0.0000012", expect: "-1.2 μFIL"}, + {fil: "-0.00000123", expect: "-1.23 μFIL"}, + {fil: "-0.000001234", expect: "-1.234 μFIL"}, + {fil: "-0.0000012344", expect: "-1.234 μFIL"}, + {fil: "-0.00000123444", expect: "-1.234 μFIL"}, + + {fil: "-0.0002212", expect: "-221.2 μFIL"}, + {fil: "-0.00022123", expect: "-221.23 μFIL"}, + {fil: "-0.000221234", expect: "-221.234 μFIL"}, + {fil: "-0.0002212344", expect: "-221.234 μFIL"}, + {fil: "-0.00022123444", expect: "-221.234 μFIL"}, } { s := s t.Run(s.fil, func(t *testing.T) { diff --git a/chain/types/state.go b/chain/types/state.go index a96883604..c14836ee7 100644 --- a/chain/types/state.go +++ b/chain/types/state.go @@ -9,8 +9,10 @@ type StateTreeVersion uint64 const ( // StateTreeVersion0 corresponds to actors < v2. 
StateTreeVersion0 StateTreeVersion = iota - // StateTreeVersion1 corresponds to actors >= v2. + // StateTreeVersion1 corresponds to actors v2 StateTreeVersion1 + // StateTreeVersion2 corresponds to actors >= v3. + StateTreeVersion2 ) type StateRoot struct { diff --git a/chain/vm/burn.go b/chain/vm/burn.go index 9f9b95755..a214d198b 100644 --- a/chain/vm/burn.go +++ b/chain/vm/burn.go @@ -67,7 +67,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) { return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64() } -func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs { +func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount, chargeNetworkFee bool) GasOutputs { gasUsedBig := big.NewInt(gasUsed) out := ZeroGasOutputs() @@ -76,7 +76,12 @@ func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi. baseFeeToPay = feeCap out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig) } - out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + + // If chargeNetworkFee is disabled, just skip computing the BaseFeeBurn. However, + // we charge all the other fees regardless. + if chargeNetworkFee { + out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + } minerTip := gasPremium if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 { diff --git a/chain/vm/burn_test.go b/chain/vm/burn_test.go index 58e133605..e4fc69aff 100644 --- a/chain/vm/burn_test.go +++ b/chain/vm/burn_test.go @@ -63,7 +63,7 @@ func TestGasOutputs(t *testing.T) { for _, test := range tests { test := test t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { - output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium)) + output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium), true) i2s := func(i uint64) string { return fmt.Sprintf("%d", i) } diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index a5610736a..1c1d04f19 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -17,6 +17,7 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" @@ -62,6 +63,7 @@ func NewActorRegistry() *ActorRegistry { // add builtInCode using: register(cid, singleton) inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...) inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...) + inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...) 
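+	// With v0, v2 and v3 actors all registered, the version predicate selects
+	// whichever set matches the network version in effect when an actor is invoked.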
return inv } diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index 885d3c0db..072711db6 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -12,6 +12,7 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/aerrors" @@ -91,6 +92,8 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin0.AccountActorCodeID case actors.Version2: code = builtin2.AccountActorCodeID + case actors.Version3: + code = builtin3.AccountActorCodeID default: panic("unsupported actors version") } diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index ba6ee2f1d..0bcfe10a7 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -107,11 +108,18 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr) } + // workaround chain halt + if build.IsNearUpgrade(blockA.Height, build.UpgradeOrangeHeight) { + return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange") + } + if build.IsNearUpgrade(blockB.Height, build.UpgradeOrangeHeight) { + return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange") + } + // are blocks the same? if blockA.Cid().Equals(blockB.Cid()) { return nil, fmt.Errorf("no consensus fault: submitted blocks are the same") } - // (1) check conditions necessary to any consensus fault // were blocks mined by same miner? 
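A minimal sketch of how the new chargeNetworkFee flag combines with the ShouldBurn helper added in the vm.go hunk below; the gas values are made up and only the signatures shown in this diff are assumed:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

func main() {
	gasUsed, gasLimit := int64(1_000_000), int64(1_200_000)
	baseFee, feeCap, premium := types.NewInt(100), types.NewInt(150), types.NewInt(1)

	// Ordinary message: the base fee for the gas used is burned.
	normal := vm.ComputeGasOutputs(gasUsed, gasLimit, baseFee, feeCap, premium, true)

	// Successful SubmitWindowedPoSt to a miner actor after the Claus upgrade:
	// ShouldBurn returns false, so only the base-fee burn is skipped; the miner
	// tip and over-estimation burn are computed as before.
	exempt := vm.ComputeGasOutputs(gasUsed, gasLimit, baseFee, feeCap, premium, false)

	fmt.Println("base fee burn (normal):", normal.BaseFeeBurn)
	fmt.Println("base fee burn (exempt):", exempt.BaseFeeBurn) // always zero
}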
diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 0919b8e8a..522bc2298 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/aerrors" "github.com/filecoin-project/lotus/chain/actors/builtin/account" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" @@ -42,9 +43,11 @@ import ( const MaxCallDepth = 4096 -var log = logging.Logger("vm") -var actorLog = logging.Logger("actors") -var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) +var ( + log = logging.Logger("vm") + actorLog = logging.Logger("actors") + gasOnActorExec = newGasCharge("OnActorExec", 0, 0) +) // stat counters var ( @@ -71,8 +74,10 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad return aast.PubkeyAddress() } -var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) -var _ blockstore.Viewer = (*gasChargingBlocks)(nil) +var ( + _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) + _ blockstore.Viewer = (*gasChargingBlocks)(nil) +) type gasChargingBlocks struct { chargeGas func(GasCharge) @@ -193,9 +198,11 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim return vm.VM.makeRuntime(ctx, msg, nil) } -type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) -type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version -type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) +type ( + CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) + NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) +) type VM struct { cstate *state.StateTree @@ -264,7 +271,6 @@ type ApplyRet struct { func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { - defer atomic.AddUint64(&StatSends, 1) st := vm.cstate @@ -561,7 +567,13 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, if gasUsed < 0 { gasUsed = 0 } - gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium) + + burn, err := vm.ShouldBurn(st, msg, errcode) + if err != nil { + return nil, xerrors.Errorf("deciding whether should burn failed: %w", err) + } + + gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium, burn) if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.BaseFeeBurn); err != nil { @@ -599,6 +611,29 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, nil } +func (vm *VM) ShouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { + // Check to see if we should burn funds. We avoid burning on successful + // window post. This won't catch _indirect_ window post calls, but this + // is the best we can get for now. + if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt { + // Ok, we've checked the _method_, but we still need to check + // the target actor. 
It would be nice if we could just look at + // the trace, but I'm not sure if that's safe? + if toActor, err := st.GetActor(msg.To); err != nil { + // If the actor wasn't found, we probably deleted it or something. Move on. + if !xerrors.Is(err, types.ErrActorNotFound) { + // Otherwise, this should never fail and something is very wrong. + return false, xerrors.Errorf("failed to lookup target actor: %w", err) + } + } else if builtin.IsStorageMinerActor(toActor.Code) { + // Ok, this is a storage miner and we've processed a window post. Remove the burn. + return false, nil + } + } + + return true, nil +} + func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) { act, err := vm.cstate.GetActor(addr) if err != nil { @@ -707,7 +742,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err close(freeBufs) }() - var batch = <-freeBufs + batch := <-freeBufs batchCp := func(blk block.Block) error { numBlocks++ totalCopySize += len(blk.RawData()) diff --git a/cli/chain.go b/cli/chain.go index 643de8f5b..539ad1a79 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "path" + "reflect" "sort" "strconv" "strings" @@ -56,6 +57,8 @@ var chainCmd = &cli.Command{ chainGasPriceCmd, chainInspectUsage, chainDecodeCmd, + chainEncodeCmd, + chainDisputeSetCmd, }, } @@ -1106,8 +1109,8 @@ var slashConsensusFault = &cli.Command{ ArgsUsage: "[blockCid1 blockCid2]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "miner", - Usage: "Miner address", + Name: "from", + Usage: "optionally specify the account to report consensus from", }, &cli.StringFlag{ Name: "extra", @@ -1142,9 +1145,25 @@ var slashConsensusFault = &cli.Command{ return xerrors.Errorf("getting block 2: %w", err) } - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err + if b1.Miner != b2.Miner { + return xerrors.Errorf("block1.miner:%s block2.miner:%s", b1.Miner, b2.Miner) + } + + var fromAddr address.Address + if from := cctx.String("from"); from == "" { + defaddr, err := api.WalletDefaultAddress(ctx) + if err != nil { + return err + } + + fromAddr = defaddr + } else { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr } bh1, err := cborutil.Dump(b1) @@ -1186,18 +1205,9 @@ var slashConsensusFault = &cli.Command{ return err } - if cctx.String("miner") == "" { - return xerrors.Errorf("--miner flag is required") - } - - maddr, err := address.NewFromString(cctx.String("miner")) - if err != nil { - return err - } - msg := &types.Message{ - To: maddr, - From: def, + To: b2.Miner, + From: fromAddr, Value: types.NewInt(0), Method: builtin.MethodsMiner.ReportConsensusFault, Params: enc, @@ -1320,3 +1330,86 @@ var chainDecodeParamsCmd = &cli.Command{ return nil }, } + +var chainEncodeCmd = &cli.Command{ + Name: "encode", + Usage: "encode various types", + Subcommands: []*cli.Command{ + chainEncodeParamsCmd, + }, +} + +var chainEncodeParamsCmd = &cli.Command{ + Name: "params", + Usage: "Encodes the given JSON params", + ArgsUsage: "[toAddr method params]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + }, + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + if cctx.Args().Len() != 3 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + to, err := 
address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing toAddr: %w", err) + } + + method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("parsing method id: %w", err) + } + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + act, err := api.StateGetActor(ctx, to, ts.Key()) + if err != nil { + return xerrors.Errorf("getting actor: %w", err) + } + + methodMeta, found := stmgr.MethodsMap[act.Code][abi.MethodNum(method)] + if !found { + return fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) + + if err := json.Unmarshal([]byte(cctx.Args().Get(2)), p); err != nil { + return fmt.Errorf("unmarshaling input into params type: %w", err) + } + + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return err + } + + switch cctx.String("encoding") { + case "base64": + fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes())) + case "hex": + fmt.Println(hex.EncodeToString(buf.Bytes())) + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) + } + + return nil + }, +} diff --git a/cli/client.go b/cli/client.go index 129ede94b..60729f2e5 100644 --- a/cli/client.go +++ b/cli/client.go @@ -91,7 +91,7 @@ var clientCmd = &cli.Command{ WithCategory("retrieval", clientRetrieveCmd), WithCategory("util", clientCommPCmd), WithCategory("util", clientCarGenCmd), - WithCategory("util", clientInfoCmd), + WithCategory("util", clientBalancesCmd), WithCategory("util", clientListTransfers), WithCategory("util", clientRestartTransfer), WithCategory("util", clientCancelTransfer), @@ -1732,9 +1732,9 @@ var clientGetDealCmd = &cli.Command{ }, } -var clientInfoCmd = &cli.Command{ - Name: "info", - Usage: "Print storage market client information", +var clientBalancesCmd = &cli.Command{ + Name: "balances", + Usage: "Print storage market client balances", Flags: []cli.Flag{ &cli.StringFlag{ Name: "client", @@ -1751,7 +1751,7 @@ var clientInfoCmd = &cli.Command{ var addr address.Address if clientFlag := cctx.String("client"); clientFlag != "" { - ca, err := address.NewFromString("client") + ca, err := address.NewFromString(clientFlag) if err != nil { return err } @@ -1770,10 +1770,22 @@ var clientInfoCmd = &cli.Command{ return err } - fmt.Printf("Client Market Info:\n") + reserved, err := api.MarketGetReserved(ctx, addr) + if err != nil { + return err + } - fmt.Printf("Locked Funds:\t%s\n", types.FIL(balance.Locked)) - fmt.Printf("Escrowed Funds:\t%s\n", types.FIL(balance.Escrow)) + avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + + fmt.Printf("Client Market Balance for address %s:\n", addr) + + fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow)) + fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked)) + fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved)) + fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail)) return nil }, @@ -1943,6 +1955,11 @@ var clientListTransfers = &cli.Command{ Name: "list-transfers", Usage: "List ongoing data transfers for deals", Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print verbose transfer details", + }, &cli.BoolFlag{ Name: "color", Usage: "use color in display output", @@ -1974,6 +1991,7 @@ var clientListTransfers = &cli.Command{ return err } + verbose := cctx.Bool("verbose") 
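+		// verbose output keeps full CIDs, peer IDs and vouchers instead of the
+		// truncated forms used in the table output below.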
completed := cctx.Bool("completed") color := cctx.Bool("color") watch := cctx.Bool("watch") @@ -1989,7 +2007,7 @@ var clientListTransfers = &cli.Command{ tm.MoveCursor(1, 1) - OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) + OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) tm.Flush() @@ -2014,13 +2032,13 @@ var clientListTransfers = &cli.Command{ } } } - OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) + OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) return nil }, } // OutputDataTransferChannels generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) { +func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) { sort.Slice(channels, func(i, j int) bool { return channels[i].TransferID < channels[j].TransferID }) @@ -2050,7 +2068,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range sendingChannels { - w.Write(toChannelOutput(color, "Sending To", channel)) + w.Write(toChannelOutput(color, "Sending To", channel, verbose)) } w.Flush(out) //nolint:errcheck @@ -2064,7 +2082,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range receivingChannels { - w.Write(toChannelOutput(color, "Receiving From", channel)) + w.Write(toChannelOutput(color, "Receiving From", channel, verbose)) } w.Flush(out) //nolint:errcheck } @@ -2085,9 +2103,13 @@ func channelStatusString(useColor bool, status datatransfer.Status) string { } } -func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} { - rootCid := ellipsis(channel.BaseCID.String(), 8) - otherParty := ellipsis(channel.OtherPeer.String(), 8) +func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { + rootCid := channel.BaseCID.String() + otherParty := channel.OtherPeer.String() + if !verbose { + rootCid = ellipsis(rootCid, 8) + otherParty = ellipsis(otherParty, 8) + } initiated := "N" if channel.IsInitiator { @@ -2095,7 +2117,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr } voucher := channel.Voucher - if len(voucher) > 40 { + if len(voucher) > 40 && !verbose { voucher = ellipsis(voucher, 37) } diff --git a/cli/disputer.go b/cli/disputer.go new file mode 100644 index 000000000..40a3092ca --- /dev/null +++ b/cli/disputer.go @@ -0,0 +1,429 @@ +package cli + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/chain/actors" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/urfave/cli/v2" +) + +const Confidence = 10 + +type minerDeadline struct { + miner address.Address 
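+	// index identifies which of the miner's deadlines this entry refers to.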
+ index uint64 +} + +var chainDisputeSetCmd = &cli.Command{ + Name: "disputer", + Usage: "interact with the window post disputer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "max-fee", + Usage: "Spend up to X FIL per DisputeWindowedPoSt message", + }, + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send messages from", + }, + }, + Subcommands: []*cli.Command{ + disputerStartCmd, + disputerMsgCmd, + }, +} + +var disputerMsgCmd = &cli.Command{ + Name: "dispute", + Usage: "Send a specific DisputeWindowedPoSt message", + ArgsUsage: "[minerAddress index postIndex]", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + fmt.Println("Usage: dispute [minerAddress index postIndex]") + return nil + } + + ctx := ReqContext(cctx) + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + toa, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return fmt.Errorf("given 'miner' address %q was invalid: %w", cctx.Args().First(), err) + } + + deadline, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + postIndex, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64) + if err != nil { + return err + } + + fromAddr, err := getSender(ctx, api, cctx.String("from")) + if err != nil { + return err + } + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: deadline, + PoStIndex: postIndex, + }) + + if aerr != nil { + return xerrors.Errorf("failed to serailize params: %w", aerr) + } + + dmsg := &types.Message{ + To: toa, + From: fromAddr, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + Params: dpp, + } + + rslt, err := api.StateCall(ctx, dmsg, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to simulate dispute: %w", err) + } + + if rslt.MsgRct.ExitCode == 0 { + mss, err := getMaxFee(cctx.String("max-fee")) + if err != nil { + return err + } + + sm, err := api.MpoolPushMessage(ctx, dmsg, mss) + if err != nil { + return err + } + + fmt.Println("dispute message ", sm.Cid()) + } else { + fmt.Println("dispute is unsuccessful") + } + + return nil + }, +} + +var disputerStartCmd = &cli.Command{ + Name: "start", + Usage: "Start the window post disputer", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "start-epoch", + Usage: "only start disputing PoSts after this epoch ", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + fromAddr, err := getSender(ctx, api, cctx.String("from")) + if err != nil { + return err + } + + mss, err := getMaxFee(cctx.String("max-fee")) + if err != nil { + return err + } + + startEpoch := abi.ChainEpoch(0) + if cctx.IsSet("height") { + startEpoch = abi.ChainEpoch(cctx.Uint64("height")) + } + + fmt.Println("checking sync status") + + if err := SyncWait(ctx, api, false); err != nil { + return xerrors.Errorf("sync wait: %w", err) + } + + fmt.Println("setting up window post disputer") + + // subscribe to head changes and validate the current value + + headChanges, err := api.ChainNotify(ctx) + if err != nil { + return err + } + + head, ok := <-headChanges + if !ok { + return xerrors.Errorf("Notify stream was invalid") + } + + if len(head) != 1 { + return xerrors.Errorf("Notify first entry should have been one item") + } + + if head[0].Type != store.HCCurrent { + return xerrors.Errorf("expected 
current head on Notify stream (got %s)", head[0].Type) + } + + lastEpoch := head[0].Val.Height() + lastStatusCheckEpoch := lastEpoch + + // build initial deadlineMap + + minerList, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return err + } + + knownMiners := make(map[address.Address]struct{}) + deadlineMap := make(map[abi.ChainEpoch][]minerDeadline) + for _, miner := range minerList { + dClose, dl, err := makeMinerDeadline(ctx, api, miner) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[miner] = struct{}{} + } + + // when this fires, check for newly created miners, and purge any "missed" epochs from deadlineMap + statusCheckTicker := time.NewTicker(time.Hour) + defer statusCheckTicker.Stop() + + fmt.Println("starting up window post disputer") + + applyTsk := func(tsk types.TipSetKey) error { + log.Infof("last checked height: %d", lastEpoch) + dls, ok := deadlineMap[lastEpoch] + delete(deadlineMap, lastEpoch) + if !ok || startEpoch >= lastEpoch { + // no deadlines closed at this epoch - Confidence, or we haven't reached the start cutoff yet + return nil + } + + dpmsgs := make([]*types.Message, 0) + + // TODO: Parallelizeable + for _, dl := range dls { + fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk) + if err != nil { + return xerrors.Errorf("failed to load deadlines: %w", err) + } + + if int(dl.index) >= len(fullDeadlines) { + return xerrors.Errorf("deadline index %d not found in deadlines", dl.index) + } + + ms, err := makeDisputeWindowedPosts(ctx, api, dl, fullDeadlines[dl.index].DisputableProofCount, fromAddr) + if err != nil { + return xerrors.Errorf("failed to check for disputes: %w", err) + } + + dpmsgs = append(dpmsgs, ms...) 
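A minimal sketch, not part of the patch above: the disputer only inspects a miner's window PoSts Confidence (10) epochs after the relevant proving deadline closes, which is why every entry is keyed by dClose + Confidence. A standalone version of that bookkeeping, assuming it lives in the same package as the code above so that minerDeadline, Confidence and the abi import are already available:

// schedule queues dl for inspection Confidence epochs after its deadline
// closes, matching the deadlineMap[dClose+Confidence] appends above.
func schedule(deadlineMap map[abi.ChainEpoch][]minerDeadline, dClose abi.ChainEpoch, dl minerDeadline) {
	checkAt := dClose + Confidence
	deadlineMap[checkAt] = append(deadlineMap[checkAt], dl)
}

// popDue removes and returns the deadlines that became inspectable at the
// given epoch; applyTsk uses the same lookup-then-delete pattern so an epoch
// is never processed twice.
func popDue(deadlineMap map[abi.ChainEpoch][]minerDeadline, epoch abi.ChainEpoch) []minerDeadline {
	dls := deadlineMap[epoch]
	delete(deadlineMap, epoch)
	return dls
}

Because appending to a nil map entry works and delete on a missing key is a no-op, an epoch with no closing deadlines simply yields an empty slice.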
+ + dClose, dl, err := makeMinerDeadline(ctx, api, dl.miner) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + } + + // TODO: Parallelizeable / can be integrated into the previous deadline-iterating for loop + for _, dpmsg := range dpmsgs { + log.Infof("disputing a PoSt from miner %s", dpmsg.To) + m, err := api.MpoolPushMessage(ctx, dpmsg, mss) + if err != nil { + log.Infof("failed to dispute post message: %s", err.Error()) + } else { + log.Infof("disputed a PoSt in message: %s", m.Cid()) + } + } + + return nil + } + + disputeLoop := func() error { + select { + case notif, ok := <-headChanges: + if !ok { + return xerrors.Errorf("head change channel errored") + } + + for _, val := range notif { + switch val.Type { + case store.HCApply: + for ; lastEpoch <= val.Val.Height(); lastEpoch++ { + err := applyTsk(val.Val.Key()) + if err != nil { + return err + } + } + case store.HCRevert: + // do nothing + default: + return xerrors.Errorf("unexpected head change type %s", val.Type) + } + } + case <-statusCheckTicker.C: + log.Infof("Running status check: ") + + minerList, err = api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner list: %w", err) + } + + for _, m := range minerList { + _, ok := knownMiners[m] + if !ok { + dClose, dl, err := makeMinerDeadline(ctx, api, m) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[m] = struct{}{} + } + } + + for ; lastStatusCheckEpoch < lastEpoch; lastStatusCheckEpoch++ { + // if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever + _, ok := deadlineMap[lastStatusCheckEpoch] + if ok { + log.Infof("epoch %d was skipped during execution, deleting it from deadlineMap") + delete(deadlineMap, lastStatusCheckEpoch) + } + } + + log.Infof("Status check complete") + case <-ctx.Done(): + return xerrors.Errorf("context cancelled") + } + + return nil + } + + for { + err := disputeLoop() + if err != nil { + fmt.Println("disputer shutting down: ", err) + break + } + } + + return nil + }, +} + +// for a given miner, index, and maxPostIndex, tries to dispute posts from 0...postsSnapshotted-1 +// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent +func makeDisputeWindowedPosts(ctx context.Context, api lapi.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) { + disputes := make([]*types.Message, 0) + + for i := uint64(0); i < postsSnapshotted; i++ { + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: dl.index, + PoStIndex: i, + }) + + if aerr != nil { + return nil, xerrors.Errorf("failed to serailize params: %w", aerr) + } + + dispute := &types.Message{ + To: dl.miner, + From: sender, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + Params: dpp, + } + + rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK) + if err == nil && rslt.MsgRct.ExitCode == 0 { + disputes = append(disputes, dispute) + } + + } + + return disputes, nil +} + +func makeMinerDeadline(ctx context.Context, api lapi.FullNode, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) { + dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK) + if err != nil { + return -1, nil, xerrors.Errorf("getting proving index list: %w", 
err) + } + + return dl.Close, &minerDeadline{ + miner: mAddr, + index: dl.Index, + }, nil +} + +func getSender(ctx context.Context, api lapi.FullNode, fromStr string) (address.Address, error) { + if fromStr == "" { + return api.WalletDefaultAddress(ctx) + } + + addr, err := address.NewFromString(fromStr) + if err != nil { + return address.Undef, err + } + + has, err := api.WalletHas(ctx, addr) + if err != nil { + return address.Undef, err + } + + if !has { + return address.Undef, xerrors.Errorf("wallet doesn't contain: %s ", addr) + } + + return addr, nil +} + +func getMaxFee(maxStr string) (*lapi.MessageSendSpec, error) { + if maxStr != "" { + maxFee, err := types.ParseFIL(maxStr) + if err != nil { + return nil, xerrors.Errorf("parsing max-fee: %w", err) + } + return &lapi.MessageSendSpec{ + MaxFee: types.BigInt(maxFee), + }, nil + } + + return nil, nil +} diff --git a/cli/mpool.go b/cli/mpool.go index a84865547..d74c7c9ba 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -362,15 +362,15 @@ var mpoolReplaceCmd = &cli.Command{ Flags: []cli.Flag{ &cli.StringFlag{ Name: "gas-feecap", - Usage: "gas feecap for new message", + Usage: "gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)", }, &cli.StringFlag{ Name: "gas-premium", - Usage: "gas price for new message", + Usage: "gas price for new message (pay to miner, attoFIL/GasUnit)", }, &cli.Int64Flag{ Name: "gas-limit", - Usage: "gas price for new message", + Usage: "gas limit for new message (GasUnit)", }, &cli.BoolFlag{ Name: "auto", @@ -378,7 +378,7 @@ var mpoolReplaceCmd = &cli.Command{ }, &cli.StringFlag{ Name: "max-fee", - Usage: "Spend up to X FIL for this message (applicable for auto mode)", + Usage: "Spend up to X attoFIL for this message (applicable for auto mode)", }, }, ArgsUsage: " | ", diff --git a/cli/multisig.go b/cli/multisig.go index 8abae5182..c3a062ed4 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -473,12 +473,12 @@ var msigApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) } - if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 { - return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) + if cctx.Args().Len() > 2 && cctx.Args().Len() < 5 { + return ShowHelp(cctx, fmt.Errorf("usage: msig approve ")) } - if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 { - return ShowHelp(cctx, fmt.Errorf("usage: msig approve ")) + if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 { + return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) } api, closer, err := GetFullNodeAPI(cctx) @@ -1178,7 +1178,7 @@ var msigLockProposeCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { diff --git a/cli/send.go b/cli/send.go index 55ea8b028..d15dd5fb2 100644 --- a/cli/send.go +++ b/cli/send.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" ) @@ -51,7 +52,7 @@ var sendCmd = &cli.Command{ &cli.Uint64Flag{ Name: "method", Usage: "specify method to invoke", - Value: 0, + Value: uint64(builtin.MethodSend), }, &cli.StringFlag{ Name: "params-json", @@ -61,6 +62,10 @@ var sendCmd = &cli.Command{ Name: "params-hex", Usage: "specify invocation 
parameters in hex", }, + &cli.BoolFlag{ + Name: "force", + Usage: "must be specified for the action to take effect if maybe SysErrInsufficientFunds etc", + }, }, Action: func(cctx *cli.Context) error { if cctx.Args().Len() != 2 { @@ -143,6 +148,20 @@ var sendCmd = &cli.Command{ Params: params, } + if !cctx.Bool("force") { + // Funds insufficient check + fromBalance, err := api.WalletBalance(ctx, msg.From) + if err != nil { + return err + } + totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value) + + if fromBalance.LessThan(totalCost) { + fmt.Printf("WARNING: From balance %s less than total cost %s\n", types.FIL(fromBalance), types.FIL(totalCost)) + return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned") + } + } + if cctx.IsSet("nonce") { msg.Nonce = cctx.Uint64("nonce") sm, err := api.WalletSignMessage(ctx, fromAddr, msg) diff --git a/cli/state.go b/cli/state.go index bef864dd2..5899dfdb1 100644 --- a/cli/state.go +++ b/cli/state.go @@ -1617,7 +1617,7 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er return nil, fmt.Errorf("unknown method %d for actor %s", method, act) } - paramObj := methodMeta.Params + paramObj := methodMeta.Params.Elem() if paramObj.NumField() != len(args) { return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField()) } diff --git a/cli/wallet.go b/cli/wallet.go index e9b8e6ece..802d85702 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -509,6 +509,7 @@ var walletMarket = &cli.Command{ Usage: "Interact with market balances", Subcommands: []*cli.Command{ walletMarketWithdraw, + walletMarketAdd, }, } @@ -518,13 +519,13 @@ var walletMarketWithdraw = &cli.Command{ ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "from", - Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address", - Aliases: []string{"f"}, + Name: "wallet", + Usage: "Specify address to withdraw funds to, otherwise it will use the default wallet address", + Aliases: []string{"w"}, }, &cli.StringFlag{ Name: "address", - Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)", + Usage: "Market address to withdraw from (account or miner actor address, defaults to --wallet address)", Aliases: []string{"a"}, }, }, @@ -536,6 +537,123 @@ var walletMarketWithdraw = &cli.Command{ defer closer() ctx := ReqContext(cctx) + var wallet address.Address + if cctx.String("wallet") != "" { + wallet, err = address.NewFromString(cctx.String("wallet")) + if err != nil { + return xerrors.Errorf("parsing from address: %w", err) + } + } else { + wallet, err = api.WalletDefaultAddress(ctx) + if err != nil { + return xerrors.Errorf("getting default wallet address: %w", err) + } + } + + addr := wallet + if cctx.String("address") != "" { + addr, err = address.NewFromString(cctx.String("address")) + if err != nil { + return xerrors.Errorf("parsing market address: %w", err) + } + } + + // Work out if there are enough unreserved, unlocked funds to withdraw + bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err) + } + + reserved, err := api.MarketGetReserved(ctx, addr) + if err != nil { + return xerrors.Errorf("getting market reserved amount for address %s: %w", addr.String(), err) + } + + avail := 
big.Subtract(big.Subtract(bal.Escrow, bal.Locked), reserved) + + notEnoughErr := func(msg string) error { + return xerrors.Errorf("%s; "+ + "available (%s) = escrow (%s) - locked (%s) - reserved (%s)", + msg, types.FIL(avail), types.FIL(bal.Escrow), types.FIL(bal.Locked), types.FIL(reserved)) + } + + if avail.IsZero() || avail.LessThan(big.Zero()) { + avail = big.Zero() + return notEnoughErr("no funds available to withdraw") + } + + // Default to withdrawing all available funds + amt := avail + + // If there was an amount argument, only withdraw that amount + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amt = abi.TokenAmount(f) + } + + // Check the amount is positive + if amt.IsZero() || amt.LessThan(big.Zero()) { + return xerrors.Errorf("amount must be > 0") + } + + // Check there are enough available funds + if amt.GreaterThan(avail) { + msg := fmt.Sprintf("can't withdraw more funds than available; requested: %s", types.FIL(amt)) + return notEnoughErr(msg) + } + + fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), wallet.String()) + smsg, err := api.MarketWithdraw(ctx, wallet, addr, amt) + if err != nil { + return xerrors.Errorf("fund manager withdraw error: %w", err) + } + + fmt.Printf("WithdrawBalance message cid: %s\n", smsg) + + return nil + }, +} + +var walletMarketAdd = &cli.Command{ + Name: "add", + Usage: "Add funds to the Storage Market Actor", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "Specify address to move funds from, otherwise it will use the default wallet address", + Aliases: []string{"f"}, + }, + &cli.StringFlag{ + Name: "address", + Usage: "Market address to move funds to (account or miner actor address, defaults to --from address)", + Aliases: []string{"a"}, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting node API: %w", err) + } + defer closer() + ctx := ReqContext(cctx) + + // Get amount param + if !cctx.Args().Present() { + return fmt.Errorf("must pass amount to add") + } + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amt := abi.TokenAmount(f) + + // Get from param var from address.Address if cctx.String("from") != "" { from, err = address.NewFromString(cctx.String("from")) @@ -549,6 +667,7 @@ var walletMarketWithdraw = &cli.Command{ } } + // Get address param addr := from if cctx.String("address") != "" { addr, err = address.NewFromString(cctx.String("address")) @@ -557,38 +676,14 @@ var walletMarketWithdraw = &cli.Command{ } } - bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) + // Add balance to market actor + fmt.Printf("Submitting Add Balance message for amount %s for address %s\n", types.FIL(amt), addr) + smsg, err := api.MarketAddBalance(ctx, from, addr, amt) if err != nil { - return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err) + return xerrors.Errorf("add balance error: %w", err) } - avail := big.Subtract(bal.Escrow, bal.Locked) - amt := avail - - if cctx.Args().Present() { - f, err := types.ParseFIL(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("parsing 'amount' argument: %w", err) - } - - amt = abi.TokenAmount(f) - } - - if amt.GreaterThan(avail) { - return xerrors.Errorf("can't withdraw more funds than 
available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail)) - } - - if avail.IsZero() { - return xerrors.Errorf("zero unlocked funds available to withdraw") - } - - fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String()) - smsg, err := api.MarketWithdraw(ctx, from, addr, amt) - if err != nil { - return xerrors.Errorf("fund manager withdraw error: %w", err) - } - - fmt.Printf("WithdrawBalance message cid: %s\n", smsg) + fmt.Printf("AddBalance message cid: %s\n", smsg) return nil }, diff --git a/cmd/chain-noise/main.go b/cmd/chain-noise/main.go index 81586e1b2..37d623ce2 100644 --- a/cmd/chain-noise/main.go +++ b/cmd/chain-noise/main.go @@ -27,6 +27,16 @@ func main() { Hidden: true, Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME }, + &cli.IntFlag{ + Name: "limit", + Usage: "spam transaction count limit, <= 0 is no limit", + Value: 0, + }, + &cli.IntFlag{ + Name: "rate", + Usage: "spam transaction rate, count per second", + Value: 5, + }, }, Commands: []*cli.Command{runCmd}, } @@ -52,11 +62,17 @@ var runCmd = &cli.Command{ defer closer() ctx := lcli.ReqContext(cctx) - return sendSmallFundsTxs(ctx, api, addr, 5) + rate := cctx.Int("rate") + if rate <= 0 { + rate = 5 + } + limit := cctx.Int("limit") + + return sendSmallFundsTxs(ctx, api, addr, rate, limit) }, } -func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error { +func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate, limit int) error { var sendSet []address.Address for i := 0; i < 20; i++ { naddr, err := api.WalletNew(ctx, types.KTSecp256k1) @@ -66,9 +82,14 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre sendSet = append(sendSet, naddr) } + count := limit tick := build.Clock.Ticker(time.Second / time.Duration(rate)) for { + if count <= 0 && limit > 0 { + fmt.Printf("%d messages sent.\n", limit) + return nil + } select { case <-tick.C: msg := &types.Message{ @@ -81,6 +102,7 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre if err != nil { return err } + count-- fmt.Println("Message sent: ", smsg.Cid()) case <-ctx.Done(): return nil diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index b246aedbb..398512e0d 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -40,6 +40,8 @@ import ( var log = logging.Logger("lotus-bench") type BenchResults struct { + EnvVar map[string]string + SectorSize abi.SectorSize SectorNumber int @@ -446,6 +448,15 @@ var sealBenchCmd = &cli.Command{ bo.VerifyWindowPostHot = verifyWindowpost2.Sub(verifyWindowpost1) } + bo.EnvVar = make(map[string]string) + for _, envKey := range []string{"BELLMAN_NO_GPU", "FIL_PROOFS_MAXIMIZE_CACHING", "FIL_PROOFS_USE_GPU_COLUMN_BUILDER", + "FIL_PROOFS_USE_GPU_TREE_BUILDER", "FIL_PROOFS_USE_MULTICORE_SDR", "BELLMAN_CUSTOM_GPU"} { + envValue, found := os.LookupEnv(envKey) + if found { + bo.EnvVar[envKey] = envValue + } + } + if c.Bool("json-out") { data, err := json.MarshalIndent(bo, "", " ") if err != nil { @@ -454,6 +465,10 @@ var sealBenchCmd = &cli.Command{ fmt.Println(string(data)) } else { + fmt.Println("environment variable list:") + for envKey, envValue := range bo.EnvVar { + fmt.Printf("%s=%s\n", envKey, envValue) + } fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber) if robench == "" { fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, 
bo.SectorNumber, bo.SealingSum.AddPiece)) diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go index ee19eb948..2b5023739 100644 --- a/cmd/lotus-gateway/api.go +++ b/cmd/lotus-gateway/api.go @@ -57,6 +57,7 @@ type gatewayDepsAPI interface { StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateSearchMsgLimited(ctx context.Context, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) @@ -299,6 +300,10 @@ func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKe return a.api.StateNetworkVersion(ctx, tsk) } +func (a *GatewayAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { + return a.api.StateSearchMsgLimited(ctx, msg, a.stateWaitLookbackLimit) +} + func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { return a.api.StateWaitMsgLimited(ctx, msg, confidence, a.stateWaitLookbackLimit) } diff --git a/cmd/lotus-gateway/endtoend_test.go b/cmd/lotus-gateway/endtoend_test.go index 4d5e88c82..b6d81efb4 100644 --- a/cmd/lotus-gateway/endtoend_test.go +++ b/cmd/lotus-gateway/endtoend_test.go @@ -245,7 +245,7 @@ func startNodes( // Create a gateway server in front of the full node gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit) - _, addr, err := builder.CreateRPCServer(gapiImpl) + _, addr, err := builder.CreateRPCServer(t, gapiImpl) require.NoError(t, err) // Create a gateway client API that connects to the gateway server diff --git a/cmd/lotus-shed/blockmsgid.go b/cmd/lotus-shed/blockmsgid.go new file mode 100644 index 000000000..85b786ec0 --- /dev/null +++ b/cmd/lotus-shed/blockmsgid.go @@ -0,0 +1,70 @@ +package main + +import ( + "encoding/base64" + "fmt" + + blake2b "github.com/minio/blake2b-simd" + "github.com/urfave/cli/v2" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var blockmsgidCmd = &cli.Command{ + Name: "blockmsgid", + Usage: "Print a block's pubsub message ID", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + for _, arg := range cctx.Args().Slice() { + blkcid, err := cid.Decode(arg) + if err != nil { + return fmt.Errorf("error decoding block cid: %w", err) + } + + blkhdr, err := api.ChainGetBlock(ctx, blkcid) + if err != nil { + return fmt.Errorf("error retrieving block header: %w", err) + } + + blkmsgs, err := api.ChainGetBlockMessages(ctx, blkcid) + if err != nil { + return fmt.Errorf("error retrieving block messages: %w", err) + } + + blkmsg := &types.BlockMsg{ + Header: blkhdr, + } + + for _, m := range blkmsgs.BlsMessages { + blkmsg.BlsMessages = append(blkmsg.BlsMessages, m.Cid()) + } + + for _, m := range blkmsgs.SecpkMessages { + blkmsg.SecpkMessages = append(blkmsg.SecpkMessages, m.Cid()) + } + + bytes, err := blkmsg.Serialize() + if 
err != nil { + return fmt.Errorf("error serializing BlockMsg: %w", err) + } + + msgId := blake2b.Sum256(bytes) + msgId64 := base64.StdEncoding.EncodeToString(msgId[:]) + + fmt.Println(msgId64) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index 8cdc1630c..e0c4465b5 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -120,7 +120,7 @@ var datastoreGetCmd = &cli.Command{ }, ArgsUsage: "[namespace key]", Action: func(cctx *cli.Context) error { - logging.SetLogLevel("badger", "ERROR") // nolint:errchec + logging.SetLogLevel("badger", "ERROR") // nolint:errcheck r, err := repo.NewFS(cctx.String("repo")) if err != nil { diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 19067f8c9..10b2b4d89 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -50,6 +50,7 @@ func main() { electionCmd, rpcCmd, cidCmd, + blockmsgidCmd, } app := &cli.App{ diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go index d1fc96972..bcd29ea60 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-storage-miner/actor.go @@ -622,8 +622,8 @@ var actorControlSet = &cli.Command{ var actorSetOwnerCmd = &cli.Command{ Name: "set-owner", - Usage: "Set owner address", - ArgsUsage: "[address]", + Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", + ArgsUsage: "[newOwnerAddress senderAddress]", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "really-do-it", @@ -637,8 +637,8 @@ var actorSetOwnerCmd = &cli.Command{ return nil } - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of new owner address") + if cctx.NArg() != 2 { + return fmt.Errorf("must pass new owner address and sender address") } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -660,7 +660,17 @@ var actorSetOwnerCmd = &cli.Command{ return err } - newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + fa, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK) if err != nil { return err } @@ -675,13 +685,17 @@ var actorSetOwnerCmd = &cli.Command{ return err } - sp, err := actors.SerializeParams(&newAddr) + if fromAddrId != mi.Owner && fromAddrId != newAddrId { + return xerrors.New("from address must either be the old owner or the new owner") + } + + sp, err := actors.SerializeParams(&newAddrId) if err != nil { return xerrors.Errorf("serializing params: %w", err) } smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, + From: fromAddrId, To: maddr, Method: miner.Methods.ChangeOwnerAddress, Value: big.Zero(), @@ -691,7 +705,7 @@ var actorSetOwnerCmd = &cli.Command{ return xerrors.Errorf("mpool push: %w", err) } - fmt.Println("Propose Message CID:", smsg.Cid()) + fmt.Println("Message CID:", smsg.Cid()) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) @@ -701,34 +715,11 @@ var actorSetOwnerCmd = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode != 0 { - fmt.Println("Propose owner change failed!") + fmt.Println("owner change failed!") return err } - smsg, err = api.MpoolPushMessage(ctx, &types.Message{ - From: newAddr, - To: maddr, - Method: miner.Methods.ChangeOwnerAddress, - Value: big.Zero(), - Params: sp, - }, nil) - if err != 
nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Approve Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode != 0 { - fmt.Println("Approve owner change failed!") - return err - } + fmt.Println("message succeeded!") return nil }, diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go index 2aea6bda9..1816c1eab 100644 --- a/cmd/lotus-storage-miner/actor_test.go +++ b/cmd/lotus-storage-miner/actor_test.go @@ -50,7 +50,7 @@ func TestWorkerKeyChange(t *testing.T) { blocktime := 1 * time.Millisecond - n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV2At(1), test.FullNodeWithActorsV2At(1)}, test.OneMiner) + n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV3At(2), test.FullNodeWithActorsV3At(2)}, test.OneMiner) client1 := n[0] client2 := n[1] diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index ed74da96b..30c2924f2 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -222,7 +222,7 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short()) fmt.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short()) fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short()) - color.Green(" Available: %s", types.FIL(availBalance).Short()) + colorTokenAmount(" Available: %s\n", availBalance) mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) if err != nil { @@ -232,7 +232,7 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short()) fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short()) - color.Green(" Available: %s\n", types.FIL(big.Sub(mb.Escrow, mb.Locked)).Short()) + colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked)) wb, err := api.WalletBalance(ctx, mi.Worker) if err != nil { @@ -253,7 +253,7 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf(" Control: %s\n", types.FIL(cbsum).Short()) } - fmt.Printf("Total Spendable: %s\n", color.YellowString(types.FIL(spendable).Short())) + colorTokenAmount("Total Spendable: %s\n", spendable) fmt.Println() @@ -298,6 +298,10 @@ var stateList = []stateMeta{ {col: color.FgYellow, state: sealing.CommitWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, + {col: color.FgCyan, state: sealing.Terminating}, + {col: color.FgCyan, state: sealing.TerminateWait}, + {col: color.FgCyan, state: sealing.TerminateFinality}, + {col: color.FgCyan, state: sealing.TerminateFailed}, {col: color.FgCyan, state: sealing.Removing}, {col: color.FgCyan, state: sealing.Removed}, @@ -355,3 +359,13 @@ func sectorsInfo(ctx context.Context, napi api.StorageMiner) error { return nil } + +func colorTokenAmount(format string, amount abi.TokenAmount) { + if amount.GreaterThan(big.Zero()) { + color.Green(format, types.FIL(amount).Short()) + } else if amount.Equals(big.Zero()) { + color.Yellow(format, types.FIL(amount).Short()) + } else { + color.Red(format, types.FIL(amount).Short()) + } +} diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go index 1e5057f5d..bfb380528 100644 --- a/cmd/lotus-storage-miner/market.go +++ b/cmd/lotus-storage-miner/market.go @@ -451,7 +451,7 @@ func outputStorageDeals(out io.Writer, 
deals []storagemarket.MinerDeal, verbose w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0) if verbose { - _, _ = fmt.Fprintf(w, "Creation\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n") + _, _ = fmt.Fprintf(w, "Creation\tVerified\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n") } else { _, _ = fmt.Fprintf(w, "ProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\n") } @@ -465,7 +465,7 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose fil := types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration())))) if verbose { - _, _ = fmt.Fprintf(w, "%s\t", deal.CreationTime.Time().Format(time.Stamp)) + _, _ = fmt.Fprintf(w, "%s\t%t\t", deal.CreationTime.Time().Format(time.Stamp), deal.Proposal.VerifiedDeal) } _, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s", propcid, deal.DealID, storagemarket.DealStates[deal.State], deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)), fil, deal.Proposal.Duration()) @@ -744,6 +744,11 @@ var transfersListCmd = &cli.Command{ Name: "list", Usage: "List ongoing data transfers for this miner", Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print verbose transfer details", + }, &cli.BoolFlag{ Name: "color", Usage: "use color in display output", @@ -775,6 +780,7 @@ var transfersListCmd = &cli.Command{ return err } + verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") color := cctx.Bool("color") watch := cctx.Bool("watch") @@ -790,7 +796,7 @@ var transfersListCmd = &cli.Command{ tm.MoveCursor(1, 1) - lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) + lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) tm.Flush() @@ -815,7 +821,7 @@ var transfersListCmd = &cli.Command{ } } } - lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) + lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) return nil }, } diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go index 5d176a754..3d60f4b76 100644 --- a/cmd/lotus-storage-miner/proving.go +++ b/cmd/lotus-storage-miner/proving.go @@ -430,11 +430,6 @@ var provingCheckProvableCmd = &cli.Command{ return err } - pf, err := info.SealProofType.RegisteredWindowPoStProof() - if err != nil { - return err - } - partitions, err := api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK) if err != nil { return err @@ -446,7 +441,7 @@ var provingCheckProvableCmd = &cli.Command{ for parIdx, par := range partitions { sectors := make(map[abi.SectorNumber]struct{}) - sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.AllSectors, types.EmptyTSK) + sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK) if err != nil { return err } @@ -463,7 +458,7 @@ var provingCheckProvableCmd = &cli.Command{ }) } - bad, err := sapi.CheckProvable(ctx, pf, tocheck, cctx.Bool("slow")) + bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow")) if err != nil { return err } diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 1c3e4858c..5ef067b2c 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -35,6 +35,7 @@ var sectorsCmd = &cli.Command{ sectorsRefsCmd, sectorsUpdateCmd, sectorsPledgeCmd, + sectorsTerminateCmd, 
sectorsRemoveCmd, sectorsMarkForUpgradeCmd, sectorsStartSealCmd, @@ -396,9 +397,123 @@ var sectorsRefsCmd = &cli.Command{ }, } +var sectorsTerminateCmd = &cli.Command{ + Name: "terminate", + Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag if you know what you are doing", + }, + }, + Subcommands: []*cli.Command{ + sectorsTerminateFlushCmd, + sectorsTerminatePendingCmd, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + return xerrors.Errorf("pass --really-do-it to confirm this action") + } + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + if cctx.Args().Len() != 1 { + return xerrors.Errorf("must pass sector number") + } + + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector number: %w", err) + } + + return nodeApi.SectorTerminate(ctx, abi.SectorNumber(id)) + }, +} + +var sectorsTerminateFlushCmd = &cli.Command{ + Name: "flush", + Usage: "Send a terminate message if there are sectors queued for termination", + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + mcid, err := nodeApi.SectorTerminateFlush(ctx) + if err != nil { + return err + } + + if mcid == nil { + return xerrors.New("no sectors were queued for termination") + } + + fmt.Println(mcid) + + return nil + }, +} + +var sectorsTerminatePendingCmd = &cli.Command{ + Name: "pending", + Usage: "List sector numbers of sectors pending termination", + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + ctx := lcli.ReqContext(cctx) + + pending, err := nodeApi.SectorTerminatePending(ctx) + if err != nil { + return err + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + dl, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting proving deadline info failed: %w", err) + } + + for _, id := range pending { + loc, err := api.StateSectorPartition(ctx, maddr, id.Number, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("finding sector partition: %w", err) + } + + fmt.Print(id.Number) + + if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain) + loc.Deadline == dl.Index || // not in current + (loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous + fmt.Print(" (in proving window)") + } + fmt.Println() + } + + return nil + }, +} + var sectorsRemoveCmd = &cli.Command{ Name: "remove", - Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)", + Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))", ArgsUsage: "", Flags: []cli.Flag{ &cli.BoolFlag{ diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go index aec0000c9..5517bd9f4 100644 --- a/cmd/lotus/backup.go +++ b/cmd/lotus/backup.go @@ -1,14 +1,121 @@ package main 
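A minimal sketch, not part of the patch, clarifying the modular check in sectorsTerminatePendingCmd above: a queued sector is flagged "(in proving window)" when its deadline is the previous, current or next deadline relative to the miner's current proving deadline, with wraparound at the end of the proving period. Here periodDeadlines stands in for miner.WPoStPeriodDeadlines (48 on mainnet):

// inProvingWindow mirrors the condition used by 'sectors terminate pending'
// to warn that a queued termination targets a sector whose deadline is
// currently open, has just closed, or is about to open.
func inProvingWindow(sectorDeadline, currentIndex, periodDeadlines uint64) bool {
	return sectorDeadline == (currentIndex+1)%periodDeadlines || // next deadline
		sectorDeadline == currentIndex || // current deadline
		(sectorDeadline+1)%periodDeadlines == currentIndex // previous deadline
}

For example, with 48 deadlines and the miner currently at index 0, sectors in deadlines 47, 0 and 1 are flagged; the two modulo operations handle the wrap at the period boundary in both directions.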
import ( + "os" + + dstore "github.com/ipfs/go-datastore" + "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + "gopkg.in/cheggaaa/pb.v1" "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/lotus/chain/store" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/backupds" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/repo" ) var backupCmd = lcli.BackupCmd("repo", repo.FullNode, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) { return lcli.GetFullNodeAPI(cctx) }) + +func restore(cctx *cli.Context, r repo.Repo) error { + bf, err := homedir.Expand(cctx.Path("restore")) + if err != nil { + return xerrors.Errorf("expand backup file path: %w", err) + } + + st, err := os.Stat(bf) + if err != nil { + return xerrors.Errorf("stat backup file (%s): %w", bf, err) + } + + f, err := os.Open(bf) + if err != nil { + return xerrors.Errorf("opening backup file: %w", err) + } + defer f.Close() // nolint:errcheck + + lr, err := r.Lock(repo.FullNode) + if err != nil { + return err + } + defer lr.Close() // nolint:errcheck + + if cctx.IsSet("restore-config") { + log.Info("Restoring config") + + cf, err := homedir.Expand(cctx.String("restore-config")) + if err != nil { + return xerrors.Errorf("expanding config path: %w", err) + } + + _, err = os.Stat(cf) + if err != nil { + return xerrors.Errorf("stat config file (%s): %w", cf, err) + } + + var cerr error + err = lr.SetConfig(func(raw interface{}) { + rcfg, ok := raw.(*config.FullNode) + if !ok { + cerr = xerrors.New("expected miner config") + return + } + + ff, err := config.FromFile(cf, rcfg) + if err != nil { + cerr = xerrors.Errorf("loading config: %w", err) + return + } + + *rcfg = *ff.(*config.FullNode) + }) + if cerr != nil { + return cerr + } + if err != nil { + return xerrors.Errorf("setting config: %w", err) + } + + } else { + log.Warn("--restore-config NOT SET, WILL USE DEFAULT VALUES") + } + + log.Info("Restoring metadata backup") + + mds, err := lr.Datastore("/metadata") + if err != nil { + return err + } + + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(f) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + bar.Start() + err = backupds.RestoreInto(br, mds) + bar.Finish() + + if err != nil { + return xerrors.Errorf("restoring metadata: %w", err) + } + + log.Info("Resetting chainstore metadata") + + chainHead := dstore.NewKey("head") + if err := mds.Delete(chainHead); err != nil { + return xerrors.Errorf("clearing chain head: %w", err) + } + if err := store.FlushValidationCache(mds); err != nil { + return xerrors.Errorf("clearing chain validation cache: %w", err) + } + + return nil +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 581238d4b..457fb1efb 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -144,6 +144,14 @@ var DaemonCmd = &cli.Command{ Name: "api-max-req-size", Usage: "maximum API request size accepted by the JSON RPC server", }, + &cli.PathFlag{ + Name: "restore", + Usage: "restore from backup file", + }, + &cli.PathFlag{ + Name: "restore-config", + Usage: "config file to use when restoring from backup", + }, }, Action: func(cctx *cli.Context) error { isLite := cctx.Bool("lite") @@ -203,9 +211,11 @@ var DaemonCmd = &cli.Command{ r.SetConfigPath(cctx.String("config")) } - if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists { + err = r.Init(repo.FullNode) + if err != nil && err != 
repo.ErrRepoExists { return xerrors.Errorf("repo init error: %w", err) } + freshRepo := err != repo.ErrRepoExists if !isLite { if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil { @@ -223,6 +233,15 @@ var DaemonCmd = &cli.Command{ genBytes = build.MaybeGenesis() } + if cctx.IsSet("restore") { + if !freshRepo { + return xerrors.Errorf("restoring from backup is only possible with a fresh repo!") + } + if err := restore(cctx, r); err != nil { + return xerrors.Errorf("restoring from backup: %w", err) + } + } + chainfile := cctx.String("import-chain") snapshot := cctx.String("import-snapshot") if chainfile != "" || snapshot != "" { diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go index 00d107707..bef2e982f 100644 --- a/cmd/tvx/codenames_test.go +++ b/cmd/tvx/codenames_test.go @@ -18,7 +18,7 @@ func TestProtocolCodenames(t *testing.T) { t.Fatal("expected breeze codename") } - if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(height) != "actorsv2" { + if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" { t.Fatal("expected actorsv2 codename") } diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index 89ad23913..e2fb787fb 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -1,63 +1,169 @@ package main import ( + "bufio" "encoding/json" "fmt" "io" "log" "os" + "path/filepath" + "strings" "github.com/fatih/color" + "github.com/filecoin-project/go-address" + cbornode "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" - "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/test-vectors/schema" + + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/conformance" + "github.com/filecoin-project/lotus/lib/blockstore" ) var execFlags struct { - file string + file string + out string + driverOpts cli.StringSlice + fallbackBlockstore bool } +const ( + optSaveBalances = "save-balances" +) + var execCmd = &cli.Command{ Name: "exec", - Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or a ndjson stdin stream", - Action: runExecLotus, + Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, a directory, or a ndjson stdin stream", + Action: runExec, Flags: []cli.Flag{ + &repoFlag, &cli.StringFlag{ Name: "file", - Usage: "input file; if not supplied, the vector will be read from stdin", + Usage: "input file or directory; if not supplied, the vector will be read from stdin", TakesFile: true, Destination: &execFlags.file, }, + &cli.BoolFlag{ + Name: "fallback-blockstore", + Usage: "sets the full node API as a fallback blockstore; use this if you're transplanting vectors and get block not found errors", + Destination: &execFlags.fallbackBlockstore, + }, + &cli.StringFlag{ + Name: "out", + Usage: "output directory where to save the results, only used when the input is a directory", + Destination: &execFlags.out, + }, + &cli.StringSliceFlag{ + Name: "driver-opt", + Usage: "comma-separated list of driver options (EXPERIMENTAL; will change), supported: 'save-balances=', 'pipeline-basefee' (unimplemented); only available in single-file mode", + Destination: &execFlags.driverOpts, + }, }, } -func runExecLotus(_ *cli.Context) error { - if file := execFlags.file; file != "" { - // we have a single test vector supplied as a file. 
- file, err := os.Open(file) - if err != nil { - return fmt.Errorf("failed to open test vector: %w", err) +func runExec(c *cli.Context) error { + if execFlags.fallbackBlockstore { + if err := initialize(c); err != nil { + return fmt.Errorf("fallback blockstore was enabled, but could not resolve lotus API endpoint: %w", err) } - - var ( - dec = json.NewDecoder(file) - tv schema.TestVector - ) - - if err = dec.Decode(&tv); err != nil { - return fmt.Errorf("failed to decode test vector: %w", err) - } - - return executeTestVector(tv) + defer destroy(c) //nolint:errcheck + conformance.FallbackBlockstoreGetter = FullAPI } + path := execFlags.file + if path == "" { + return execVectorsStdin() + } + + fi, err := os.Stat(path) + if err != nil { + return err + } + + if fi.IsDir() { + // we're in directory mode; ensure the out directory exists. + outdir := execFlags.out + if outdir == "" { + return fmt.Errorf("no output directory provided") + } + if err := ensureDir(outdir); err != nil { + return err + } + return execVectorDir(path, outdir) + } + + // process tipset vector options. + if err := processTipsetOpts(); err != nil { + return err + } + + _, err = execVectorFile(new(conformance.LogReporter), path) + return err +} + +func processTipsetOpts() error { + for _, opt := range execFlags.driverOpts.Value() { + switch ss := strings.Split(opt, "="); { + case ss[0] == optSaveBalances: + filename := ss[1] + log.Printf("saving balances after each tipset in: %s", filename) + balancesFile, err := os.Create(filename) + if err != nil { + return err + } + w := bufio.NewWriter(balancesFile) + cb := func(bs blockstore.Blockstore, params *conformance.ExecuteTipsetParams, res *conformance.ExecuteTipsetResult) { + cst := cbornode.NewCborStore(bs) + st, err := state.LoadStateTree(cst, res.PostStateRoot) + if err != nil { + return + } + _ = st.ForEach(func(addr address.Address, actor *types.Actor) error { + _, err := fmt.Fprintln(w, params.ExecEpoch, addr, actor.Balance) + return err + }) + _ = w.Flush() + } + conformance.TipsetVectorOpts.OnTipsetApplied = append(conformance.TipsetVectorOpts.OnTipsetApplied, cb) + + } + + } + return nil +} + +func execVectorDir(path string, outdir string) error { + files, err := filepath.Glob(filepath.Join(path, "*")) + if err != nil { + return fmt.Errorf("failed to glob input directory %s: %w", path, err) + } + for _, f := range files { + outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out" + outpath := filepath.Join(outdir, outfile) + outw, err := os.Create(outpath) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", outpath, err) + } + + log.Printf("processing vector %s; sending output to %s", f, outpath) + log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output. 
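A minimal sketch, not part of the patch: the save-balances driver option wired up in processTipsetOpts above writes one line per actor after every applied tipset, in the form produced by fmt.Fprintln(w, params.ExecEpoch, addr, actor.Balance), i.e. "<epoch> <address> <balance>" with the balance in attoFIL. A small, hypothetical reader for that file; the name balances.out is only an example matching a --driver-opt save-balances=balances.out invocation:

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// balances.out is whatever filename was passed via --driver-opt save-balances=...
	f, err := os.Open("balances.out")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Each record is "<epoch> <address> <balance>", one actor per line.
		fields := strings.Fields(scanner.Text())
		if len(fields) != 3 {
			continue // skip anything that isn't a balance record
		}
		fmt.Printf("epoch %s: %s holds %s attoFIL\n", fields[0], fields[1], fields[2])
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}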
+ _, _ = execVectorFile(new(conformance.LogReporter), f) + log.SetOutput(os.Stderr) + _ = outw.Close() + } + return nil +} + +func execVectorsStdin() error { + r := new(conformance.LogReporter) for dec := json.NewDecoder(os.Stdin); ; { var tv schema.TestVector switch err := dec.Decode(&tv); err { case nil: - if err = executeTestVector(tv); err != nil { + if _, err = executeTestVector(r, tv); err != nil { return err } case io.EOF: @@ -70,19 +176,30 @@ func runExecLotus(_ *cli.Context) error { } } -func executeTestVector(tv schema.TestVector) error { +func execVectorFile(r conformance.Reporter, path string) (diffs []string, error error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open test vector: %w", err) + } + + var tv schema.TestVector + if err = json.NewDecoder(file).Decode(&tv); err != nil { + return nil, fmt.Errorf("failed to decode test vector: %w", err) + } + return executeTestVector(r, tv) +} + +func executeTestVector(r conformance.Reporter, tv schema.TestVector) (diffs []string, err error) { log.Println("executing test vector:", tv.Meta.ID) for _, v := range tv.Pre.Variants { - r := new(conformance.LogReporter) - switch class, v := tv.Class, v; class { case "message": - conformance.ExecuteMessageVector(r, &tv, &v) + diffs, err = conformance.ExecuteMessageVector(r, &tv, &v) case "tipset": - conformance.ExecuteTipsetVector(r, &tv, &v) + diffs, err = conformance.ExecuteTipsetVector(r, &tv, &v) default: - return fmt.Errorf("test vector class %s not supported", class) + return nil, fmt.Errorf("test vector class %s not supported", class) } if r.Failed() { @@ -92,5 +209,5 @@ func executeTestVector(tv schema.TestVector) error { } } - return nil + return diffs, err } diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go index 894fa0fbc..a3d538abd 100644 --- a/cmd/tvx/extract.go +++ b/cmd/tvx/extract.go @@ -1,9 +1,6 @@ package main import ( - "bytes" - "compress/gzip" - "context" "encoding/json" "fmt" "io" @@ -11,20 +8,7 @@ import ( "os" "path/filepath" - "github.com/fatih/color" - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin" - init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/test-vectors/schema" - - "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" ) @@ -38,10 +22,12 @@ type extractOpts struct { block string class string cid string + tsk string file string retain string precursor string ignoreSanityChecks bool + squash bool } var extractFlags extractOpts @@ -56,7 +42,7 @@ var extractCmd = &cli.Command{ &repoFlag, &cli.StringFlag{ Name: "class", - Usage: "class of vector to extract; other required flags depend on the; values: 'message'", + Usage: "class of vector to extract; values: 'message', 'tipset'", Value: "message", Destination: &extractFlags.class, }, @@ -79,13 +65,17 @@ var extractCmd = &cli.Command{ &cli.StringFlag{ Name: "cid", Usage: "message CID to generate test vector from", - Required: true, Destination: &extractFlags.cid, }, + &cli.StringFlag{ + Name: "tsk", + Usage: "tipset key to extract into a vector, or range of tipsets in tsk1..tsk2 form", + Destination: &extractFlags.tsk, + }, &cli.StringFlag{ Name: "out", Aliases: []string{"o"}, - Usage: "file to write test vector to", + 
Usage: "file to write test vector to, or directory to write the batch to", Destination: &extractFlags.file, }, &cli.StringFlag{ @@ -110,303 +100,29 @@ var extractCmd = &cli.Command{ Value: false, Destination: &extractFlags.ignoreSanityChecks, }, + &cli.BoolFlag{ + Name: "squash", + Usage: "when extracting a tipset range, squash all tipsets into a single vector", + Value: false, + Destination: &extractFlags.squash, + }, }, } func runExtract(_ *cli.Context) error { - return doExtract(extractFlags) -} - -func doExtract(opts extractOpts) error { - ctx := context.Background() - - mcid, err := cid.Decode(opts.cid) - if err != nil { - return err - } - - msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block) - if err != nil { - return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) - } - - // get the circulating supply before the message was executed. - circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) - if err != nil { - return fmt.Errorf("failed while fetching circulating supply: %w", err) - } - - circSupply := circSupplyDetail.FilCirculating - - log.Printf("message was executed in tipset: %s", execTs.Key()) - log.Printf("message was included in tipset: %s", incTs.Key()) - log.Printf("circulating supply at inclusion tipset: %d", circSupply) - log.Printf("finding precursor messages using mode: %s", opts.precursor) - - // Fetch messages in canonical order from inclusion tipset. - msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) - if err != nil { - return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) - } - - related, found, err := findMsgAndPrecursors(opts.precursor, mcid, msg.From, msgs) - if err != nil { - return fmt.Errorf("failed while finding message and precursors: %w", err) - } - - if !found { - return fmt.Errorf("message not found; precursors found: %d", len(related)) - } - - var ( - precursors = related[:len(related)-1] - precursorsCids []cid.Cid - ) - - for _, p := range precursors { - precursorsCids = append(precursorsCids, p.Cid()) - } - - log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids)) - - var ( - // create a read-through store that uses ChainGetObject to fetch unknown CIDs. - pst = NewProxyingStores(ctx, FullAPI) - g = NewSurgeon(ctx, FullAPI, pst) - ) - - driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ - DisableVMFlush: true, - }) - - // this is the root of the state tree we start with. - root := incTs.ParentState() - log.Printf("base state tree root CID: %s", root) - - basefee := incTs.Blocks()[0].ParentBaseFee - log.Printf("basefee: %s", basefee) - - // on top of that state tree, we apply all precursors. - log.Printf("number of precursors to apply: %d", len(precursors)) - for i, m := range precursors { - log.Printf("applying precursor %d, cid: %s", i, m.Cid()) - _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: root, - Epoch: execTs.Height(), - Message: m, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - // recorded randomness will be discarded. 
- Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), - }) - if err != nil { - return fmt.Errorf("failed to execute precursor message: %w", err) - } - } - - var ( - preroot cid.Cid - postroot cid.Cid - applyret *vm.ApplyRet - carWriter func(w io.Writer) error - retention = opts.retain - - // recordingRand will record randomness so we can embed it in the test vector. - recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) - ) - - log.Printf("using state retention strategy: %s", retention) - switch retention { - case "accessed-cids": - tbs, ok := pst.Blockstore.(TracingBlockstore) - if !ok { - return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") - } - - tbs.StartTracing() - - preroot = root - applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, - }) - if err != nil { - return fmt.Errorf("failed to execute message: %w", err) - } - accessed := tbs.FinishTracing() - carWriter = func(w io.Writer) error { - return g.WriteCARIncluding(w, accessed, preroot, postroot) - } - - case "accessed-actors": - log.Printf("calculating accessed actors") - // get actors accessed by message. - retain, err := g.GetAccessedActors(ctx, FullAPI, mcid) - if err != nil { - return fmt.Errorf("failed to calculate accessed actors: %w", err) - } - // also append the reward actor and the burnt funds actor. - retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address) - log.Printf("calculated accessed actors: %v", retain) - - // get the masked state tree from the root, - preroot, err = g.GetMaskedStateTree(root, retain) - if err != nil { - return err - } - applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, - }) - if err != nil { - return fmt.Errorf("failed to execute message: %w", err) - } - carWriter = func(w io.Writer) error { - return g.WriteCAR(w, preroot, postroot) - } - + switch extractFlags.class { + case "message": + return doExtractMessage(extractFlags) + case "tipset": + return doExtractTipset(extractFlags) default: - return fmt.Errorf("unknown state retention option: %s", retention) + return fmt.Errorf("unsupported vector class") } - - log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot) - log.Println("performing sanity check on receipt") - - // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯ - // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2 - // This code is lenient and skips receipt comparison in case of a nil receipt. 
- rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key()) - if err != nil { - return fmt.Errorf("failed to find receipt on chain: %w", err) - } - log.Printf("found receipt: %+v", rec) - - // generate the schema receipt; if we got - var receipt *schema.Receipt - if rec != nil { - receipt = &schema.Receipt{ - ExitCode: int64(rec.ExitCode), - ReturnValue: rec.Return, - GasUsed: rec.GasUsed, - } - - reporter := new(conformance.LogReporter) - conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed") - if reporter.Failed() { - if opts.ignoreSanityChecks { - log.Println(color.YellowString("receipt sanity check failed; proceeding anyway")) - } else { - log.Println(color.RedString("receipt sanity check failed; aborting")) - return fmt.Errorf("vector generation aborted") - } - } else { - log.Println(color.GreenString("receipt sanity check succeeded")) - } - - } else { - receipt = &schema.Receipt{ - ExitCode: int64(applyret.ExitCode), - ReturnValue: applyret.Return, - GasUsed: applyret.GasUsed, - } - log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus")) - } - - log.Println("generating vector") - msgBytes, err := msg.Serialize() - if err != nil { - return err - } - - var ( - out = new(bytes.Buffer) - gw = gzip.NewWriter(out) - ) - if err := carWriter(gw); err != nil { - return err - } - if err = gw.Flush(); err != nil { - return err - } - if err = gw.Close(); err != nil { - return err - } - - version, err := FullAPI.Version(ctx) - if err != nil { - return err - } - - ntwkName, err := FullAPI.StateNetworkName(ctx) - if err != nil { - return err - } - - nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) - if err != nil { - return err - } - - codename := GetProtocolCodename(execTs.Height()) - - // Write out the test vector. - vector := schema.TestVector{ - Class: schema.ClassMessage, - Meta: &schema.Metadata{ - ID: opts.id, - // TODO need to replace schema.GenerationData with a more flexible - // data structure that makes no assumption about the traceability - // data that's being recorded; a flexible map[string]string - // would do. - Gen: []schema.GenerationData{ - {Source: fmt.Sprintf("network:%s", ntwkName)}, - {Source: fmt.Sprintf("message:%s", msg.Cid().String())}, - {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())}, - {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())}, - {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, - }, - Selector: schema.Selector{ - schema.SelectorMinProtocolVersion: codename, - }, - Randomness: recordingRand.Recorded(), - CAR: out.Bytes(), - Pre: &schema.Preconditions{ - Variants: []schema.Variant{ - {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)}, - }, - CircSupply: circSupply.Int, - BaseFee: basefee.Int, - StateTree: &schema.StateTree{ - RootCID: preroot, - }, - }, - ApplyMessages: []schema.Message{{Bytes: msgBytes}}, - Post: &schema.Postconditions{ - StateTree: &schema.StateTree{ - RootCID: postroot, - }, - Receipts: []*schema.Receipt{ - { - ExitCode: int64(applyret.ExitCode), - ReturnValue: applyret.Return, - GasUsed: applyret.GasUsed, - }, - }, - }, - } - - return writeVector(vector, opts.file) } -func writeVector(vector schema.TestVector, file string) (err error) { +// writeVector writes the vector into the specified file, or to stdout if +// file is empty. 
+func writeVector(vector *schema.TestVector, file string) (err error) { output := io.WriteCloser(os.Stdout) if file := file; file != "" { dir := filepath.Dir(file) @@ -426,101 +142,20 @@ func writeVector(vector schema.TestVector, file string) (err error) { return enc.Encode(&vector) } -// resolveFromChain queries the chain for the provided message, using the block CID to -// speed up the query, if provided -func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) { - // Extract the full message. - msg, err = api.ChainGetMessage(ctx, mcid) - if err != nil { - return nil, nil, nil, err +// writeVectors writes each vector to a different file under the specified +// directory. +func writeVectors(dir string, vectors ...*schema.TestVector) error { + // verify the output directory exists. + if err := ensureDir(dir); err != nil { + return err } - - log.Printf("found message with CID %s: %+v", mcid, msg) - - if block == "" { - log.Printf("locating message in blockchain") - - // Locate the message. - msgInfo, err := api.StateSearchMsg(ctx, mcid) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) - } - - log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) - - execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet) - return msg, execTs, incTs, err - } - - bcid, err := cid.Decode(block) - if err != nil { - return nil, nil, nil, err - } - - log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid) - - blk, err := api.ChainGetBlock(ctx, bcid) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) - } - - // types.EmptyTSK hints to use the HEAD. - execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err) - } - - // walk back from the execTs instead of HEAD, to save time. - incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key()) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err) - } - - return msg, execTs, incTs, nil -} - -// fetchThisAndPrevTipset returns the full tipset identified by the key, as well -// as the previous tipset. In the context of vector generation, the target -// tipset is the one where a message was executed, and the previous tipset is -// the one where the message was included. -func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) { - // get the tipset on which this message was "executed" on. - // https://github.com/filecoin-project/lotus/issues/2847 - targetTs, err = api.ChainGetTipSet(ctx, target) - if err != nil { - return nil, nil, err - } - // get the previous tipset, on which this message was mined, - // i.e. included on-chain. - prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents()) - if err != nil { - return nil, nil, err - } - return targetTs, prevTs, nil -} - -// findMsgAndPrecursors ranges through the canonical messages slice, locating -// the target message and returning precursors in accordance to the supplied -// mode. 
-func findMsgAndPrecursors(mode string, msgCid cid.Cid, sender address.Address, msgs []api.Message) (related []*types.Message, found bool, err error) { - // Range through canonicalised messages, selecting only the precursors based - // on selection mode. - for _, other := range msgs { - switch { - case mode == PrecursorSelectAll: - fallthrough - case mode == PrecursorSelectSender && other.Message.From == sender: - related = append(related, other.Message) - } - - // this message is the target; we're done. - if other.Cid == msgCid { - return related, true, nil + // write each vector to its file. + for _, v := range vectors { + id := v.Meta.ID + path := filepath.Join(dir, fmt.Sprintf("%s.json", id)) + if err := writeVector(v, path); err != nil { + return err } } - - // this could happen because a block contained related messages, but not - // the target (that is, messages with a lower nonce, but ultimately not the - // target). - return related, false, nil + return nil } diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go index 048271456..081678a17 100644 --- a/cmd/tvx/extract_many.go +++ b/cmd/tvx/extract_many.go @@ -189,7 +189,7 @@ func runExtractMany(c *cli.Context) error { precursor: PrecursorSelectSender, } - if err := doExtract(opts); err != nil { + if err := doExtractMessage(opts); err != nil { log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'all' precursor selection", mcid, err)) retry = append(retry, opts) continue @@ -206,7 +206,7 @@ func runExtractMany(c *cli.Context) error { log.Printf("retrying %s: %s", r.cid, r.id) r.precursor = PrecursorSelectAll - if err := doExtract(r); err != nil { + if err := doExtractMessage(r); err != nil { merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err)) continue } diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go new file mode 100644 index 000000000..0c2fcff4a --- /dev/null +++ b/cmd/tvx/extract_message.go @@ -0,0 +1,416 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "log" + + "github.com/fatih/color" + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/conformance" + + "github.com/filecoin-project/test-vectors/schema" + + "github.com/ipfs/go-cid" +) + +func doExtractMessage(opts extractOpts) error { + ctx := context.Background() + + if opts.cid == "" { + return fmt.Errorf("missing message CID") + } + + mcid, err := cid.Decode(opts.cid) + if err != nil { + return err + } + + msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block) + if err != nil { + return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) + } + + // get the circulating supply before the message was executed. 
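+	// The circulating supply at this point is an input to VM execution (actors
+	// can query it at runtime, e.g. for pledge and penalty calculations), so it
+	// is recorded here and later embedded in the vector's preconditions to keep
+	// replays deterministic.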
+ circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) + if err != nil { + return fmt.Errorf("failed while fetching circulating supply: %w", err) + } + + circSupply := circSupplyDetail.FilCirculating + + log.Printf("message was executed in tipset: %s", execTs.Key()) + log.Printf("message was included in tipset: %s", incTs.Key()) + log.Printf("circulating supply at inclusion tipset: %d", circSupply) + log.Printf("finding precursor messages using mode: %s", opts.precursor) + + // Fetch messages in canonical order from inclusion tipset. + msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) + if err != nil { + return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) + } + + related, found, err := findMsgAndPrecursors(opts.precursor, mcid, msg.From, msgs) + if err != nil { + return fmt.Errorf("failed while finding message and precursors: %w", err) + } + + if !found { + return fmt.Errorf("message not found; precursors found: %d", len(related)) + } + + var ( + precursors = related[:len(related)-1] + precursorsCids []cid.Cid + ) + + for _, p := range precursors { + precursorsCids = append(precursorsCids, p.Cid()) + } + + log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids)) + + var ( + // create a read-through store that uses ChainGetObject to fetch unknown CIDs. + pst = NewProxyingStores(ctx, FullAPI) + g = NewSurgeon(ctx, FullAPI, pst) + ) + + driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ + DisableVMFlush: true, + }) + + // this is the root of the state tree we start with. + root := incTs.ParentState() + log.Printf("base state tree root CID: %s", root) + + basefee := incTs.Blocks()[0].ParentBaseFee + log.Printf("basefee: %s", basefee) + + // on top of that state tree, we apply all precursors. + log.Printf("number of precursors to apply: %d", len(precursors)) + for i, m := range precursors { + log.Printf("applying precursor %d, cid: %s", i, m.Cid()) + _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: root, + Epoch: execTs.Height(), + Message: m, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + // recorded randomness will be discarded. + Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + }) + if err != nil { + return fmt.Errorf("failed to execute precursor message: %w", err) + } + } + + var ( + preroot cid.Cid + postroot cid.Cid + applyret *vm.ApplyRet + carWriter func(w io.Writer) error + retention = opts.retain + + // recordingRand will record randomness so we can embed it in the test vector. 
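+		// When the vector is later replayed offline, the conformance runner feeds
+		// these recorded values back through a replaying vm.Rand, so execution
+		// does not need a live chain to resolve randomness lookups.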
+ recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) + ) + + log.Printf("using state retention strategy: %s", retention) + switch retention { + case "accessed-cids": + tbs, ok := pst.Blockstore.(TracingBlockstore) + if !ok { + return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") + } + + tbs.StartTracing() + + preroot = root + applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + }) + if err != nil { + return fmt.Errorf("failed to execute message: %w", err) + } + accessed := tbs.FinishTracing() + carWriter = func(w io.Writer) error { + return g.WriteCARIncluding(w, accessed, preroot, postroot) + } + + case "accessed-actors": + log.Printf("calculating accessed actors") + // get actors accessed by message. + retain, err := g.GetAccessedActors(ctx, FullAPI, mcid) + if err != nil { + return fmt.Errorf("failed to calculate accessed actors: %w", err) + } + // also append the reward actor and the burnt funds actor. + retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address) + log.Printf("calculated accessed actors: %v", retain) + + // get the masked state tree from the root, + preroot, err = g.GetMaskedStateTree(root, retain) + if err != nil { + return err + } + applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + }) + if err != nil { + return fmt.Errorf("failed to execute message: %w", err) + } + carWriter = func(w io.Writer) error { + return g.WriteCAR(w, preroot, postroot) + } + + default: + return fmt.Errorf("unknown state retention option: %s", retention) + } + + log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot) + log.Println("performing sanity check on receipt") + + // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯ + // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2 + // This code is lenient and skips receipt comparison in case of a nil receipt. 
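+	// When a receipt is returned, it is compared below against the locally
+	// computed ApplyRet; a mismatch aborts vector generation unless
+	// opts.ignoreSanityChecks is set.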
+ rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key()) + if err != nil { + return fmt.Errorf("failed to find receipt on chain: %w", err) + } + log.Printf("found receipt: %+v", rec) + + // generate the schema receipt; if we got + var receipt *schema.Receipt + if rec != nil { + receipt = &schema.Receipt{ + ExitCode: int64(rec.ExitCode), + ReturnValue: rec.Return, + GasUsed: rec.GasUsed, + } + + reporter := new(conformance.LogReporter) + conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed") + if reporter.Failed() { + if opts.ignoreSanityChecks { + log.Println(color.YellowString("receipt sanity check failed; proceeding anyway")) + } else { + log.Println(color.RedString("receipt sanity check failed; aborting")) + return fmt.Errorf("vector generation aborted") + } + } else { + log.Println(color.GreenString("receipt sanity check succeeded")) + } + + } else { + receipt = &schema.Receipt{ + ExitCode: int64(applyret.ExitCode), + ReturnValue: applyret.Return, + GasUsed: applyret.GasUsed, + } + log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus")) + } + + log.Println("generating vector") + msgBytes, err := msg.Serialize() + if err != nil { + return err + } + + var ( + out = new(bytes.Buffer) + gw = gzip.NewWriter(out) + ) + if err := carWriter(gw); err != nil { + return err + } + if err = gw.Flush(); err != nil { + return err + } + if err = gw.Close(); err != nil { + return err + } + + version, err := FullAPI.Version(ctx) + if err != nil { + return err + } + + ntwkName, err := FullAPI.StateNetworkName(ctx) + if err != nil { + return err + } + + nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) + if err != nil { + return err + } + + codename := GetProtocolCodename(execTs.Height()) + + // Write out the test vector. + vector := schema.TestVector{ + Class: schema.ClassMessage, + Meta: &schema.Metadata{ + ID: opts.id, + // TODO need to replace schema.GenerationData with a more flexible + // data structure that makes no assumption about the traceability + // data that's being recorded; a flexible map[string]string + // would do. 
+ Gen: []schema.GenerationData{ + {Source: fmt.Sprintf("network:%s", ntwkName)}, + {Source: fmt.Sprintf("message:%s", msg.Cid().String())}, + {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())}, + {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())}, + {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, + }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, + Randomness: recordingRand.Recorded(), + CAR: out.Bytes(), + Pre: &schema.Preconditions{ + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)}, + }, + CircSupply: circSupply.Int, + BaseFee: basefee.Int, + StateTree: &schema.StateTree{ + RootCID: preroot, + }, + }, + ApplyMessages: []schema.Message{{Bytes: msgBytes}}, + Post: &schema.Postconditions{ + StateTree: &schema.StateTree{ + RootCID: postroot, + }, + Receipts: []*schema.Receipt{ + { + ExitCode: int64(applyret.ExitCode), + ReturnValue: applyret.Return, + GasUsed: applyret.GasUsed, + }, + }, + }, + } + return writeVector(&vector, opts.file) +} + +// resolveFromChain queries the chain for the provided message, using the block CID to +// speed up the query, if provided +func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) { + // Extract the full message. + msg, err = api.ChainGetMessage(ctx, mcid) + if err != nil { + return nil, nil, nil, err + } + + log.Printf("found message with CID %s: %+v", mcid, msg) + + if block == "" { + log.Printf("locating message in blockchain") + + // Locate the message. + msgInfo, err := api.StateSearchMsg(ctx, mcid) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) + } + + log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) + + execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet) + return msg, execTs, incTs, err + } + + bcid, err := cid.Decode(block) + if err != nil { + return nil, nil, nil, err + } + + log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid) + + blk, err := api.ChainGetBlock(ctx, bcid) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) + } + + // types.EmptyTSK hints to use the HEAD. + execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err) + } + + // walk back from the execTs instead of HEAD, to save time. + incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key()) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err) + } + + return msg, execTs, incTs, nil +} + +// fetchThisAndPrevTipset returns the full tipset identified by the key, as well +// as the previous tipset. In the context of vector generation, the target +// tipset is the one where a message was executed, and the previous tipset is +// the one where the message was included. +func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) { + // get the tipset on which this message was "executed" on. 
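+	// (Filecoin defers execution: messages included in a tipset at height H are
+	// applied while the following tipset is processed, which is why the
+	// execution and inclusion tipsets are tracked separately here.)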
+ // https://github.com/filecoin-project/lotus/issues/2847 + targetTs, err = api.ChainGetTipSet(ctx, target) + if err != nil { + return nil, nil, err + } + // get the previous tipset, on which this message was mined, + // i.e. included on-chain. + prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents()) + if err != nil { + return nil, nil, err + } + return targetTs, prevTs, nil +} + +// findMsgAndPrecursors ranges through the canonical messages slice, locating +// the target message and returning precursors in accordance to the supplied +// mode. +func findMsgAndPrecursors(mode string, msgCid cid.Cid, sender address.Address, msgs []api.Message) (related []*types.Message, found bool, err error) { + // Range through canonicalised messages, selecting only the precursors based + // on selection mode. + for _, other := range msgs { + switch { + case mode == PrecursorSelectAll: + fallthrough + case mode == PrecursorSelectSender && other.Message.From == sender: + related = append(related, other.Message) + } + + // this message is the target; we're done. + if other.Cid == msgCid { + return related, true, nil + } + } + + // this could happen because a block contained related messages, but not + // the target (that is, messages with a lower nonce, but ultimately not the + // target). + return related, false, nil +} diff --git a/cmd/tvx/extract_tipset.go b/cmd/tvx/extract_tipset.go new file mode 100644 index 000000000..05e856aa1 --- /dev/null +++ b/cmd/tvx/extract_tipset.go @@ -0,0 +1,277 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "log" + "strings" + + "github.com/filecoin-project/test-vectors/schema" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/conformance" +) + +func doExtractTipset(opts extractOpts) error { + ctx := context.Background() + + if opts.retain != "accessed-cids" { + return fmt.Errorf("tipset extraction only supports 'accessed-cids' state retention") + } + + if opts.tsk == "" { + return fmt.Errorf("tipset key cannot be empty") + } + + ss := strings.Split(opts.tsk, "..") + switch len(ss) { + case 1: // extracting a single tipset. + ts, err := lcli.ParseTipSetRef(ctx, FullAPI, opts.tsk) + if err != nil { + return fmt.Errorf("failed to fetch tipset: %w", err) + } + v, err := extractTipsets(ctx, ts) + if err != nil { + return err + } + return writeVector(v, opts.file) + + case 2: // extracting a range of tipsets. + left, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[0]) + if err != nil { + return fmt.Errorf("failed to fetch tipset %s: %w", ss[0], err) + } + right, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[1]) + if err != nil { + return fmt.Errorf("failed to fetch tipset %s: %w", ss[1], err) + } + + // resolve the tipset range. + tss, err := resolveTipsetRange(ctx, left, right) + if err != nil { + return err + } + + // are are squashing all tipsets into a single multi-tipset vector? + if opts.squash { + vector, err := extractTipsets(ctx, tss...) + if err != nil { + return err + } + return writeVector(vector, opts.file) + } + + // we are generating a single-tipset vector per tipset. + vectors, err := extractIndividualTipsets(ctx, tss...) + if err != nil { + return err + } + return writeVectors(opts.file, vectors...) 
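+		// note: in this per-tipset mode, opts.file is treated as an output
+		// directory and writeVectors emits one JSON file per vector, named
+		// after the vector's ID.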
+ + default: + return fmt.Errorf("unrecognized tipset format") + } +} + +func resolveTipsetRange(ctx context.Context, left *types.TipSet, right *types.TipSet) (tss []*types.TipSet, err error) { + // start from the right tipset and walk back the chain until the left tipset, inclusive. + for curr := right; curr.Key() != left.Parents(); { + tss = append(tss, curr) + curr, err = FullAPI.ChainGetTipSet(ctx, curr.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to get tipset %s (height: %d): %w", curr.Parents(), curr.Height()-1, err) + } + } + // reverse the slice. + for i, j := 0, len(tss)-1; i < j; i, j = i+1, j-1 { + tss[i], tss[j] = tss[j], tss[i] + } + return tss, nil +} + +func extractIndividualTipsets(ctx context.Context, tss ...*types.TipSet) (vectors []*schema.TestVector, err error) { + for _, ts := range tss { + v, err := extractTipsets(ctx, ts) + if err != nil { + return nil, err + } + vectors = append(vectors, v) + } + return vectors, nil +} + +func extractTipsets(ctx context.Context, tss ...*types.TipSet) (*schema.TestVector, error) { + var ( + // create a read-through store that uses ChainGetObject to fetch unknown CIDs. + pst = NewProxyingStores(ctx, FullAPI) + g = NewSurgeon(ctx, FullAPI, pst) + + // recordingRand will record randomness so we can embed it in the test vector. + recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) + ) + + tbs, ok := pst.Blockstore.(TracingBlockstore) + if !ok { + return nil, fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") + } + + driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ + DisableVMFlush: true, + }) + + base := tss[0] + last := tss[len(tss)-1] + + // this is the root of the state tree we start with. + root := base.ParentState() + log.Printf("base state tree root CID: %s", root) + + codename := GetProtocolCodename(base.Height()) + nv, err := FullAPI.StateNetworkVersion(ctx, base.Key()) + if err != nil { + return nil, err + } + + version, err := FullAPI.Version(ctx) + if err != nil { + return nil, err + } + + ntwkName, err := FullAPI.StateNetworkName(ctx) + if err != nil { + return nil, err + } + + vector := schema.TestVector{ + Class: schema.ClassTipset, + Meta: &schema.Metadata{ + ID: fmt.Sprintf("@%d..@%d", base.Height(), last.Height()), + Gen: []schema.GenerationData{ + {Source: fmt.Sprintf("network:%s", ntwkName)}, + {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, + // will be completed by extra tipset stamps. 
+ }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, + Pre: &schema.Preconditions{ + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(base.Height()), NetworkVersion: uint(nv)}, + }, + StateTree: &schema.StateTree{ + RootCID: base.ParentState(), + }, + }, + Post: &schema.Postconditions{ + StateTree: new(schema.StateTree), + }, + } + + tbs.StartTracing() + + roots := []cid.Cid{base.ParentState()} + for i, ts := range tss { + log.Printf("tipset %s block count: %d", ts.Key(), len(ts.Blocks())) + + var blocks []schema.Block + for _, b := range ts.Blocks() { + msgs, err := FullAPI.ChainGetBlockMessages(ctx, b.Cid()) + if err != nil { + return nil, fmt.Errorf("failed to get block messages (cid: %s): %w", b.Cid(), err) + } + + log.Printf("block %s has %d messages", b.Cid(), len(msgs.Cids)) + + packed := make([]schema.Base64EncodedBytes, 0, len(msgs.Cids)) + for _, m := range msgs.BlsMessages { + b, err := m.Serialize() + if err != nil { + return nil, fmt.Errorf("failed to serialize message: %w", err) + } + packed = append(packed, b) + } + for _, m := range msgs.SecpkMessages { + b, err := m.Message.Serialize() + if err != nil { + return nil, fmt.Errorf("failed to serialize message: %w", err) + } + packed = append(packed, b) + } + blocks = append(blocks, schema.Block{ + MinerAddr: b.Miner, + WinCount: b.ElectionProof.WinCount, + Messages: packed, + }) + } + + basefee := base.Blocks()[0].ParentBaseFee + log.Printf("tipset basefee: %s", basefee) + + tipset := schema.Tipset{ + BaseFee: *basefee.Int, + Blocks: blocks, + EpochOffset: int64(i), + } + + params := conformance.ExecuteTipsetParams{ + Preroot: roots[len(roots)-1], + ParentEpoch: ts.Height() - 1, + Tipset: &tipset, + ExecEpoch: ts.Height(), + Rand: recordingRand, + } + + result, err := driver.ExecuteTipset(pst.Blockstore, pst.Datastore, params) + if err != nil { + return nil, fmt.Errorf("failed to execute tipset: %w", err) + } + + roots = append(roots, result.PostStateRoot) + + // update the vector. + vector.ApplyTipsets = append(vector.ApplyTipsets, tipset) + vector.Post.ReceiptsRoots = append(vector.Post.ReceiptsRoots, result.ReceiptsRoot) + + for _, res := range result.AppliedResults { + vector.Post.Receipts = append(vector.Post.Receipts, &schema.Receipt{ + ExitCode: int64(res.ExitCode), + ReturnValue: res.Return, + GasUsed: res.GasUsed, + }) + } + + vector.Meta.Gen = append(vector.Meta.Gen, schema.GenerationData{ + Source: "tipset:" + ts.Key().String(), + }) + } + + accessed := tbs.FinishTracing() + + // + // ComputeBaseFee(ctx, baseTs) + + // write a CAR with the accessed state into a buffer. + var ( + out = new(bytes.Buffer) + gw = gzip.NewWriter(out) + ) + if err := g.WriteCARIncluding(gw, accessed, roots...); err != nil { + return nil, err + } + if err = gw.Flush(); err != nil { + return nil, err + } + if err = gw.Close(); err != nil { + return nil, err + } + + vector.Randomness = recordingRand.Recorded() + vector.Post.StateTree.RootCID = roots[len(roots)-1] + vector.CAR = out.Bytes() + + return &vector, nil +} diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go index 8de851ed5..94a656c3e 100644 --- a/cmd/tvx/main.go +++ b/cmd/tvx/main.go @@ -102,7 +102,7 @@ func initialize(c *cli.Context) error { // Make the API client. 
var err error if FullAPI, Closer, err = lcli.GetFullNodeAPI(c); err != nil { - err = fmt.Errorf("failed to locate Lotus node; ") + err = fmt.Errorf("failed to locate Lotus node; err: %w", err) } return err } @@ -113,3 +113,19 @@ func destroy(_ *cli.Context) error { } return nil } + +func ensureDir(path string) error { + switch fi, err := os.Stat(path); { + case os.IsNotExist(err): + if err := os.MkdirAll(path, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", path, err) + } + case err == nil: + if !fi.IsDir() { + return fmt.Errorf("path %s is not a directory: %w", path, err) + } + default: + return fmt.Errorf("failed to stat directory %s: %w", path, err) + } + return nil +} diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go index 82b2bc118..7a33707dc 100644 --- a/cmd/tvx/simulate.go +++ b/cmd/tvx/simulate.go @@ -202,7 +202,7 @@ func runSimulateCmd(_ *cli.Context) error { }, } - if err := writeVector(vector, simulateFlags.out); err != nil { + if err := writeVector(&vector, simulateFlags.out); err != nil { return fmt.Errorf("failed to write vector: %w", err) } diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 4f574c175..e160929da 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -149,3 +149,14 @@ func (pb *proxyingBlockstore) Put(block blocks.Block) error { pb.lk.Unlock() return pb.Blockstore.Put(block) } + +func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { + pb.lk.Lock() + if pb.tracing { + for _, b := range blocks { + pb.traced[b.Cid()] = struct{}{} + } + } + pb.lk.Unlock() + return pb.Blockstore.PutMany(blocks) +} diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go index a09f9a8d3..b9ba062cc 100644 --- a/conformance/corpus_test.go +++ b/conformance/corpus_test.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/test-vectors/schema" ) -var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant){ +var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant) ([]string, error){ schema.ClassMessage: ExecuteMessageVector, schema.ClassTipset: ExecuteTipsetVector, } @@ -133,7 +133,7 @@ func TestConformance(t *testing.T) { for _, variant := range vector.Pre.Variants { variant := variant t.Run(variant.ID, func(t *testing.T) { - invokee(t, &vector, &variant) + _, _ = invokee(t, &vector, &variant) //nolint:errcheck }) } }) diff --git a/conformance/driver.go b/conformance/driver.go index 833d50d7b..98436cf96 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -71,26 +71,48 @@ type ExecuteTipsetResult struct { AppliedMessages []*types.Message // AppliedResults stores the results of AppliedMessages, in the same order. AppliedResults []*vm.ApplyRet + + // PostBaseFee returns the basefee after applying this tipset. + PostBaseFee abi.TokenAmount +} + +type ExecuteTipsetParams struct { + Preroot cid.Cid + // ParentEpoch is the last epoch in which an actual tipset was processed. This + // is used by Lotus for null block counting and cron firing. + ParentEpoch abi.ChainEpoch + Tipset *schema.Tipset + ExecEpoch abi.ChainEpoch + // Rand is an optional vm.Rand implementation to use. If nil, the driver + // will use a vm.Rand that returns a fixed value for all calls. + Rand vm.Rand + // BaseFee if not nil or zero, will override the basefee of the tipset. + BaseFee abi.TokenAmount } // ExecuteTipset executes the supplied tipset on top of the state represented // by the preroot CID. // -// parentEpoch is the last epoch in which an actual tipset was processed. 
This -// is used by Lotus for null block counting and cron firing. -// // This method returns the the receipts root, the poststate root, and the VM // message results. The latter _include_ implicit messages, such as cron ticks // and reward withdrawal per miner. -func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset, execEpoch abi.ChainEpoch) (*ExecuteTipsetResult, error) { +func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params ExecuteTipsetParams) (*ExecuteTipsetResult, error) { var ( + tipset = params.Tipset syscalls = vm.Syscalls(ffiwrapper.ProofVerifier) - vmRand = NewFixedRand() cs = store.NewChainStore(bs, bs, ds, syscalls, nil) sm = stmgr.NewStateManager(cs) ) + if params.Rand == nil { + params.Rand = NewFixedRand() + } + + if params.BaseFee.NilOrZero() { + params.BaseFee = abi.NewTokenAmount(tipset.BaseFee.Int64()) + } + defer cs.Close() //nolint:errcheck blocks := make([]store.BlockMessages, 0, len(tipset.Blocks)) @@ -122,15 +144,23 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot var ( messages []*types.Message results []*vm.ApplyRet - - basefee = abi.NewTokenAmount(tipset.BaseFee.Int64()) ) - postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, execEpoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { + recordOutputs := func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { messages = append(messages, msg) results = append(results, ret) return nil - }, basefee, nil) + } + postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), + params.ParentEpoch, + params.Preroot, + blocks, + params.ExecEpoch, + params.Rand, + recordOutputs, + params.BaseFee, + nil, + ) if err != nil { return nil, err diff --git a/conformance/runner.go b/conformance/runner.go index 6f9d73305..8ced484c9 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -14,6 +14,8 @@ import ( "github.com/fatih/color" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/hashicorp/go-multierror" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -29,8 +31,27 @@ import ( "github.com/filecoin-project/lotus/lib/blockstore" ) +// FallbackBlockstoreGetter is a fallback blockstore to use for resolving CIDs +// unknown to the test vector. This is rarely used, usually only needed +// when transplanting vectors across versions. This is an interface tighter +// than ChainModuleAPI. It can be backed by a FullAPI client. +var FallbackBlockstoreGetter interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) +} + +var TipsetVectorOpts struct { + // PipelineBaseFee pipelines the basefee in multi-tipset vectors from one + // tipset to another. Basefees in the vector are ignored, except for that of + // the first tipset. UNUSED. + PipelineBaseFee bool + + // OnTipsetApplied contains callback functions called after a tipset has been + // applied. + OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult) +} + // ExecuteMessageVector executes a message-class test vector. 
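+// With this change the vector executors no longer fail the Reporter outright on
+// a post-state mismatch: they return any statediff output alongside a
+// multierror, so programmatic callers (e.g. tvx) can inspect the differences.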
-func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { +func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() baseEpoch = variant.Epoch @@ -38,7 +59,7 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema ) // Load the CAR into a new temporary Blockstore. - bs, err := LoadVectorCAR(vector.CAR) + bs, err := LoadBlockstore(vector.CAR) if err != nil { r.Fatalf("failed to load the vector CAR: %w", err) } @@ -79,14 +100,16 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // Once all messages are applied, assert that the final state root matches // the expected postcondition root. if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { - r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) - dumpThreeWayStateDiff(r, vector, bs, root) - r.FailNow() + ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) + diffs = dumpThreeWayStateDiff(r, vector, bs, root) } + return diffs, err } // ExecuteTipsetVector executes a tipset-class test vector. -func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { +func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() baseEpoch = abi.ChainEpoch(variant.Epoch) @@ -95,9 +118,10 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. ) // Load the vector CAR into a new temporary Blockstore. - bs, err := LoadVectorCAR(vector.CAR) + bs, err := LoadBlockstore(vector.CAR) if err != nil { r.Fatalf("failed to load the vector CAR: %w", err) + return nil, err } // Create a new Driver. @@ -109,9 +133,22 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. for i, ts := range vector.ApplyTipsets { ts := ts // capture execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset) - ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts, execEpoch) + params := ExecuteTipsetParams{ + Preroot: root, + ParentEpoch: prevEpoch, + Tipset: &ts, + ExecEpoch: execEpoch, + Rand: NewReplayingRand(r, vector.Randomness), + } + ret, err := driver.ExecuteTipset(bs, tmpds, params) if err != nil { - r.Fatalf("failed to apply tipset %d message: %s", i, err) + r.Fatalf("failed to apply tipset %d: %s", i, err) + return nil, err + } + + // invoke callbacks. + for _, cb := range TipsetVectorOpts.OnTipsetApplied { + cb(bs, ¶ms, ret) } for j, v := range ret.AppliedResults { @@ -121,7 +158,9 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. // Compare the receipts root. if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual { - r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) + ierr := fmt.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) } prevEpoch = execEpoch @@ -131,10 +170,12 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. // Once all messages are applied, assert that the final state root matches // the expected postcondition root. 
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { - r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) - dumpThreeWayStateDiff(r, vector, bs, root) - r.FailNow() + ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) + diffs = dumpThreeWayStateDiff(r, vector, bs, root) } + return diffs, err } // AssertMsgResult compares a message result. It takes the expected receipt @@ -154,7 +195,7 @@ func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, } } -func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) { +func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) []string { // check if statediff exists; if not, skip. if err := exec.Command("statediff", "--help").Run(); err != nil { r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found") @@ -163,7 +204,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. r.Log("$ cd statediff") r.Log("$ go generate ./...") r.Log("$ go install ./cmd/statediff") - return + return nil } tmpCar, err := writeStateToTempCAR(bs, @@ -173,6 +214,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. ) if err != nil { r.Fatalf("failed to write temporary state CAR: %s", err) + return nil } defer os.RemoveAll(tmpCar) //nolint:errcheck @@ -187,28 +229,43 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]") ) - printDiff := func(left, right cid.Cid) { + diff := func(left, right cid.Cid) string { cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String()) b, err := cmd.CombinedOutput() if err != nil { r.Fatalf("statediff failed: %s", err) } - r.Log(string(b)) + return string(b) } bold := color.New(color.Bold).SprintfFunc() + r.Log(bold("-----BEGIN STATEDIFF-----")) + // run state diffs. 
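+	// Three pairwise diffs are produced: Δ1 expected-post vs actual-post,
+	// Δ2 pre vs actual-post, and Δ3 pre vs expected-post; each is logged and
+	// also returned to the caller.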
r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c)) r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b)) - printDiff(vector.Post.StateTree.RootCID, actual) + diffA := diff(vector.Post.StateTree.RootCID, actual) + r.Log(bold("----------BEGIN STATEDIFF A----------")) + r.Log(diffA) + r.Log(bold("----------END STATEDIFF A----------")) r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b)) - printDiff(vector.Pre.StateTree.RootCID, actual) + diffB := diff(vector.Pre.StateTree.RootCID, actual) + r.Log(bold("----------BEGIN STATEDIFF B----------")) + r.Log(diffB) + r.Log(bold("----------END STATEDIFF B----------")) r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a)) - printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID) + diffC := diff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID) + r.Log(bold("----------BEGIN STATEDIFF C----------")) + r.Log(diffC) + r.Log(bold("----------END STATEDIFF C----------")) + + r.Log(bold("-----END STATEDIFF-----")) + + return []string{diffA, diffB, diffC} } // writeStateToTempCAR writes the provided roots to a temporary CAR that'll be @@ -248,8 +305,8 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er return tmp.Name(), nil } -func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) { - bs := blockstore.NewTemporary() +func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) { + bs := blockstore.Blockstore(blockstore.NewTemporary()) // Read the base64-encoded CAR from the vector, and inflate the gzip. buf := bytes.NewReader(vectorCAR) @@ -264,5 +321,18 @@ func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, if err != nil { return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) } + + if FallbackBlockstoreGetter != nil { + fbs := &blockstore.FallbackStore{Blockstore: bs} + fbs.SetFallback(func(ctx context.Context, c cid.Cid) (blocks.Block, error) { + b, err := FallbackBlockstoreGetter.ChainReadObj(ctx, c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(b, c) + }) + bs = fbs + } + return bs, nil } diff --git a/documentation/en/api-methods-miner.md b/documentation/en/api-methods-miner.md index 0a6f8ec27..66512a02c 100644 --- a/documentation/en/api-methods-miner.md +++ b/documentation/en/api-methods-miner.md @@ -99,6 +99,9 @@ * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) * [SectorSetSealDelay](#SectorSetSealDelay) * [SectorStartSealing](#SectorStartSealing) + * [SectorTerminate](#SectorTerminate) + * [SectorTerminateFlush](#SectorTerminateFlush) + * [SectorTerminatePending](#SectorTerminatePending) * [Sectors](#Sectors) * [SectorsList](#SectorsList) * [SectorsListInStates](#SectorsListInStates) @@ -193,7 +196,8 @@ Response: ```json { "PreCommitControl": null, - "CommitControl": null + "CommitControl": null, + "TerminateControl": null } ``` @@ -1475,7 +1479,9 @@ Inputs: Response: `{}` ### SectorRemove -There are not yet any comments for this method. +SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can +be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. 
+ Perms: admin @@ -1535,6 +1541,43 @@ Inputs: Response: `{}` +### SectorTerminate +SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then +automatically removes it from storage + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorTerminateFlush +SectorTerminateFlush immediately sends a terminate message with sectors batched for termination. +Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorTerminatePending +SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message + + +Perms: admin + +Inputs: `null` + +Response: `null` + ## Sectors diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index 9f46460f1..2c75ecaaa 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -68,6 +68,8 @@ * [LogList](#LogList) * [LogSetLevel](#LogSetLevel) * [Market](#Market) + * [MarketAddBalance](#MarketAddBalance) + * [MarketGetReserved](#MarketGetReserved) * [MarketReleaseFunds](#MarketReleaseFunds) * [MarketReserveFunds](#MarketReserveFunds) * [MarketWithdraw](#MarketWithdraw) @@ -175,6 +177,7 @@ * [StateReadState](#StateReadState) * [StateReplay](#StateReplay) * [StateSearchMsg](#StateSearchMsg) + * [StateSearchMsgLimited](#StateSearchMsgLimited) * [StateSectorExpiration](#StateSectorExpiration) * [StateSectorGetInfo](#StateSectorGetInfo) * [StateSectorPartition](#StateSectorPartition) @@ -1653,6 +1656,43 @@ Response: `{}` ## Market +### MarketAddBalance +MarketAddBalance adds funds to the market actor + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketGetReserved +MarketGetReserved gets the amount of funds that are currently reserved for the address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + ### MarketReleaseFunds MarketReleaseFunds releases funds reserved by MarketReserveFunds @@ -3988,7 +4028,7 @@ Response: "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Multiaddrs": null, - "SealProofType": 8, + "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, "ConsensusFaultElapsed": 10101 @@ -4308,7 +4348,7 @@ Inputs: ] ``` -Response: `8` +Response: `9` ### StateReadState StateReadState returns the indicated actor's state. 
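For orientation, here is a minimal Go sketch of driving the endpoints added in this change (`MarketAddBalance`, `MarketGetReserved`, and `StateSearchMsgLimited`, documented just below) through the `api.FullNode` client interface. It assumes the Go bindings mirror the JSON shapes shown in these docs; the node connection, the addresses, the amount, and the look-back window are placeholders.

```go
// Sketch only: assumes an already-connected api.FullNode client.
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

func demo(ctx context.Context, node api.FullNode, wallet, client address.Address, mcid cid.Cid) error {
	// Top up the market escrow for `client`, paid from `wallet` (amount is a placeholder).
	pushCid, err := node.MarketAddBalance(ctx, wallet, client, types.NewInt(1_000_000))
	if err != nil {
		return err
	}
	fmt.Println("add-balance message pushed:", pushCid)

	// How much is currently reserved for this address?
	reserved, err := node.MarketGetReserved(ctx, client)
	if err != nil {
		return err
	}
	fmt.Println("reserved:", reserved)

	// Look the message up, but only within the last 100 epochs.
	lookup, err := node.StateSearchMsgLimited(ctx, mcid, abi.ChainEpoch(100))
	if err != nil {
		return err
	}
	if lookup == nil {
		fmt.Println("message not found within the look-back window")
		return nil
	}
	fmt.Printf("executed at height %d with exit code %d\n", lookup.Height, lookup.Receipt.ExitCode)
	return nil
}
```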
@@ -4471,6 +4511,46 @@ Response: } ``` +### StateSearchMsgLimited +StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + ### StateSectorExpiration StateSectorExpiration returns epoch at which given sector will expire diff --git a/extern/blst b/extern/blst deleted file mode 160000 index 1cbb16ed9..000000000 --- a/extern/blst +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1cbb16ed9580dcd3e9593b71221fcf2a048faaef diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 1d9cb3e8f..62f89f108 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 1d9cb3e8ff53f51f9318fc57e5d00bc79bdc0128 +Subproject commit 62f89f108a6a8fe9ad6ed52fb7ffbf8594d7ae5c diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 1c8c7ee84..dca8b44b5 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -45,6 +45,10 @@ func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error } func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { + // TODO: allow tuning those: + chunk := abi.PaddedPieceSize(4 << 20) + parallel := runtime.NumCPU() + var offset abi.UnpaddedPieceSize for _, size := range existingPieceSizes { offset += size @@ -108,10 +112,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) - chunk := abi.PaddedPieceSize(4 << 20) + throttle := make(chan []byte, parallel) + piecePromises := make([]func() (abi.PieceInfo, error), 0) buf := make([]byte, chunk.Unpadded()) - var pieceCids []abi.PieceInfo + for i := 0; i < parallel; i++ { + if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize { + break // won't use this many buffers + } + throttle <- make([]byte, chunk.Unpadded()) + } for { var read int @@ -132,13 +142,39 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi break } - c, err := sb.pieceCid(sector.ProofType, buf[:read]) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err) - } - pieceCids = append(pieceCids, abi.PieceInfo{ - Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), - PieceCID: c, + done := make(chan struct { + cid.Cid + error + }, 1) + pbuf := <-throttle + copy(pbuf, buf[:read]) + + go func(read int) { + defer func() { + throttle <- pbuf + }() + + c, err := sb.pieceCid(sector.ProofType, pbuf[:read]) + done <- struct { + cid.Cid + error + }{c, err} + }(read) + + piecePromises = append(piecePromises, func() (abi.PieceInfo, error) { + select { + case e := <-done: + if e.error != nil { + return abi.PieceInfo{}, e.error + } + + return abi.PieceInfo{ + Size: 
abi.UnpaddedPieceSize(len(buf[:read])).Padded(), + PieceCID: e.Cid, + }, nil + case <-ctx.Done(): + return abi.PieceInfo{}, ctx.Err() + } }) } @@ -155,8 +191,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi } stagedFile = nil - if len(pieceCids) == 1 { - return pieceCids[0], nil + if len(piecePromises) == 1 { + return piecePromises[0]() + } + + pieceCids := make([]abi.PieceInfo, len(piecePromises)) + for i, promise := range piecePromises { + pieceCids[i], err = promise() + if err != nil { + return abi.PieceInfo{}, err + } } pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 1292a9513..3b379af6f 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -33,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader" ) func init() { @@ -622,3 +623,89 @@ func TestGenerateUnsealedCID(t *testing.T) { [][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)}, ) } + +func TestAddPiece512M(t *testing.T) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + t.Cleanup(cleanup) + + r := rand.New(rand.NewSource(0x7e5)) + + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: 0, + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(r, int64(sz))) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, "baga6ea4seaqhyticusemlcrjhvulpfng4nint6bu3wpe5s3x4bnuj2rs47hfacy", c.PieceCID.String()) +} + +func BenchmarkAddPiece512M(b *testing.B) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + b.SetBytes(int64(sz)) + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + b.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + b.Fatalf("%+v", err) + } + cleanup := func() { + if b.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + b.Error(err) + } + } + b.Cleanup(cleanup) + + for i := 0; i < b.N; i++ { + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: abi.SectorNumber(i), + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(&nullreader.Reader{}, int64(sz))) + if err != nil { + b.Fatal(err) + } + fmt.Println(c) + } +} diff --git a/extern/sector-storage/fsutil/filesize_unix.go b/extern/sector-storage/fsutil/filesize_unix.go index 500e54386..7df8dae4c 100644 --- a/extern/sector-storage/fsutil/filesize_unix.go +++ b/extern/sector-storage/fsutil/filesize_unix.go @@ -2,6 +2,7 @@ package fsutil import ( "os" + "path/filepath" "syscall" "golang.org/x/xerrors" @@ -11,19 +12,32 @@ type SizeInfo struct { OnDisk int64 } -// 
FileSize returns bytes used by a file on disk
+// FileSize returns bytes used by a file or directory on disk
+// NOTE: We care about the allocated bytes, not file or directory size
 func FileSize(path string) (SizeInfo, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(path, &stat); err != nil {
-		if err == syscall.ENOENT {
+	var size int64
+	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			stat, ok := info.Sys().(*syscall.Stat_t)
+			if !ok {
+				return xerrors.New("FileInfo.Sys of wrong type")
+			}
+
+			// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
+			// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
+			size += int64(stat.Blocks) * 512 // nolint NOTE: int64 cast is needed on osx
+		}
+		return err
+	})
+	if err != nil {
+		if os.IsNotExist(err) {
 			return SizeInfo{}, os.ErrNotExist
 		}
-		return SizeInfo{}, xerrors.Errorf("stat: %w", err)
+		return SizeInfo{}, xerrors.Errorf("filepath.Walk err: %w", err)
 	}
 
-	// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
-	// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
-	return SizeInfo{
-		int64(stat.Blocks) * 512, // nolint NOTE: int64 cast is needed on osx
-	}, nil
+	return SizeInfo{size}, nil
 }
diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go
index c56bbdf50..a9b31f38a 100644
--- a/extern/sector-storage/manager.go
+++ b/extern/sector-storage/manager.go
@@ -285,9 +285,19 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.
 	if unsealed == cid.Undef {
 		return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size)
 	}
+
+	ssize, err := sector.ProofType.SectorSize()
+	if err != nil {
+		return xerrors.Errorf("getting sector size: %w", err)
+	}
+
 	err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
 		// TODO: make restartable
-		_, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed))
+
+		// NOTE: we're unsealing the whole sector here as with SDR we can't really
+		//  unseal the sector partially. 
Requesting the whole sector here can + // save us some work in case another piece is requested from here + _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, unsealed)) return err }) if err != nil { diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 47fb2b974..9365ffb5c 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -347,7 +347,7 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea } func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - if len(mgr.sectors[sectorID.ID].pieces) > 1 || offset != 0 { + if offset != 0 { panic("implme") } diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index 573aa623b..40cf2fcf4 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -57,6 +57,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { log.Warnw("duplicated worker added", "id", wid) // this is ok, we're already handling this worker in a different goroutine + sh.workersLk.Unlock() return nil } diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index eb3e7690f..9362a7504 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -155,6 +155,10 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u) } + i.stores[si.ID].info.Weight = si.Weight + i.stores[si.ID].info.CanSeal = si.CanSeal + i.stores[si.ID].info.CanStore = si.CanStore + return nil } i.stores[si.ID] = &storageEntry{ diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 78765d7b4..70be08ace 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -475,7 +475,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{183}); err != nil { + if _, err := w.Write([]byte{184, 25}); err != nil { return err } @@ -928,6 +928,50 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } + // t.TerminateMessage (cid.Cid) (struct) + if len("TerminateMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TerminateMessage\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminateMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TerminateMessage")); err != nil { + return err + } + + if t.TerminateMessage == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.TerminateMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.TerminateMessage: %w", err) + } + } + + // t.TerminatedAt (abi.ChainEpoch) (int64) + if len("TerminatedAt") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TerminatedAt\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminatedAt"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TerminatedAt")); err != nil { + return err + } + + if t.TerminatedAt >= 0 { + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TerminatedAt)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TerminatedAt-1)); err != nil { + return err + } + } + // t.LastErr (string) (string) if len("LastErr") > cbg.MaxLength { return xerrors.Errorf("Value in field \"LastErr\" was too long") @@ -1441,6 +1485,55 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { t.Return = ReturnState(sval) } + // t.TerminateMessage (cid.Cid) (struct) + case "TerminateMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.TerminateMessage: %w", err) + } + + t.TerminateMessage = &c + } + + } + // t.TerminatedAt (abi.ChainEpoch) (int64) + case "TerminatedAt": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TerminatedAt = abi.ChainEpoch(extraI) + } // t.LastErr (string) (string) case "LastErr": diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 740e4243a..c989d0296 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -148,6 +148,21 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorFaultReported{}, FaultReported), on(SectorFaulty{}, Faulty), ), + Terminating: planOne( + on(SectorTerminating{}, TerminateWait), + on(SectorTerminateFailed{}, TerminateFailed), + ), + TerminateWait: planOne( + on(SectorTerminated{}, TerminateFinality), + on(SectorTerminateFailed{}, TerminateFailed), + ), + TerminateFinality: planOne( + on(SectorTerminateFailed{}, TerminateFailed), + // SectorRemove (global) + ), + TerminateFailed: planOne( + // SectorTerminating (global) + ), Removing: planOne( on(SectorRemoved{}, Removed), on(SectorRemoveFailed{}, RemoveFailed), @@ -196,7 +211,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta Kind: fmt.Sprintf("truncate"), } - state.Log = append(state.Log[:2000], state.Log[:6000]...) + state.Log = append(state.Log[:2000], state.Log[6000:]...) 
} state.Log = append(state.Log, l) @@ -328,6 +343,14 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta // Post-seal case Proving: return m.handleProvingSector, processed, nil + case Terminating: + return m.handleTerminating, processed, nil + case TerminateWait: + return m.handleTerminateWait, processed, nil + case TerminateFinality: + return m.handleTerminateFinality, processed, nil + case TerminateFailed: + return m.handleTerminateFailed, processed, nil case Removing: return m.handleRemoving, processed, nil case Removed: @@ -409,8 +432,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error { return err } - m.unsealedInfoMap.lk.Lock() + // m.unsealedInfoMap.lk.Lock() taken early in .New to prevent races defer m.unsealedInfoMap.lk.Unlock() + for _, sector := range trackedSectors { if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil { log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err) diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 59f5e77e6..e28366721 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -314,6 +314,32 @@ func (evt SectorFaultReported) apply(state *SectorInfo) { type SectorFaultedFinal struct{} +// Terminating + +type SectorTerminate struct{} + +func (evt SectorTerminate) applyGlobal(state *SectorInfo) bool { + state.State = Terminating + return true +} + +type SectorTerminating struct{ Message *cid.Cid } + +func (evt SectorTerminating) apply(state *SectorInfo) { + state.TerminateMessage = evt.Message +} + +type SectorTerminated struct{ TerminatedAt abi.ChainEpoch } + +func (evt SectorTerminated) apply(state *SectorInfo) { + state.TerminatedAt = evt.TerminatedAt +} + +type SectorTerminateFailed struct{ error } + +func (evt SectorTerminateFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorTerminateFailed) apply(*SectorInfo) {} + // External events type SectorRemove struct{} diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index d0c8fd3c9..f46403742 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" statemachine "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/specs-storage/storage" @@ -60,6 +61,8 @@ type SealingAPI interface { StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) + StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error) + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) 
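For orientation, the termination states added to the sealing FSM above follow a single happy path: Proving, then Terminating, TerminateWait, TerminateFinality, and finally Removing, with TerminateFailed acting as the retry state for any step (handleTerminateFailed re-sends SectorTerminate after a cooldown). The sketch below models that transition table in plain Go as an illustration only; the real planner runs on go-statemachine with the event types declared in fsm_events.go.

```go
package main

import "fmt"

// SectorState mirrors the state names introduced in the diff above; this is an
// illustrative walk of the intended happy path, not the lotus state machine.
type SectorState string

const (
	Proving           SectorState = "Proving"
	Terminating       SectorState = "Terminating"
	TerminateWait     SectorState = "TerminateWait"
	TerminateFinality SectorState = "TerminateFinality"
	Removing          SectorState = "Removing"
)

// happyPath records the next state when each step succeeds; any step can
// instead land in TerminateFailed, which retries after a cooldown.
var happyPath = map[SectorState]SectorState{
	Proving:           Terminating,       // SectorTerminate (global event)
	Terminating:       TerminateWait,     // SectorTerminating: queued in the TerminateBatcher
	TerminateWait:     TerminateFinality, // SectorTerminated: TerminateSectors message executed on chain
	TerminateFinality: Removing,          // SectorRemove: winning PoSt lookback has passed
}

func main() {
	for st := Proving; st != Removing; st = happyPath[st] {
		fmt.Printf("%s -> %s\n", st, happyPath[st])
	}
}
```

Note that TerminateFinality has no success event of its own in the planner; it relies on the global SectorRemove event once the lookback period has elapsed, which is why the table above maps it straight to Removing.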
@@ -94,12 +97,15 @@ type Sealing struct { stats SectorStats + terminator *TerminateBatcher + getConfig GetSealingConfigFunc } type FeeConfig struct { MaxPreCommitGasFee abi.TokenAmount MaxCommitGasFee abi.TokenAmount + MaxTerminateGasFee abi.TokenAmount } type UnsealedSectorMap struct { @@ -136,6 +142,8 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds notifee: notifee, addrSel: as, + terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc), + getConfig: gc, stats: SectorStats{ @@ -145,6 +153,8 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{}) + s.unsealedInfoMap.lk.Lock() // released after initialized in .Run() + return s } @@ -158,7 +168,14 @@ func (m *Sealing) Run(ctx context.Context) error { } func (m *Sealing) Stop(ctx context.Context) error { - return m.sectors.Stop(ctx) + if err := m.terminator.Stop(ctx); err != nil { + return err + } + + if err := m.sectors.Stop(ctx); err != nil { + return err + } + return nil } func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { @@ -263,6 +280,18 @@ func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error { return m.sectors.Send(uint64(sid), SectorRemove{}) } +func (m *Sealing) Terminate(ctx context.Context, sid abi.SectorNumber) error { + return m.sectors.Send(uint64(sid), SectorTerminate{}) +} + +func (m *Sealing) TerminateFlush(ctx context.Context) (*cid.Cid, error) { + return m.terminator.Flush(ctx) +} + +func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return m.terminator.Pending(ctx) +} + // Caller should NOT hold m.unsealedInfoMap.lk func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error { // locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else @@ -446,7 +475,12 @@ func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof return 0, err } - return mi.SealProofType, nil + ver, err := m.api.StateNetworkVersion(ctx, nil) + if err != nil { + return 0, err + } + + return miner.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType) } func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef { diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index ed32a110b..49a607958 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -30,6 +30,10 @@ var ExistSectorStateList = map[SectorState]struct{}{ Faulty: {}, FaultReported: {}, FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, Removing: {}, RemoveFailed: {}, Removed: {}, @@ -69,6 +73,11 @@ const ( FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain FaultedFinal SectorState = "FaultedFinal" // fault declared on chain + Terminating SectorState = "Terminating" + TerminateWait SectorState = "TerminateWait" + TerminateFinality SectorState = "TerminateFinality" + TerminateFailed SectorState = "TerminateFailed" + Removing SectorState = "Removing" RemoveFailed SectorState = "RemoveFailed" Removed SectorState = "Removed" @@ -78,7 +87,7 @@ func toStatState(st SectorState) statSectorState { switch st { case Empty, WaitDeals, Packing, GetTicket, 
PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: return sstSealing - case Proving, Removed, Removing: + case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving } diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index e425606de..e76b7883c 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -224,9 +224,9 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo case *ErrBadCommD: return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) case *ErrExpiredTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)}) + return ctx.Send(SectorTicketExpired{xerrors.Errorf("ticket expired error, removing sector: %w", err)}) case *ErrBadTicket: - return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket: %w", err)}) + return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket, removing sector: %w", err)}) case *ErrInvalidDeals: log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed}) @@ -309,6 +309,22 @@ func (m *Sealing) handleRemoveFailed(ctx statemachine.Context, sector SectorInfo return ctx.Send(SectorRemove{}) } +func (m *Sealing) handleTerminateFailed(ctx statemachine.Context, sector SectorInfo) error { + // ignoring error as it's most likely an API error - `pci` will be nil, and we'll go back to + // the Terminating state after cooldown. If the API is still failing, well get back to here + // with the error in SectorInfo log. + pci, _ := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if pci != nil { + return nil // pause the fsm, needs manual user action + } + + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorTerminate{}) +} + func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo) error { // First make vary sure the sector isn't committed si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index de7e6c8d0..212fd906f 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -1,9 +1,14 @@ package sealing import ( + "time" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" ) func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error { @@ -31,6 +36,89 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorFaultedFinal{}) } +func (m *Sealing) handleTerminating(ctx statemachine.Context, sector SectorInfo) error { + // First step of sector termination + // * See if sector is live + // * If not, goto removing + // * Add to termination queue + // * Wait for message to land on-chain + // * Check for correct termination + // * wait for expiration (+winning lookback?) 
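The last bullet above ("wait for expiration (+winning lookback?)") is implemented further down in handleTerminateFinality: a terminated sector may only be removed once the chain head has advanced WinningPoStSectorSetLookback epochs past the epoch at which the TerminateSectors message executed. A minimal sketch of that bookkeeping follows; the lookback and per-epoch block delay are hard-coded placeholders here, whereas the real code reads them from policy.GetWinningPoStSectorSetLookback(nv) and build.BlockDelaySecs.

```go
package main

import (
	"fmt"
	"time"
)

// Placeholder values for illustration only; lotus fetches these from the
// actors policy for the current network version and from the build params.
const (
	winningPoStLookback int64 = 10               // epochs
	blockDelay                = 30 * time.Second // wall-clock time per epoch
)

// removableAt reports the first epoch at which a sector terminated at
// terminatedAt can be removed, plus the approximate wait from the current head.
func removableAt(terminatedAt, head int64) (int64, time.Duration) {
	target := terminatedAt + winningPoStLookback
	if head >= target {
		return target, 0
	}
	return target, time.Duration(target-head) * blockDelay
}

func main() {
	epoch, wait := removableAt(1000, 1004)
	fmt.Printf("removable at epoch %d (in ~%s)\n", epoch, wait)
}
```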
+ + si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting sector info: %w", err)}) + } + + if si == nil { + // either already terminated or not committed yet + + pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("checking precommit presence: %w", err)}) + } + if pci != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("sector was precommitted but not proven, remove instead of terminating")}) + } + + return ctx.Send(SectorRemove{}) + } + + termCid, terminated, err := m.terminator.AddTermination(ctx.Context(), m.minerSectorID(sector.SectorNumber)) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("queueing termination: %w", err)}) + } + + if terminated { + return ctx.Send(SectorTerminating{Message: nil}) + } + + return ctx.Send(SectorTerminating{Message: &termCid}) +} + +func (m *Sealing) handleTerminateWait(ctx statemachine.Context, sector SectorInfo) error { + if sector.TerminateMessage == nil { + return xerrors.New("entered TerminateWait with nil TerminateMessage") + } + + mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.TerminateMessage) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("waiting for terminate message to land on chain: %w", err)}) + } + + if mw.Receipt.ExitCode != exitcode.Ok { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("terminate message failed to execute: exit %d: %w", mw.Receipt.ExitCode, err)}) + } + + return ctx.Send(SectorTerminated{TerminatedAt: mw.Height}) +} + +func (m *Sealing) handleTerminateFinality(ctx statemachine.Context, sector SectorInfo) error { + for { + tok, epoch, err := m.api.ChainHead(ctx.Context()) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting chain head: %w", err)}) + } + + nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting network version: %w", err)}) + } + + if epoch >= sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv) { + return ctx.Send(SectorRemove{}) + } + + toWait := time.Duration(epoch-sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv)) * time.Duration(build.BlockDelaySecs) * time.Second + select { + case <-time.After(toWait): + continue + case <-ctx.Context().Done(): + return ctx.Context().Err() + } + } +} + func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error { if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { return ctx.Send(SectorRemoveFailed{err}) diff --git a/extern/storage-sealing/terminate_batch.go b/extern/storage-sealing/terminate_batch.go new file mode 100644 index 000000000..31ccef93c --- /dev/null +++ b/extern/storage-sealing/terminate_batch.go @@ -0,0 +1,351 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/dline" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" +) + +var ( + // TODO: config + + TerminateBatchMax uint64 = 100 // adjust based on real-world gas numbers, actors limit at 10k + TerminateBatchMin uint64 = 1 + TerminateBatchWait = 5 * time.Minute +) + +type TerminateBatcherApi interface { + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error) + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error) + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error) +} + +type TerminateBatcher struct { + api TerminateBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + + todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField + + waiting map[abi.SectorNumber][]chan cid.Cid + + notify, stop, stopped chan struct{} + force chan chan *cid.Cid + lk sync.Mutex +} + +func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig) *TerminateBatcher { + b := &TerminateBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + + todo: map[SectorLocation]*bitfield.BitField{}, + waiting: map[abi.SectorNumber][]chan cid.Cid{}, + + notify: make(chan struct{}, 1), + force: make(chan chan *cid.Cid), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *TerminateBatcher) run() { + var forceRes chan *cid.Cid + var lastMsg *cid.Cid + + for { + if forceRes != nil { + forceRes <- lastMsg + forceRes = nil + } + lastMsg = nil + + var sendAboveMax, sendAboveMin bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-time.After(TerminateBatchWait): + sendAboveMin = true + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin) + if err != nil { + log.Warnw("TerminateBatcher processBatch error", "error", err) + } + } +} + +func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { + dl, err := b.api.StateMinerProvingDeadline(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("getting proving deadline info failed: %w", err) + } + + b.lk.Lock() + defer b.lk.Unlock() + params := miner2.TerminateSectorsParams{} + + var total uint64 + for loc, sectors := range b.todo { + n, err := sectors.Count() + if err != nil { + log.Errorw("TerminateBatcher: failed to count sectors to terminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + // don't send terminations for currently challenged sectors + if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain) + loc.Deadline == dl.Index || // not in current + (loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous + continue + } + + if n < 1 { + log.Warnw("TerminateBatcher: zero sectors in bucket", "deadline", loc.Deadline, "partition", loc.Partition) + continue + } + + toTerminate, err := sectors.Copy() + if err != nil { + 
log.Warnw("TerminateBatcher: copy sectors bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + if total+n > uint64(miner.AddressedSectorsMax) { + n = uint64(miner.AddressedSectorsMax) - total + + toTerminate, err = toTerminate.Slice(0, n) + if err != nil { + log.Warnw("TerminateBatcher: slice toTerminate bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + s, err := bitfield.SubtractBitField(*sectors, toTerminate) + if err != nil { + log.Warnw("TerminateBatcher: sectors-toTerminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + *sectors = s + } + + total += n + + params.Terminations = append(params.Terminations, miner2.TerminationDeclaration{ + Deadline: loc.Deadline, + Partition: loc.Partition, + Sectors: toTerminate, + }) + + if total >= uint64(miner.AddressedSectorsMax) { + break + } + + if len(params.Terminations) >= miner.DeclarationsMax { + break + } + } + + if len(params.Terminations) == 0 { + return nil, nil // nothing to do + } + + if notif && total < TerminateBatchMax { + return nil, nil + } + + if after && total < TerminateBatchMin { + return nil, nil + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return nil, xerrors.Errorf("couldn't serialize TerminateSectors params: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, b.feeCfg.MaxTerminateGasFee, b.feeCfg.MaxTerminateGasFee) + if err != nil { + return nil, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), b.feeCfg.MaxTerminateGasFee, enc.Bytes()) + if err != nil { + return nil, xerrors.Errorf("sending message failed: %w", err) + } + log.Infow("Sent TerminateSectors message", "cid", mcid, "from", from, "terminations", len(params.Terminations)) + + for _, t := range params.Terminations { + delete(b.todo, SectorLocation{ + Deadline: t.Deadline, + Partition: t.Partition, + }) + + err := t.Sectors.ForEach(func(sn uint64) error { + for _, ch := range b.waiting[abi.SectorNumber(sn)] { + ch <- mcid // buffered + } + delete(b.waiting, abi.SectorNumber(sn)) + + return nil + }) + if err != nil { + return nil, xerrors.Errorf("sectors foreach: %w", err) + } + } + + return &mcid, nil +} + +// register termination, wait for batch message, return message CID +// can return cid.Undef,true if the sector is already terminated on-chain +func (b *TerminateBatcher) AddTermination(ctx context.Context, s abi.SectorID) (mcid cid.Cid, terminated bool, err error) { + maddr, err := address.NewIDAddress(uint64(s.Miner)) + if err != nil { + return cid.Undef, false, err + } + + loc, err := b.api.StateSectorPartition(ctx, maddr, s.Number, nil) + if err != nil { + return cid.Undef, false, xerrors.Errorf("getting sector location: %w", err) + } + if loc == nil { + return cid.Undef, false, xerrors.New("sector location not found") + } + + { + // check if maybe already terminated + parts, err := b.api.StateMinerPartitions(ctx, maddr, loc.Deadline, nil) + if err != nil { + return cid.Cid{}, false, xerrors.Errorf("getting partitions: %w", err) + } + live, err := parts[loc.Partition].LiveSectors.IsSet(uint64(s.Number)) + if err != nil { + return cid.Cid{}, false, xerrors.Errorf("checking if sector is in live set: %w", err) + } + if 
!live { + // already terminated + return cid.Undef, true, nil + } + } + + b.lk.Lock() + bf, ok := b.todo[*loc] + if !ok { + n := bitfield.New() + bf = &n + b.todo[*loc] = bf + } + bf.Set(uint64(s.Number)) + + sent := make(chan cid.Cid, 1) + b.waiting[s.Number] = append(b.waiting[s.Number], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, false, nil + case <-ctx.Done(): + return cid.Undef, false, ctx.Err() + } +} + +func (b *TerminateBatcher) Flush(ctx context.Context) (*cid.Cid, error) { + resCh := make(chan *cid.Cid, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *TerminateBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, bf := range b.todo { + err := bf.ForEach(func(id uint64) error { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(id), + }) + return nil + }) + if err != nil { + return nil, err + } + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *TerminateBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index 8f3e82a0b..1d5073622 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -103,6 +103,10 @@ type SectorInfo struct { // Recovery Return ReturnState + // Termination + TerminateMessage *cid.Cid + TerminatedAt abi.ChainEpoch + // Debug LastErr string diff --git a/go.mod b/go.mod index e6bc14a1b..273391a56 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 - github.com/coreos/go-systemd/v22 v22.0.0 + github.com/coreos/go-systemd/v22 v22.1.0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.2 github.com/docker/go-units v0.4.0 @@ -25,25 +25,26 @@ require ( github.com/elastic/gosigar v0.12.0 github.com/fatih/color v1.9.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f - github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb + github.com/filecoin-project/go-address v0.0.5 github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect - github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 + github.com/filecoin-project/go-bitfield v0.2.3 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v1.2.3 + github.com/filecoin-project/go-data-transfer v1.2.7 github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a - 
github.com/filecoin-project/go-fil-markets v1.0.10 + github.com/filecoin-project/go-fil-markets v1.1.2 github.com/filecoin-project/go-jsonrpc v0.1.2 github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 - github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc + github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71 github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.0 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/specs-actors v0.9.13 - github.com/filecoin-project/specs-actors/v2 v2.3.2 + github.com/filecoin-project/specs-actors/v2 v2.3.4 + github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 @@ -70,7 +71,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.5.1 + github.com/ipfs/go-graphsync v0.5.2 github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 @@ -104,7 +105,7 @@ require ( github.com/libp2p/go-libp2p-mplex v0.3.0 github.com/libp2p/go-libp2p-noise v0.1.2 github.com/libp2p/go-libp2p-peerstore v0.2.6 - github.com/libp2p/go-libp2p-pubsub v0.4.0 + github.com/libp2p/go-libp2p-pubsub v0.4.1 github.com/libp2p/go-libp2p-quic-transport v0.9.0 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 @@ -124,13 +125,12 @@ require ( github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a github.com/prometheus/client_golang v1.6.0 github.com/raulk/clock v1.1.0 - github.com/raulk/go-watchdog v0.0.1 - github.com/stretchr/testify v1.6.1 - github.com/supranational/blst v0.1.1 + github.com/raulk/go-watchdog v1.0.1 + github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 + github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2 github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d @@ -141,7 +141,7 @@ require ( go.uber.org/multierr v1.6.0 go.uber.org/zap v1.16.0 golang.org/x/net v0.0.0-20201021035429-f5854403a974 - golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 @@ -157,5 +157,3 @@ replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors - -replace github.com/supranational/blst => ./extern/blst diff --git a/go.sum b/go.sum index 644904013..6d38f4a8e 100644 --- a/go.sum +++ b/go.sum @@ -129,6 
+129,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -145,6 +146,8 @@ github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbL github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -155,8 +158,8 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -237,16 +240,18 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb h1:Cbu7YYsXHtVlPEJ+eqbBx2S3ElmWCB0NjpGPYvvvCrA= -github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb/go.mod 
h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 h1:RMdzMqe3mu2Z/3N3b9UEfkbGZxukstmZgNC024ybWhA= -github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3 h1:pedK/7maYF06Z+BYJf2OeFFqIDEh6SP6mIOlLFpYXGs= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 h1:0kHszkYP3hgApcjl5x4rpwONhN9+j7XDobf6at5XfHs= @@ -255,8 +260,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.2.3 h1:rM/HgGOOMsKvmeQjY7CVR3v7Orxf04LJSSczSpGlhg4= -github.com/filecoin-project/go-data-transfer v1.2.3/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY= +github.com/filecoin-project/go-data-transfer v1.2.7 h1:WE5Cpp9eMt5BDoWOVR64QegSn6bwHQaDzyyjVU377Y0= +github.com/filecoin-project/go-data-transfer v1.2.7/go.mod h1:mvjZ+C3NkBX10JP4JMu27DCjUouHFjHwUGh+Xc4yvDA= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= @@ -264,12 +269,14 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets 
v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.0.10 h1:1QunPsgApTLNXVlaXoPMxyrMtOsMLPOQq3RUjGRmgVI= -github.com/filecoin-project/go-fil-markets v1.0.10/go.mod h1:tcXby9CsTNuHu19dH05YZ5pNDsoYcQXSrbkxzVeMJrY= +github.com/filecoin-project/go-fil-markets v1.1.2 h1:5FVdDmF9GvW6Xllql9OGiJXEZjh/tu590BXSQH2W/vU= +github.com/filecoin-project/go-fil-markets v1.1.2/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-jsonrpc v0.1.2 h1:MTebUawBHLxxY9gDi1WXuGc89TWIDmsgoDqeZSk9KRw= github.com/filecoin-project/go-jsonrpc v0.1.2/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= @@ -283,6 +290,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71 h1:Cas/CUB4ybYpdxvW7LouaydE16cpwdq3vvS3qgZuU+Q= +github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= @@ -296,6 +305,10 @@ github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.2 h1:2Vcf4CGa29kRh4JJ02m+FbvD/p3YNnLGsaHfw7Uj49g= github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= +github.com/filecoin-project/specs-actors/v2 v2.3.4 h1:NZK2oMCcA71wNsUzDBmLQyRMzcCnX9tDGvwZ53G67j8= +github.com/filecoin-project/specs-actors/v2 v2.3.4/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= +github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa h1:J0yyTt9MLDaN0XvzjEAWTCvG6SRVfXc6dVLluvRiOsQ= +github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128055125-ab0632b1c8fa/go.mod h1:NL24TPjJGyU7fh1ztpUyYcoZi3TmRKNEI0huPYmhObA= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= 
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= @@ -392,6 +405,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -557,10 +572,8 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM= -github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= -github.com/ipfs/go-graphsync v0.5.1 h1:4fXBRvRKicTgTmCFMmEua/H5jvmAOLgU9Z7PCPWt2ec= -github.com/ipfs/go-graphsync v0.5.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.5.2 h1:USD+daaSC+7pLHCxROThSaF6SF7WYXF03sjrta0rCfA= +github.com/ipfs/go-graphsync v0.5.2/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= @@ -748,6 +761,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -923,8 +938,8 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1 github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.4.0 h1:YNVRyXqBgv9i4RG88jzoTtkSOaSB45CqHkL29NNBZb4= -github.com/libp2p/go-libp2p-pubsub v0.4.0/go.mod 
h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= +github.com/libp2p/go-libp2p-pubsub v0.4.1 h1:j4umIg5nyus+sqNfU+FWvb9aeYFQH/A+nDFhWj+8yy8= +github.com/libp2p/go-libp2p-pubsub v0.4.1/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= @@ -1236,6 +1251,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE= github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -1313,8 +1330,8 @@ github.com/prometheus/procfs v0.1.0 h1:jhMy6QXfi3y2HEzFoyuCj40z4OZIIHHPtFyCMftmv github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v0.0.1 h1:q0ad0fanW8uaLRTvxQ0RfdADBiKa6CL6NMByhB0vpBs= -github.com/raulk/go-watchdog v0.0.1/go.mod h1:dIvQcKy0laxuHGda1ms8/2T9wE3ZJRbz9bxEO7c0q1M= +github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= +github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1366,6 +1383,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= @@ -1409,6 +1428,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -1426,6 +1447,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= @@ -1459,6 +1482,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2 h1:7HzUKl5d/dELS9lLeT4W6YvliZx+s9k/eOOIdHKrA/w= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= @@ -1683,6 +1708,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git 
a/lib/blockstore/badger/blockstore_test_suite.go b/lib/blockstore/badger/blockstore_test_suite.go index b11fc4e23..9332e62c5 100644 --- a/lib/blockstore/badger/blockstore_test_suite.go +++ b/lib/blockstore/badger/blockstore_test_suite.go @@ -183,6 +183,8 @@ func (s *Suite) TestAllKeysRespectsContext(t *testing.T) { require.True(t, ok) cancel() + // pull one value out to avoid race + _, _ = <-ch v, ok = <-ch require.Equal(t, cid.Undef, v) diff --git a/lib/sigs/bls/init.go b/lib/sigs/bls/init.go index 42633eee8..9bc69c3a4 100644 --- a/lib/sigs/bls/init.go +++ b/lib/sigs/bls/init.go @@ -7,17 +7,17 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/crypto" - blst "github.com/supranational/blst/bindings/go" + ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/lotus/lib/sigs" ) const DST = string("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_") -type SecretKey = blst.SecretKey -type PublicKey = blst.P1Affine -type Signature = blst.P2Affine -type AggregateSignature = blst.P2Aggregate +type SecretKey = ffi.PrivateKey +type PublicKey = ffi.PublicKey +type Signature = ffi.Signature +type AggregateSignature = ffi.Signature type blsSigner struct{} @@ -29,30 +29,55 @@ func (blsSigner) GenPrivate() ([]byte, error) { return nil, fmt.Errorf("bls signature error generating random data") } // Note private keys seem to be serialized little-endian! - pk := blst.KeyGen(ikm[:]).ToLEndian() - return pk, nil + sk := ffi.PrivateKeyGenerateWithSeed(ikm) + return sk[:], nil } func (blsSigner) ToPublic(priv []byte) ([]byte, error) { - pk := new(SecretKey).FromLEndian(priv) - if pk == nil || !pk.Valid() { + if priv == nil || len(priv) != ffi.PrivateKeyBytes { return nil, fmt.Errorf("bls signature invalid private key") } - return new(PublicKey).From(pk).Compress(), nil + + sk := new(SecretKey) + copy(sk[:], priv[:ffi.PrivateKeyBytes]) + + pubkey := ffi.PrivateKeyPublicKey(*sk) + + return pubkey[:], nil } func (blsSigner) Sign(p []byte, msg []byte) ([]byte, error) { - pk := new(SecretKey).FromLEndian(p) - if pk == nil || !pk.Valid() { + if p == nil || len(p) != ffi.PrivateKeyBytes { return nil, fmt.Errorf("bls signature invalid private key") } - return new(Signature).Sign(pk, msg, []byte(DST)).Compress(), nil + + sk := new(SecretKey) + copy(sk[:], p[:ffi.PrivateKeyBytes]) + + sig := ffi.PrivateKeySign(*sk, msg) + + return sig[:], nil } func (blsSigner) Verify(sig []byte, a address.Address, msg []byte) error { - if !new(Signature).VerifyCompressed(sig, a.Payload()[:], msg, []byte(DST)) { + payload := a.Payload() + if sig == nil || len(sig) != ffi.SignatureBytes || len(payload) != ffi.PublicKeyBytes { return fmt.Errorf("bls signature failed to verify") } + + pk := new(PublicKey) + copy(pk[:], payload[:ffi.PublicKeyBytes]) + + sigS := new(Signature) + copy(sigS[:], sig[:ffi.SignatureBytes]) + + msgs := [1]ffi.Message{msg} + pks := [1]PublicKey{*pk} + + if !ffi.HashVerify(sigS, msgs[:], pks[:]) { + return fmt.Errorf("bls signature failed to verify") + } + return nil } diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go index 87c8dfe65..e5f669f2f 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -12,22 +12,22 @@ var log = logging.Logger("markets") // StorageClientLogger logs events from the storage client func StorageClientLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - log.Infow("storage event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", 
storagemarket.DealStates[deal.State], "message", deal.Message) + log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) } // StorageProviderLogger logs events from the storage provider func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - log.Infow("storage event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) + log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) } // RetrievalClientLogger logs events from the retrieval client func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - log.Infow("retrieval event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + log.Infow("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) } // RetrievalProviderLogger logs events from the retrieval provider func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - log.Infow("retrieval event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + log.Infow("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) } // DataTransferLogger logs events from the data transfer module diff --git a/markets/storageadapter/api.go b/markets/storageadapter/api.go new file mode 100644 index 000000000..9d89c7aa4 --- /dev/null +++ b/markets/storageadapter/api.go @@ -0,0 +1,53 @@ +package storageadapter + +import ( + "context" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/adt" + + "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" +) + +type apiWrapper struct { + api interface { + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) + } +} + +func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { + store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(ca.api))) + + preAct, err := ca.api.StateGetActor(ctx, actor, pre) + if err != nil { + return nil, xerrors.Errorf("getting pre actor: %w", err) + } + curAct, err := ca.api.StateGetActor(ctx, actor, cur) + if err != nil { + return nil, xerrors.Errorf("getting cur actor: %w", err) + } + + preSt, err := miner.Load(store, preAct) + if err != nil { + return nil, xerrors.Errorf("loading miner actor: %w", err) + } + curSt, err := miner.Load(store, curAct) + if err != nil { + return nil, xerrors.Errorf("loading 
miner actor: %w", err) + } + + diff, err := miner.DiffPreCommits(preSt, curSt) + if err != nil { + return nil, xerrors.Errorf("diff precommits: %w", err) + } + + return diff, err +} diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go index 4d00ab258..f3491da47 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -34,9 +34,8 @@ import ( ) type ClientNodeAdapter struct { - full.StateAPI - full.ChainAPI - full.MpoolAPI + *clientApi + *apiWrapper fundmgr *market.FundManager ev *events.Events @@ -46,14 +45,14 @@ type ClientNodeAdapter struct { type clientApi struct { full.ChainAPI full.StateAPI + full.MpoolAPI } func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode { - capi := &clientApi{chain, stateapi} + capi := &clientApi{chain, stateapi, mpool} return &ClientNodeAdapter{ - StateAPI: stateapi, - ChainAPI: chain, - MpoolAPI: mpool, + clientApi: capi, + apiWrapper: &apiWrapper{api: capi}, fundmgr: fundmgr, ev: events.NewEvents(context.TODO(), capi), @@ -264,7 +263,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a // and the chain has advanced to the confidence height stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts2.Height() { + if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { onDealExpired(nil) return false, nil } diff --git a/markets/storageadapter/getcurrentdealinfo.go b/markets/storageadapter/getcurrentdealinfo.go index ab8c3f52f..97311a0b2 100644 --- a/markets/storageadapter/getcurrentdealinfo.go +++ b/markets/storageadapter/getcurrentdealinfo.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -18,47 +19,49 @@ type getCurrentDealInfoAPI interface { StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) + + diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) } // GetCurrentDealInfo gets current information on a deal, and corrects the deal ID as needed -func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, *api.MarketDeal, error) { +func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, *api.MarketDeal, types.TipSetKey, error) { marketDeal, dealErr := api.StateMarketStorageDeal(ctx, dealID, ts.Key()) if dealErr == nil { equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal) if err != nil { - return dealID, nil, err + return dealID, nil, types.EmptyTSK, err } if equal { - return dealID, marketDeal, nil + return dealID, marketDeal, types.EmptyTSK, nil } dealErr = xerrors.Errorf("Deal proposals did not match") } if publishCid == nil { - 
return dealID, nil, dealErr + return dealID, nil, types.EmptyTSK, dealErr } // attempt deal id correction lookup, err := api.StateSearchMsg(ctx, *publishCid) if err != nil { - return dealID, nil, err + return dealID, nil, types.EmptyTSK, err } if lookup.Receipt.ExitCode != exitcode.Ok { - return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", *publishCid, lookup.Receipt.ExitCode) + return dealID, nil, types.EmptyTSK, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", *publishCid, lookup.Receipt.ExitCode) } var retval market.PublishStorageDealsReturn if err := retval.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil { - return dealID, nil, xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err) + return dealID, nil, types.EmptyTSK, xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err) } if len(retval.IDs) != 1 { // market currently only ever sends messages with 1 deal - return dealID, nil, xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal") + return dealID, nil, types.EmptyTSK, xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal") } if retval.IDs[0] == dealID { // DealID did not change, so we are stuck with the original lookup error - return dealID, nil, dealErr + return dealID, nil, lookup.TipSet, dealErr } dealID = retval.IDs[0] @@ -67,13 +70,13 @@ func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDea if err == nil { equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal) if err != nil { - return dealID, nil, err + return dealID, nil, types.EmptyTSK, err } if !equal { - return dealID, nil, xerrors.Errorf("Deal proposals did not match") + return dealID, nil, types.EmptyTSK, xerrors.Errorf("Deal proposals did not match") } } - return dealID, marketDeal, err + return dealID, marketDeal, lookup.TipSet, err } func checkDealEquality(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, p1, p2 market.DealProposal) (bool, error) { diff --git a/markets/storageadapter/getcurrentdealinfo_test.go b/markets/storageadapter/getcurrentdealinfo_test.go index ed5d36c5b..5e3c10495 100644 --- a/markets/storageadapter/getcurrentdealinfo_test.go +++ b/markets/storageadapter/getcurrentdealinfo_test.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" test "github.com/filecoin-project/lotus/chain/events/state/mock" "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" @@ -209,7 +210,7 @@ func TestGetCurrentDealInfo(t *testing.T) { MarketDeals: marketDeals, } - dealID, marketDeal, err := GetCurrentDealInfo(ctx, ts, api, startDealID, proposal, data.publishCid) + dealID, marketDeal, _, err := GetCurrentDealInfo(ctx, ts, api, startDealID, proposal, data.publishCid) require.Equal(t, data.expectedDealID, dealID) require.Equal(t, data.expectedMarketDeal, marketDeal) if data.expectedError == nil { @@ -236,6 +237,10 @@ type mockGetCurrentDealInfoAPI struct { MarketDeals map[marketDealKey]*api.MarketDeal } +func (mapi *mockGetCurrentDealInfoAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { + return &miner.PreCommitChanges{}, nil +} + func (mapi 
*mockGetCurrentDealInfoAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, ts types.TipSetKey) (*api.MarketDeal, error) { deal, ok := mapi.MarketDeals[marketDealKey{dealID, ts}] if !ok { diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go index bfa084638..5466c81ef 100644 --- a/markets/storageadapter/ondealsectorcommitted.go +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -5,16 +5,18 @@ import ( "context" "sync" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" ) type sectorCommittedEventsAPI interface { @@ -32,7 +34,7 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev // First check if the deal is already active, and if so, bail out checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - isActive, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid) + di, isActive, publishTs, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid) if err != nil { // Note: the error returned from here will end up being returned // from OnDealSectorPreCommitted so no need to call the callback @@ -46,6 +48,36 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev return true, false, nil } + // Check that precommits which landed between when the deal was published + // and now don't already contain the deal we care about. 
+ // (this can happen when the precommit lands very quickly (in tests), or + when the client node was down between when the deal was published and when + the precommit containing it landed on chain) + + if publishTs == types.EmptyTSK { + lookup, err := api.StateSearchMsg(ctx, *publishCid) + if err != nil { + return false, false, err + } + if lookup != nil { // can be nil in tests + publishTs = lookup.TipSet + } + } + + diff, err := api.diffPreCommits(ctx, provider, publishTs, ts.Key()) + if err != nil { + return false, false, err + } + + for _, info := range diff.Added { + for _, d := range info.Info.DealIDs { + if d == di { + cb(info.Info.SectorNumber, false, nil) + return true, false, nil + } + } + } + // Not yet active, start matching against incoming messages return false, true, nil } @@ -75,6 +107,11 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev return false, err } + // Ignore the pre-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + // Extract the message parameters var params miner.SectorPreCommitInfo if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { @@ -83,7 +120,7 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev // When the deal is published, the deal ID may change, so get the // current deal ID from the publish message CID - dealID, _, err = GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) + dealID, _, _, err = GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) if err != nil { return false, err } @@ -125,7 +162,7 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event // First check if the deal is already active, and if so, bail out checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - isActive, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid) + _, isActive, _, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid) if err != nil { // Note: the error returned from here will end up being returned // from OnDealSectorCommitted so no need to call the callback @@ -175,8 +212,13 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event return false, err } + // Ignore the prove-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + // Get the deal info - _, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) + _, sd, _, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) if err != nil { return false, xerrors.Errorf("failed to look up deal on chain: %w", err) } @@ -206,22 +248,22 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event return nil } -func checkIfDealAlreadyActive(ctx context.Context, api getCurrentDealInfoAPI, ts *types.TipSet, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (bool, error) { - _, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) +func checkIfDealAlreadyActive(ctx context.Context, api getCurrentDealInfoAPI, ts *types.TipSet, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, bool, types.TipSetKey, error) { + di, sd, publishTs, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid) if err != nil { // TODO: This may be fine for some errors - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) + return 0, false, types.EmptyTSK,
xerrors.Errorf("failed to look up deal on chain: %w", err) } // Sector with deal is already active if sd.State.SectorStartEpoch > 0 { - return true, nil + return 0, true, publishTs, nil } // Sector was slashed if sd.State.SlashEpoch > 0 { - return false, xerrors.Errorf("deal %d was slashed at epoch %d", dealID, sd.State.SlashEpoch) + return 0, false, types.EmptyTSK, xerrors.Errorf("deal %d was slashed at epoch %d", dealID, sd.State.SlashEpoch) } - return false, nil + return di, false, publishTs, nil } diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go index 30fbfea76..dea1f89d2 100644 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -114,6 +114,25 @@ func TestOnDealSectorPreCommitted(t *testing.T) { expectedCBIsActive: false, expectedCBSectorNumber: sectorNumber, }, + "ignores unsuccessful pre-commit message": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{startDealID}, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + receipt: &types.MessageReceipt{ExitCode: 1}, + }, + }, + expectedCBCallCount: 0, + }, "error on deal in check": { checkTsDeals: map[abi.DealID]*api.MarketDeal{}, searchMessageErr: errors.New("something went wrong"), @@ -142,8 +161,7 @@ func TestOnDealSectorPreCommitted(t *testing.T) { deals: map[abi.DealID]*api.MarketDeal{}, }, }, - expectedCBCallCount: 1, - expectedCBError: errors.New("handling applied event: something went wrong"), + expectedCBCallCount: 0, expectedError: errors.New("failed to set up called handler: something went wrong"), }, "proposed deal epoch timeout": { @@ -179,7 +197,7 @@ func TestOnDealSectorPreCommitted(t *testing.T) { matchMessages[i] = matchMessage{ curH: 5, msg: ms.msg, - msgReceipt: nil, + msgReceipt: ms.receipt, ts: matchTs, } } @@ -297,6 +315,23 @@ func TestOnDealSectorCommitted(t *testing.T) { }, expectedCBCallCount: 1, }, + "ignores unsuccessful prove-commit message": { + checkTsDeals: map[abi.DealID]*api.MarketDeal{ + startDealID: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + deals: map[abi.DealID]*api.MarketDeal{ + startDealID: successDeal, + }, + receipt: &types.MessageReceipt{ExitCode: 1}, + }, + }, + expectedCBCallCount: 0, + }, "error on deal in check": { checkTsDeals: map[abi.DealID]*api.MarketDeal{}, searchMessageErr: errors.New("something went wrong"), @@ -361,7 +396,7 @@ func TestOnDealSectorCommitted(t *testing.T) { matchMessages[i] = matchMessage{ curH: 5, msg: ms.msg, - msgReceipt: nil, + msgReceipt: ms.receipt, ts: matchTs, } } @@ -397,8 +432,9 @@ func TestOnDealSectorCommitted(t *testing.T) { } type matchState struct { - msg *types.Message - deals map[abi.DealID]*api.MarketDeal + msg *types.Message + receipt *types.MessageReceipt + deals map[abi.DealID]*api.MarketDeal } type matchMessage struct { @@ -434,7 +470,11 @@ func (fe *fakeEvents) Called(check events.CheckFunc, msgHnd events.MsgHandler, r return err } if matched { - more, err := msgHnd(matchMessage.msg, matchMessage.msgReceipt, matchMessage.ts, matchMessage.curH) + receipt := matchMessage.msgReceipt + if receipt == 
nil { + receipt = &types.MessageReceipt{ExitCode: 0} + } + more, err := msgHnd(matchMessage.msg, receipt, matchMessage.ts, matchMessage.curH) if err != nil { return err } diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index 79fd718f8..90a659d34 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/types" @@ -41,6 +42,7 @@ var log = logging.Logger("storageadapter") type ProviderNodeAdapter struct { api.FullNode + *apiWrapper // this goes away with the data transfer module dag dtypes.StagingDAG @@ -55,7 +57,8 @@ type ProviderNodeAdapter struct { func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { return func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { na := &ProviderNodeAdapter{ - FullNode: full, + FullNode: full, + apiWrapper: &apiWrapper{api: full}, dag: dag, secb: secb, @@ -154,30 +157,36 @@ func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Si return err == nil, err } -func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { +func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { return address.Undef, err } - mi, err := n.StateMinerInfo(ctx, miner, tsk) + mi, err := n.StateMinerInfo(ctx, maddr, tsk) if err != nil { return address.Address{}, err } return mi.Worker, nil } -func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, miner address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { +func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { return 0, err } - mi, err := n.StateMinerInfo(ctx, miner, tsk) + mi, err := n.StateMinerInfo(ctx, maddr, tsk) if err != nil { return 0, err } - return mi.SealProofType, nil + + nver, err := n.StateNetworkVersion(ctx, tsk) + if err != nil { + return 0, err + } + + return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType) } func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { @@ -344,7 +353,7 @@ func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID // and the chain has advanced to the confidence height stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts2.Height() { + if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { onDealExpired(nil) return false, nil } diff --git a/node/builder.go b/node/builder.go index 8ee9b3674..1dd60ee1b 100644 --- a/node/builder.go +++ b/node/builder.go @@ -269,7 +269,7 @@ func 
Online() Option { Override(new(vm.SyscallBuilder), vm.Syscalls), Override(new(*store.ChainStore), modules.ChainStore), Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), - Override(new(*stmgr.StateManager), stmgr.NewStateManagerWithUpgradeSchedule), + Override(new(*stmgr.StateManager), modules.StateManager), Override(new(*wallet.LocalWallet), wallet.NewWallet), Override(new(wallet.Default), From(new(*wallet.LocalWallet))), Override(new(api.WalletAPI), From(new(wallet.MultiWallet))), diff --git a/node/config/def.go b/node/config/def.go index 68371c384..a20e0ceaa 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -69,6 +69,7 @@ type SealingConfig struct { type MinerFeeConfig struct { MaxPreCommitGasFee types.FIL MaxCommitGasFee types.FIL + MaxTerminateGasFee types.FIL MaxWindowPoStGasFee types.FIL MaxPublishDealsFee types.FIL MaxMarketBalanceAddFee types.FIL @@ -211,6 +212,7 @@ func DefaultStorageMiner() *StorageMiner { Fees: MinerFeeConfig{ MaxPreCommitGasFee: types.MustParseFIL("0.025"), MaxCommitGasFee: types.MustParseFIL("0.05"), + MaxTerminateGasFee: types.MustParseFIL("0.5"), MaxWindowPoStGasFee: types.MustParseFIL("5"), MaxPublishDealsFee: types.MustParseFIL("0.05"), MaxMarketBalanceAddFee: types.MustParseFIL("0.007"), diff --git a/node/impl/client/client.go b/node/impl/client/client.go index e90a31a80..00c840b63 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -7,6 +7,8 @@ import ( "io" "os" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "golang.org/x/xerrors" "github.com/filecoin-project/go-padreader" @@ -157,6 +159,16 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask } + networkVersion, err := a.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed to get network version: %w", err) + } + + st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) + if err != nil { + return nil, xerrors.Errorf("failed to get seal proof type: %w", err) + } + result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ Addr: params.Wallet, Info: &providerInfo, @@ -165,7 +177,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), Price: params.EpochPrice, Collateral: params.ProviderCollateral, - Rt: mi.SealProofType, + Rt: st, FastRetrieval: params.FastRetrieval, VerifiedDeal: params.VerifiedDeal, StoreID: storeID, diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index 189512a65..ec7074e5b 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -93,13 +93,15 @@ type gasMeta struct { limit int64 } +// finds 55th percentile instead of median to put negative pressure on gas price func medianGasPremium(prices []gasMeta, blocks int) abi.TokenAmount { sort.Slice(prices, func(i, j int) bool { // sort desc by price return prices[i].price.GreaterThan(prices[j].price) }) - at := build.BlockGasTarget * int64(blocks) / 2 + at := build.BlockGasTarget * int64(blocks) / 2 // 50th + at += build.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further prev1, prev2 := big.Zero(), big.Zero() for _, price := range prices { prev1, prev2 = price.price, prev1 @@ -227,6 +229,9 @@ func gasEstimateGasLimit( pending, ts := mpool.PendingFor(fromA) priorMsgs :=
make([]types.ChainMsg, 0, len(pending)) for _, m := range pending { + if m.Message.Nonce == msg.Nonce { + break + } priorMsgs = append(priorMsgs, m) } diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 957cf0b5b..91a1b74db 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -55,6 +55,7 @@ type StateModuleAPI interface { StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) } @@ -139,15 +140,10 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err) } - // TODO: You know, this is terrible. - // I mean, we _really_ shouldn't do this. Maybe we should convert somewhere else? info, err := mas.Info() if err != nil { return miner.MinerInfo{}, err } - if m.StateManager.GetNtwkVersion(ctx, ts.Height()) >= network.Version7 && info.SealProofType < abi.RegisteredSealProof_StackedDrg2KiBV1_1 { - info.SealProofType += abi.RegisteredSealProof_StackedDrg2KiBV1_1 - } return info, nil } @@ -169,13 +165,19 @@ func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, t out := make([]api.Deadline, deadlines) if err := mas.ForEachDeadline(func(i uint64, dl miner.Deadline) error { - ps, err := dl.PostSubmissions() + ps, err := dl.PartitionsPoSted() + if err != nil { + return err + } + + l, err := dl.DisputableProofCount() if err != nil { return err } out[i] = api.Deadline{ - PostSubmissions: ps, + PostSubmissions: ps, + DisputableProofCount: l, } return nil }); err != nil { @@ -589,8 +591,14 @@ func stateWaitMsgLimited(ctx context.Context, smgr *stmgr.StateManager, cstore * }, nil } -func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { - ts, recpt, found, err := a.StateManager.SearchForMessage(ctx, msg) +func (m *StateModule) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { + return stateSearchMsgLimited(ctx, m.StateManager, msg, stmgr.LookbackNoLimit) +} +func (a *StateAPI) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) { + return stateSearchMsgLimited(ctx, a.StateManager, msg, lookbackLimit) +} +func stateSearchMsgLimited(ctx context.Context, smgr *stmgr.StateManager, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) { + ts, recpt, found, err := smgr.SearchForMessage(ctx, msg, lookbackLimit) if err != nil { return nil, err } diff --git a/node/impl/market/market.go b/node/impl/market/market.go index e7fccc9ba..b62f2b40e 100644 --- a/node/impl/market/market.go +++ b/node/impl/market/market.go @@ -3,21 +3,49 @@ package market import ( "context" + "github.com/ipfs/go-cid" "go.uber.org/fx" - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors" + marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/market" 
"github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl/full" ) type MarketAPI struct { fx.In + full.MpoolAPI FMgr *market.FundManager } +func (a *MarketAPI) MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) { + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, aerr := a.MpoolPushMessage(ctx, &types.Message{ + To: marketactor.Address, + From: wallet, + Value: amt, + Method: marketactor.Methods.AddBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (a *MarketAPI) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) { + return a.FMgr.GetReserved(addr), nil +} + func (a *MarketAPI) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { return a.FMgr.Reserve(ctx, wallet, addr, amt) } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 7c1328361..fe79817a5 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -328,6 +328,18 @@ func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber return sm.Miner.RemoveSector(ctx, id) } +func (sm *StorageMinerAPI) SectorTerminate(ctx context.Context, id abi.SectorNumber) error { + return sm.Miner.TerminateSector(ctx, id) +} + +func (sm *StorageMinerAPI) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) { + return sm.Miner.TerminateFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.TerminatePending(ctx) +} + func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { return sm.Miner.MarkForUpgrade(id) } diff --git a/node/modules/client.go b/node/modules/client.go index 18bba2417..fcc93fb40 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -120,7 +120,11 @@ func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.Clien // uses the clients's Client DAG service for transfers func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) { sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter")) - net := dtnet.NewFromLibp2pHost(h) + + // go-data-transfer protocol retries: + // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour + dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) + net := dtnet.NewFromLibp2pHost(h, dtRetryParams) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers")) transport := dtgstransport.NewTransport(h.ID(), gs) @@ -129,7 +133,9 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap return nil, err } - dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc) + // data-transfer push channel restart configuration + dtRestartConfig := dtimpl.PushChannelRestartConfig(time.Minute, 10, 1024, 10*time.Minute, 3) + dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc, dtRestartConfig) if err != nil { return nil, err } @@ -153,7 +159,11 @@ func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore { } func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer 
dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, j journal.Journal) (storagemarket.StorageClient, error) { - net := smnet.NewFromLibp2pHost(h) + // go-fil-markets protocol retries: + // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour + marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) + net := smnet.NewFromLibp2pHost(h, marketsRetryParams) + c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, storageimpl.DealPollingInterval(time.Second)) if err != nil { return nil, err diff --git a/node/modules/core.go b/node/modules/core.go index 794a9dafe..83a7e8d42 100644 --- a/node/modules/core.go +++ b/node/modules/core.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "time" "github.com/gbrlsnchs/jwt/v3" @@ -14,6 +15,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" record "github.com/libp2p/go-libp2p-record" + "github.com/raulk/go-watchdog" "go.uber.org/fx" "golang.org/x/xerrors" @@ -28,7 +30,6 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/system" - "github.com/raulk/go-watchdog" ) const ( @@ -69,45 +70,71 @@ func MemoryConstraints() system.MemoryConstraints { // MemoryWatchdog starts the memory watchdog, applying the computed resource // constraints. -func MemoryWatchdog(lc fx.Lifecycle, constraints system.MemoryConstraints) { +func MemoryWatchdog(lr repo.LockedRepo, lc fx.Lifecycle, constraints system.MemoryConstraints) { if os.Getenv(EnvWatchdogDisabled) == "1" { log.Infof("memory watchdog is disabled via %s", EnvWatchdogDisabled) return } - cfg := watchdog.MemConfig{ - Resolution: 5 * time.Second, - Policy: &watchdog.WatermarkPolicy{ - Watermarks: []float64{0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95}, - EmergencyWatermark: 0.95, - }, - Logger: logWatchdog, + // configure heap profile capture so that one is captured per episode where + // utilization climbs over 90% of the limit. A maximum of 10 heapdumps + // will be captured during life of this process. + watchdog.HeapProfileDir = filepath.Join(lr.Path(), "heapprof") + watchdog.HeapProfileMaxCaptures = 10 + watchdog.HeapProfileThreshold = 0.9 + watchdog.Logger = logWatchdog + + policy := watchdog.NewWatermarkPolicy(0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95) + + // Try to initialize a watchdog in the following order of precedence: + // 1. If a max heap limit has been provided, initialize a heap-driven watchdog. + // 2. Else, try to initialize a cgroup-driven watchdog. + // 3. Else, try to initialize a system-driven watchdog. + // 4. Else, log a warning that the system is flying solo, and return. + + addStopHook := func(stopFn func()) { + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + stopFn() + return nil + }, + }) } - // if user has set max heap limit, apply it. Otherwise, fall back to total - // system memory constraint. + // 1. If user has set max heap limit, apply it. 
if maxHeap := constraints.MaxHeapMem; maxHeap != 0 { - log.Infof("memory watchdog will apply max heap constraint: %d bytes", maxHeap) - cfg.Limit = maxHeap - cfg.Scope = watchdog.ScopeHeap - } else { - log.Infof("max heap size not provided; memory watchdog will apply total system memory constraint: %d bytes", constraints.TotalSystemMem) - cfg.Limit = constraints.TotalSystemMem - cfg.Scope = watchdog.ScopeSystem + const minGOGC = 10 + err, stopFn := watchdog.HeapDriven(maxHeap, minGOGC, policy) + if err == nil { + log.Infof("initialized heap-driven watchdog; max heap: %d bytes", maxHeap) + addStopHook(stopFn) + return + } + log.Warnf("failed to initialize heap-driven watchdog; err: %s", err) + log.Warnf("trying a cgroup-driven watchdog") } - err, stop := watchdog.Memory(cfg) - if err != nil { - log.Warnf("failed to instantiate memory watchdog: %s", err) + // 2. cgroup-driven watchdog. + err, stopFn := watchdog.CgroupDriven(5*time.Second, policy) + if err == nil { + log.Infof("initialized cgroup-driven watchdog") + addStopHook(stopFn) + return + } + log.Warnf("failed to initialize cgroup-driven watchdog; err: %s", err) + log.Warnf("trying a system-driven watchdog") + + // 3. system-driven watchdog. + err, stopFn = watchdog.SystemDriven(0, 5*time.Second, policy) // 0 calculates the limit automatically. + if err == nil { + log.Infof("initialized system-driven watchdog") + addStopHook(stopFn) return } - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - stop() - return nil - }, - }) + // 4. log the failure + log.Warnf("failed to initialize system-driven watchdog; err: %s", err) + log.Warnf("system running without a memory watchdog") } type JwtPayload struct { @@ -166,7 +193,7 @@ func BuiltinBootstrap() (dtypes.BootstrapPeers, error) { func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, error) { // TODO: retry resolving, don't fail if at least one resolve succeeds - res := []peer.AddrInfo{} + var res []peer.AddrInfo for _, d := range ds { addrs, err := addrutil.ParseAddresses(context.TODO(), d.Config.Relays) if err != nil { diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go new file mode 100644 index 000000000..9d3917b85 --- /dev/null +++ b/node/modules/stmgr.go @@ -0,0 +1,20 @@ +package modules + +import ( + "go.uber.org/fx" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" +) + +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, us stmgr.UpgradeSchedule) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: sm.Start, + OnStop: sm.Stop, + }) + return sm, nil +} diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 30f84aeaf..2a0b3f8b2 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -58,6 +58,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" @@ -66,7 +67,7 @@ import ( "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/markets/retrievaladapter" - "github.com/filecoin-project/lotus/miner" + lotusminer 
"github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -127,8 +128,12 @@ func SealProofType(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (abi.Register if err != nil { return 0, err } + networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK) + if err != nil { + return 0, err + } - return mi.SealProofType, nil + return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) } type sidsc struct { @@ -419,13 +424,13 @@ func StagingGraphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.Stagi return gs } -func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api lapi.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*miner.Miner, error) { +func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api lapi.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) { minerAddr, err := minerAddrFromDS(ds) if err != nil { return nil, err } - m := miner.NewMiner(api, epp, minerAddr, sf, j) + m := lotusminer.NewMiner(api, epp, minerAddr, sf, j) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { diff --git a/node/node_test.go b/node/node_test.go index 0baa047da..142e8875a 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -164,6 +164,20 @@ func TestWindowedPost(t *testing.T) { test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10) } +func TestTerminate(t *testing.T) { + if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { + t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") + } + + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond) +} + func TestCCUpgrade(t *testing.T) { logging.SetLogLevel("miner", "ERROR") logging.SetLogLevel("chainstore", "ERROR") @@ -184,3 +198,29 @@ func TestPaymentChannels(t *testing.T) { test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond) } + +func TestWindowPostDispute(t *testing.T) { + if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { + t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") + } + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestWindowPostDispute(t, builder.MockSbBuilder, 2*time.Millisecond) +} + +func TestWindowPostDisputeFails(t *testing.T) { + if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { + t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") + } + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond) +} diff --git a/node/test/builder.go b/node/test/builder.go index f6599cf23..94ddf6a4a 100644 --- a/node/test/builder.go +++ b/node/test/builder.go @@ -148,7 +148,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr } } - return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne} 
+ return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne, Stop: stop} } func Builder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { @@ -491,34 +491,40 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes } func fullRpc(t *testing.T, nd test.TestNode) test.TestNode { - ma, listenAddr, err := CreateRPCServer(nd) + ma, listenAddr, err := CreateRPCServer(t, nd) require.NoError(t, err) + var stop func() var full test.TestNode - full.FullNode, _, err = client.NewFullNodeRPC(context.Background(), listenAddr, nil) + full.FullNode, stop, err = client.NewFullNodeRPC(context.Background(), listenAddr, nil) require.NoError(t, err) + t.Cleanup(stop) full.ListenAddr = ma return full } func storerRpc(t *testing.T, nd test.TestStorageNode) test.TestStorageNode { - ma, listenAddr, err := CreateRPCServer(nd) + ma, listenAddr, err := CreateRPCServer(t, nd) require.NoError(t, err) + var stop func() var storer test.TestStorageNode - storer.StorageMiner, _, err = client.NewStorageMinerRPC(context.Background(), listenAddr, nil) + storer.StorageMiner, stop, err = client.NewStorageMinerRPC(context.Background(), listenAddr, nil) require.NoError(t, err) + t.Cleanup(stop) storer.ListenAddr = ma storer.MineOne = nd.MineOne return storer } -func CreateRPCServer(handler interface{}) (multiaddr.Multiaddr, string, error) { +func CreateRPCServer(t *testing.T, handler interface{}) (multiaddr.Multiaddr, string, error) { rpcServer := jsonrpc.NewServer() rpcServer.Register("Filecoin", handler) testServ := httptest.NewServer(rpcServer) // todo: close + t.Cleanup(testServ.Close) + t.Cleanup(testServ.CloseClientConnections) addr := testServ.Listener.Addr() listenAddr := "ws://" + addr.String() diff --git a/paychmgr/settler/settler.go b/paychmgr/settler/settler.go index 41aaca665..131cd25a7 100644 --- a/paychmgr/settler/settler.go +++ b/paychmgr/settler/settler.go @@ -73,6 +73,11 @@ func (pcs *paymentChannelSettler) check(ts *types.TipSet) (done bool, more bool, } func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + // Ignore unsuccessful settle messages + if rec.ExitCode != 0 { + return true, nil + } + bestByLane, err := paychmgr.BestSpendableByLane(pcs.ctx, pcs.api, msg.To) if err != nil { return true, err diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 071ad30df..20bf30825 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -4,8 +4,6 @@ import ( "bytes" "context" - "github.com/filecoin-project/go-state-types/network" - "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -14,6 +12,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" @@ -243,6 +243,15 @@ func (s SealingAPIAdapter) StateSectorPartition(ctx context.Context, maddr addre return nil, nil // not found } +func (s SealingAPIAdapter) StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tok sealing.TipSetToken) ([]api.Partition, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, 
xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerPartitions(ctx, maddr, dlIdx, tsk) +} + func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (market.DealProposal, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { @@ -266,6 +275,15 @@ func (s SealingAPIAdapter) StateNetworkVersion(ctx context.Context, tok sealing. return s.delegate.StateNetworkVersion(ctx, tsk) } +func (s SealingAPIAdapter) StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (*dline.Info, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, err + } + + return s.delegate.StateMinerProvingDeadline(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) { msg := types.Message{ To: to, diff --git a/storage/addresses.go b/storage/addresses.go index 5da8643cd..ad0c6d683 100644 --- a/storage/addresses.go +++ b/storage/addresses.go @@ -30,6 +30,8 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m addrs = append(addrs, as.PreCommitControl...) case api.CommitAddr: addrs = append(addrs, as.CommitControl...) + case api.TerminateSectorsAddr: + addrs = append(addrs, as.TerminateControl...) default: defaultCtl := map[address.Address]struct{}{} for _, a := range mi.ControlAddresses { @@ -129,6 +131,6 @@ func maybeUseAddress(ctx context.Context, a addrSelectApi, addr address.Address, *bestAvail = b } - log.Warnw("address didn't have enough funds for window post message", "address", addr, "required", types.FIL(goodFunds), "balance", types.FIL(b)) + log.Warnw("address didn't have enough funds to send message", "address", addr, "required", types.FIL(goodFunds), "balance", types.FIL(b)) return false } diff --git a/storage/miner.go b/storage/miner.go index a0d5a6a92..425664991 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -148,6 +148,7 @@ func (m *Miner) Run(ctx context.Context) error { fc := sealing.FeeConfig{ MaxPreCommitGasFee: abi.TokenAmount(m.feeCfg.MaxPreCommitGasFee), MaxCommitGasFee: abi.TokenAmount(m.feeCfg.MaxCommitGasFee), + MaxTerminateGasFee: abi.TokenAmount(m.feeCfg.MaxTerminateGasFee), } evts := events.NewEvents(ctx, m.api) @@ -224,18 +225,13 @@ func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiw return nil, xerrors.Errorf("getting sector size: %w", err) } - wpt, err := mi.SealProofType.RegisteredWinningPoStProof() - if err != nil { - return nil, err - } - if build.InsecurePoStValidation { log.Warn("*****************************************************************************") log.Warn(" Generating fake PoSt proof! You should only see this while running tests! 
") log.Warn("*****************************************************************************") } - return &StorageWpp{prover, verifier, abi.ActorID(miner), wpt}, nil + return &StorageWpp{prover, verifier, abi.ActorID(miner), mi.WindowPoStProofType}, nil } var _ gen.WinningPoStProver = (*StorageWpp)(nil) diff --git a/storage/sealing.go b/storage/sealing.go index 2cd454e5b..d07a14810 100644 --- a/storage/sealing.go +++ b/storage/sealing.go @@ -4,6 +4,8 @@ import ( "context" "io" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -44,6 +46,18 @@ func (m *Miner) RemoveSector(ctx context.Context, id abi.SectorNumber) error { return m.sealing.Remove(ctx, id) } +func (m *Miner) TerminateSector(ctx context.Context, id abi.SectorNumber) error { + return m.sealing.Terminate(ctx, id) +} + +func (m *Miner) TerminateFlush(ctx context.Context) (*cid.Cid, error) { + return m.sealing.TerminateFlush(ctx) +} + +func (m *Miner) TerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.TerminatePending(ctx) +} + func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { return m.sealing.MarkForUpgrade(id) } diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index f81a60a1e..3d6073a63 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -47,18 +47,13 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, as return nil, xerrors.Errorf("getting sector size: %w", err) } - rt, err := mi.SealProofType.RegisteredWindowPoStProof() - if err != nil { - return nil, err - } - return &WindowPoStScheduler{ api: api, feeCfg: fc, addrSel: as, prover: sb, faultTracker: ft, - proofType: rt, + proofType: mi.WindowPoStProofType, partitionSectors: mi.WindowPoStPartitionSectors, actor: actor,