Merge branch 'master' into chore/snake_context_through_blockstore_init
commit ce3af308ed

@@ -1,6 +1,7 @@
version: 2.1
orbs:
  go: gotest/tools@0.0.13
  aws-cli: circleci/aws-cli@1.3.2

executors:
  golang:

@@ -200,6 +201,8 @@ jobs:
    <<: *test
  test-window-post:
    <<: *test
  test-terminate:
    <<: *test
  test-conformance:
    description: |
      Run tests using a corpus of interoperable test vectors for Filecoin

@@ -445,6 +448,114 @@ jobs:
          name: Publish release
          command: ./scripts/publish-release.sh

  build-and-push-image:
    description: build and push docker images to public AWS ECR registry
    executor: aws-cli/default
    parameters:
      profile-name:
        type: string
        default: "default"
        description: AWS profile name to be configured.

      aws-access-key-id:
        type: env_var_name
        default: AWS_ACCESS_KEY_ID
        description: >
          AWS access key id for IAM role. Set this to the name of
          the environment variable you will set to hold this
          value, i.e. AWS_ACCESS_KEY.

      aws-secret-access-key:
        type: env_var_name
        default: AWS_SECRET_ACCESS_KEY
        description: >
          AWS secret key for IAM role. Set this to the name of
          the environment variable you will set to hold this
          value, i.e. AWS_SECRET_ACCESS_KEY.

      region:
        type: env_var_name
        default: AWS_REGION
        description: >
          Name of env var storing your AWS region information,
          defaults to AWS_REGION

      account-url:
        type: env_var_name
        default: AWS_ECR_ACCOUNT_URL
        description: >
          Env var storing Amazon ECR account URL that maps to an AWS account,
          e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
          defaults to AWS_ECR_ACCOUNT_URL

      dockerfile:
        type: string
        default: Dockerfile
        description: Name of dockerfile to use. Defaults to Dockerfile.

      path:
        type: string
        default: .
        description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).

      extra-build-args:
        type: string
        default: ""
        description: >
          Extra flags to pass to docker build. For examples, see
          https://docs.docker.com/engine/reference/commandline/build

      repo:
        type: string
        description: Name of an Amazon ECR repository

      tag:
        type: string
        default: "latest"
        description: A comma-separated string containing docker image tags to build and push (default = latest)

    steps:
      - aws-cli/setup:
          profile-name: <<parameters.profile-name>>
          aws-access-key-id: <<parameters.aws-access-key-id>>
          aws-secret-access-key: <<parameters.aws-secret-access-key>>
          aws-region: <<parameters.region>>

      - run:
          name: Log into Amazon ECR
          command: |
            aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>

      - checkout

      - setup_remote_docker:
          version: 19.03.13
          docker_layer_caching: false

      - run:
          name: Build docker image
          command: |
            registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")

            docker_tag_args=""
            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
            for tag in "${DOCKER_TAGS[@]}"; do
              docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
            done

            docker build \
              <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
              -f <<parameters.path>>/<<parameters.dockerfile>> \
              $docker_tag_args \
              <<parameters.path>>

      - run:
          name: Push image to Amazon ECR
          command: |
            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
            for tag in "${DOCKER_TAGS[@]}"; do
              docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
            done

workflows:
  version: 2.1

@@ -476,9 +587,15 @@ workflows:
          test-suite-name: cli
          packages: "./cli/... ./cmd/... ./api/..."
      - test-window-post:
          codecov-upload: true
          go-test-flags: "-run=TestWindowedPost"
          winpost-test: "1"
          test-suite-name: window-post
      - test-terminate:
          codecov-upload: true
          go-test-flags: "-run=TestTerminate"
          winpost-test: "1"
          test-suite-name: terminate
      - test-short:
          go-test-flags: "--timeout 10m --short"
          test-suite-name: short

@@ -529,3 +646,8 @@ workflows:
            tags:
              only:
                - /^v\d+\.\d+\.\d+$/
      - build-and-push-image:
          dockerfile: Dockerfile.lotus
          path: .
          repo: lotus-dev
          tag: '${CIRCLE_SHA1:0:8}'
.codecov.yml (10 lines changed)
@@ -5,5 +5,15 @@ ignore:
  - "api/test/*"
  - "gen/**/*"
  - "gen/*"
  - "cmd/lotus-shed/*"
  - "cmd/tvx/*"
  - "cmd/lotus-pcr/*"
  - "cmd/tvx/*"
  - "cmd/lotus-chainwatch/*"
  - "cmd/lotus-health/*"
  - "cmd/lotus-fountain/*"
  - "cmd/lotus-townhall/*"
  - "cmd/lotus-stats/*"
  - "cmd/lotus-pcr/*"
github_checks:
  annotations: false
CHANGELOG.md (75 lines changed)
@@ -1,5 +1,80 @@
# Lotus changelog

# 1.4.1 / 2021-01-20

This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors.

## Changes

#### Core Lotus

- fix(sync): enforce ForkLengthThreshold for synced chain (https://github.com/filecoin-project/lotus/pull/5182)
- introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101)
- Skip bootstrapping if no peers specified (https://github.com/filecoin-project/lotus/pull/5301)
- Chainxchg write response timeout (https://github.com/filecoin-project/lotus/pull/5254)
- update NewestNetworkVersion (https://github.com/filecoin-project/lotus/pull/5277)
- fix(sync): remove checks bypass when we submit the block (https://github.com/filecoin-project/lotus/pull/4192)
- chore: export vm.ShouldBurn (https://github.com/filecoin-project/lotus/pull/5355)
- fix(sync): enforce fork len when changing head (https://github.com/filecoin-project/lotus/pull/5244)
- Use 55th percentile instead of median for gas-price (https://github.com/filecoin-project/lotus/pull/5369)
- update go-libp2p-pubsub to v0.4.1 (https://github.com/filecoin-project/lotus/pull/5329)

#### Sealing

- Sector termination support (https://github.com/filecoin-project/lotus/pull/5341)
- update weight canSeal and canStore when attach (https://github.com/filecoin-project/lotus/pull/5242/files)
- sector-storage/mock: improve mocked readpiece (https://github.com/filecoin-project/lotus/pull/5208)
- Fix deadlock in runWorker in sched_worker.go (https://github.com/filecoin-project/lotus/pull/5251)
- Skip checking terminated sectors provable (https://github.com/filecoin-project/lotus/pull/5217)
- storagefsm: Fix unsealedInfoMap.lk init race (https://github.com/filecoin-project/lotus/pull/5319)
- Multicore AddPiece CommP (https://github.com/filecoin-project/lotus/pull/5320)
- storagefsm: Send correct event on ErrExpiredTicket in CommitFailed (https://github.com/filecoin-project/lotus/pull/5366)
- expose StateSearchMessage on gateway (https://github.com/filecoin-project/lotus/pull/5382)
- fix FileSize to return correct disk usage recursively (https://github.com/filecoin-project/lotus/pull/5384)

#### Dealmaking

- Better error message when withdrawing funds (https://github.com/filecoin-project/lotus/pull/5293)
- add verbose for list transfers (https://github.com/filecoin-project/lotus/pull/5259)
- cli - rename `client info` to `client balances` (https://github.com/filecoin-project/lotus/pull/5304)
- Better CLI for wallet market withdraw and client info (https://github.com/filecoin-project/lotus/pull/5303)

#### UX

- correct flag usages for replace cmd (https://github.com/filecoin-project/lotus/pull/5255)
- lotus state call will panic (https://github.com/filecoin-project/lotus/pull/5275)
- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976)
- feat: lotus wallet market add (adds funds to storage market actor) (https://github.com/filecoin-project/lotus/pull/5300)
- Fix client flag parsing in client balances cli (https://github.com/filecoin-project/lotus/pull/5312)
- delete slash-consensus miner (https://github.com/filecoin-project/lotus/pull/4577)
- add fund sufficient check in send (https://github.com/filecoin-project/lotus/pull/5252)
- enable parse and shorten negative FIL values (https://github.com/filecoin-project/lotus/pull/5315)
- add limit and rate for chain noise (https://github.com/filecoin-project/lotus/pull/5223)
- add bench env print (https://github.com/filecoin-project/lotus/pull/5222)
- Implement full-node restore option (https://github.com/filecoin-project/lotus/pull/5362)
- add color for token amount (https://github.com/filecoin-project/lotus/pull/5352)
- correct log in maybeUseAddress (https://github.com/filecoin-project/lotus/pull/5359)
- add slash-consensus from flag (https://github.com/filecoin-project/lotus/pull/5378)

#### Testing

- tvx extract: more tipset extraction goodness (https://github.com/filecoin-project/lotus/pull/5258)
- Fix race in blockstore test suite (https://github.com/filecoin-project/lotus/pull/5297)


#### Build & Networks

- Remove LOTUS_DISABLE_V2_ACTOR_MIGRATION envvar (https://github.com/filecoin-project/lotus/pull/5289)
- Create a calibnet build option (https://github.com/filecoin-project/lotus/pull/5288)
- Calibnet: Set Orange epoch (https://github.com/filecoin-project/lotus/pull/5325)

#### Management

- Update SECURITY.md (https://github.com/filecoin-project/lotus/pull/5246)
- README: Contribute section (https://github.com/filecoin-project/lotus/pull/5330)
- README: refine Contribute section (https://github.com/filecoin-project/lotus/pull/5331)
- Add misc tooling to codecov ignore list (https://github.com/filecoin-project/lotus/pull/5347)

# 1.4.0 / 2020-12-19

This is a MANDATORY hotfix release of Lotus that resolves a chain halt at height 336,459 caused by nondeterminism in specs-actors. The fix is to update actors to 2.3.3 in order to incorporate this fix https://github.com/filecoin-project/specs-actors/pull/1334.
Dockerfile.lotus (new file, 74 lines)
@@ -0,0 +1,74 @@
FROM golang:1.15.6 AS builder-deps
MAINTAINER Lotus Development Team

RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev

ARG RUST_VERSION=nightly
ENV XDG_CACHE_HOME="/tmp"

ENV RUSTUP_HOME=/usr/local/rustup \
    CARGO_HOME=/usr/local/cargo \
    PATH=/usr/local/cargo/bin:$PATH

RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
    chmod +x rustup-init; \
    ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
    rm rustup-init; \
    chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
    rustup --version; \
    cargo --version; \
    rustc --version;


FROM builder-deps AS builder-local
MAINTAINER Lotus Development Team

COPY ./ /opt/filecoin
WORKDIR /opt/filecoin
RUN make clean deps


FROM builder-local AS builder
MAINTAINER Lotus Development Team

WORKDIR /opt/filecoin

ARG RUSTFLAGS=""
ARG GOFLAGS=""

RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats


FROM ubuntu:20.04 AS base
MAINTAINER Lotus Development Team

# Base resources
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/

RUN useradd -r -u 532 -U fc


FROM base AS lotus
MAINTAINER Lotus Development Team

COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/

ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus

RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters

USER fc

ENTRYPOINT ["/usr/local/bin/lotus"]

CMD ["-help"]
Makefile (3 lines changed)
@@ -63,6 +63,9 @@ debug: lotus lotus-miner lotus-worker lotus-seed
2k: GOFLAGS+=-tags=2k
2k: lotus lotus-miner lotus-worker lotus-seed

calibnet: GOFLAGS+=-tags=calibnet
calibnet: lotus lotus-miner lotus-worker lotus-seed

lotus: $(BUILD_DEPS)
	rm -f lotus
	go build $(GOFLAGS) -o lotus ./cmd/lotus
README.md (37 lines changed)
@@ -24,24 +24,31 @@ For instructions on how to build, install and setup lotus, please visit [https:/

Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.

## Development
## Related packages

The main branches under development at the moment are:
* [`master`](https://github.com/filecoin-project/lotus): current testnet.
* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits.
These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:

### Tracker

All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch.

### Packages

The lotus Filecoin implementation unfolds into the following packages:

- [This repo](https://github.com/filecoin-project/lotus)
- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)

## Contribute

Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations:

1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs).
2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted.
3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn.

When implementing a change:

1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`.
2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc.
3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go.
4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers.
5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted).
6. Add tests.
7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.

## License

@ -391,6 +391,8 @@ type FullNode interface {
|
||||
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
|
||||
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
|
||||
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
|
||||
// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
|
||||
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*MsgLookup, error)
|
||||
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
|
||||
// message arrives on chain, and gets to the indicated confidence depth.
|
||||
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
|
||||
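The hunk above adds `StateSearchMsgLimited` next to `StateSearchMsg` on the `FullNode` interface. A minimal sketch of how a caller might prefer the bounded variant to cap the chain walk; the wrapper function, the 2880-epoch limit, and the way the client and message CID are obtained are assumptions, not part of this diff:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// findReceipt looks up a message receipt with a bounded lookback. The client
// and message CID are assumed to come from elsewhere (placeholders here).
func findReceipt(ctx context.Context, full api.FullNode, msgCid cid.Cid) error {
	const lookback = abi.ChainEpoch(2880) // roughly one day of 30s epochs (illustrative)

	lookup, err := full.StateSearchMsgLimited(ctx, msgCid, lookback)
	if err != nil {
		return err
	}
	if lookup == nil {
		return fmt.Errorf("message %s not executed within the last %d epochs", msgCid, lookback)
	}
	fmt.Printf("executed in tipset %s with exit code %d\n", lookup.TipSet, lookup.Receipt.ExitCode)
	return nil
}
```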
@ -515,6 +517,10 @@ type FullNode interface {
|
||||
// along with the address removal.
|
||||
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)
|
||||
|
||||
// MarketAddBalance adds funds to the market actor
|
||||
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error)
|
||||
// MarketGetReserved gets the amount of funds that are currently reserved for the address
|
||||
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error)
|
||||
// MarketReserveFunds reserves funds for a deal
|
||||
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error)
|
||||
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
|
||||
|
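The `MarketAddBalance`/`MarketGetReserved` additions above back the new `lotus wallet market add` command mentioned in the changelog. A hedged sketch of a caller adding market funds and checking the reservation afterwards; the helper name, the confidence value, and the wallet/recipient addresses are illustrative assumptions:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// addMarketFunds tops up addr's escrow in the storage market actor, paying
// from wallet. Both addresses are assumed inputs (placeholders here).
func addMarketFunds(ctx context.Context, full api.FullNode, wallet, addr address.Address, amt types.BigInt) error {
	msgCid, err := full.MarketAddBalance(ctx, wallet, addr, amt)
	if err != nil {
		return err
	}

	// Wait for the add-balance message to land on chain.
	lookup, err := full.StateWaitMsg(ctx, msgCid, 1)
	if err != nil {
		return err
	}
	if lookup.Receipt.ExitCode != 0 {
		return fmt.Errorf("add balance failed with exit code %d", lookup.Receipt.ExitCode)
	}

	reserved, err := full.MarketGetReserved(ctx, addr)
	if err != nil {
		return err
	}
	fmt.Printf("market funds added; currently reserved: %s\n", types.FIL(reserved))
	return nil
}
```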
@ -39,6 +39,7 @@ type GatewayAPI interface {
|
||||
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
||||
StateSearchMsg(ctx context.Context, msg cid.Cid) (*MsgLookup, error)
|
||||
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
|
||||
|
@ -65,7 +65,17 @@ type StorageMiner interface {
|
||||
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
|
||||
SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
|
||||
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
|
||||
// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
|
||||
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
|
||||
SectorRemove(context.Context, abi.SectorNumber) error
|
||||
// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
|
||||
// automatically removes it from storage
|
||||
SectorTerminate(context.Context, abi.SectorNumber) error
|
||||
// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
|
||||
// Returns null if message wasn't sent
|
||||
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error)
|
||||
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
|
||||
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error)
|
||||
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
|
||||
|
||||
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
|
||||
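`SectorTerminate`, `SectorTerminateFlush` and `SectorTerminatePending` above expose the batched sector-termination flow from the 1.4.1 changelog. A rough sketch of how a tool might drive it, assuming an already-constructed `StorageMiner` client (the wrapper function and its wiring are hypothetical):

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// terminateSector queues a sector for on-chain termination and then forces
// the batched terminate message out immediately. The miner client is assumed.
func terminateSector(ctx context.Context, miner api.StorageMiner, num abi.SectorNumber) error {
	// Queue the sector; it is added to a termination batch rather than
	// being terminated on-chain immediately.
	if err := miner.SectorTerminate(ctx, num); err != nil {
		return err
	}

	// Inspect what is currently queued for the next terminate message.
	pending, err := miner.SectorTerminatePending(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d sector(s) pending termination\n", len(pending))

	// Flush the batch now instead of waiting; returns nil if nothing was sent.
	msgCid, err := miner.SectorTerminateFlush(ctx)
	if err != nil {
		return err
	}
	if msgCid != nil {
		fmt.Println("terminate message sent:", msgCid)
	}
	return nil
}
```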
@ -217,9 +227,12 @@ const (
|
||||
PreCommitAddr AddrUse = iota
|
||||
CommitAddr
|
||||
PoStAddr
|
||||
|
||||
TerminateSectorsAddr
|
||||
)
|
||||
|
||||
type AddressConfig struct {
|
||||
PreCommitControl []address.Address
|
||||
CommitControl []address.Address
|
||||
TerminateControl []address.Address
|
||||
}
|
||||
|
@ -206,6 +206,7 @@ type FullNodeStruct struct {
|
||||
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateSearchMsgLimited func(context.Context, cid.Cid, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
|
||||
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
|
||||
@ -244,6 +245,8 @@ type FullNodeStruct struct {
|
||||
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MarketAddBalance func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketGetReserved func(ctx context.Context, addr address.Address) (types.BigInt, error) `perm:"sign"`
|
||||
MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"`
|
||||
MarketWithdraw func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
@ -312,6 +315,9 @@ type StorageMinerStruct struct {
|
||||
SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"`
|
||||
SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"admin"`
|
||||
SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`
|
||||
SectorTerminate func(context.Context, abi.SectorNumber) error `perm:"admin"`
|
||||
SectorTerminateFlush func(ctx context.Context) (*cid.Cid, error) `perm:"admin"`
|
||||
SectorTerminatePending func(ctx context.Context) ([]abi.SectorID, error) `perm:"admin"`
|
||||
SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"`
|
||||
|
||||
WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm
|
||||
@ -438,6 +444,7 @@ type GatewayStruct struct {
|
||||
StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
|
||||
StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
|
||||
StateSearchMsg func(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
|
||||
StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
|
||||
StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error)
|
||||
StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
@ -1008,6 +1015,10 @@ func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api
|
||||
return c.Internal.StateSearchMsg(ctx, msgc)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) StateSearchMsgLimited(ctx context.Context, msgc cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
|
||||
return c.Internal.StateSearchMsgLimited(ctx, msgc, limit)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
|
||||
return c.Internal.StateListMiners(ctx, tsk)
|
||||
}
|
||||
@ -1148,6 +1159,14 @@ func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Addr
|
||||
return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketAddBalance(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
|
||||
return c.Internal.MarketAddBalance(ctx, wallet, addr, amt)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) {
|
||||
return c.Internal.MarketGetReserved(ctx, addr)
|
||||
}
|
||||
|
||||
func (c *FullNodeStruct) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
|
||||
return c.Internal.MarketReserveFunds(ctx, wallet, addr, amt)
|
||||
}
|
||||
@ -1300,6 +1319,18 @@ func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.Sector
|
||||
return c.Internal.SectorRemove(ctx, number)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminate(ctx context.Context, number abi.SectorNumber) error {
|
||||
return c.Internal.SectorTerminate(ctx, number)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) {
|
||||
return c.Internal.SectorTerminateFlush(ctx)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) {
|
||||
return c.Internal.SectorTerminatePending(ctx)
|
||||
}
|
||||
|
||||
func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error {
|
||||
return c.Internal.SectorMarkForUpgrade(ctx, number)
|
||||
}
|
||||
@ -1754,6 +1785,10 @@ func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSet
|
||||
return g.Internal.StateNetworkVersion(ctx, tsk)
|
||||
}
|
||||
|
||||
func (g GatewayStruct) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
|
||||
return g.Internal.StateSearchMsg(ctx, msg)
|
||||
}
|
||||
|
||||
func (g GatewayStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
|
||||
return g.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
|
||||
}
|
||||
|
@ -8,103 +8,40 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
dag "github.com/ipfs/go-merkledag"
|
||||
dstest "github.com/ipfs/go-merkledag/test"
|
||||
unixfile "github.com/ipfs/go-unixfs/file"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
)
|
||||
|
||||
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
MakeDeal(t, ctx, 6, client, miner, carExport, fastRet, startEpoch)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
|
||||
}
|
||||
|
||||
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
MakeDeal(t, ctx, 6, client, miner, false, false, startEpoch)
|
||||
MakeDeal(t, ctx, 7, client, miner, false, false, startEpoch)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
|
||||
MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
|
||||
}
|
||||
|
||||
func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
|
||||
@ -152,95 +89,41 @@ func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api
|
||||
}
|
||||
|
||||
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
data := make([]byte, 1600)
|
||||
rand.New(rand.NewSource(int64(8))).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
fcid, err := s.client.ClientImportLocal(s.ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
deal := startDeal(t, ctx, miner, client, fcid, true, startEpoch)
|
||||
deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)
|
||||
|
||||
waitDealPublished(t, ctx, miner, deal)
|
||||
waitDealPublished(t, s.ctx, s.miner, deal)
|
||||
fmt.Println("deal published, retrieving")
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal)
|
||||
info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
|
||||
require.NoError(t, err)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
|
||||
}
|
||||
|
||||
func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
{
|
||||
data1 := make([]byte, 800)
|
||||
rand.New(rand.NewSource(int64(3))).Read(data1)
|
||||
r := bytes.NewReader(data1)
|
||||
|
||||
fcid1, err := client.ClientImportLocal(ctx, r)
|
||||
fcid1, err := s.client.ClientImportLocal(s.ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -249,35 +132,31 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
|
||||
rand.New(rand.NewSource(int64(9))).Read(data2)
|
||||
r2 := bytes.NewReader(data2)
|
||||
|
||||
fcid2, err := client.ClientImportLocal(ctx, r2)
|
||||
fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
deal1 := startDeal(t, ctx, miner, client, fcid1, true, 0)
|
||||
deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal1, true)
|
||||
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true)
|
||||
|
||||
deal2 := startDeal(t, ctx, miner, client, fcid2, true, 0)
|
||||
deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal2, false)
|
||||
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false)
|
||||
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal2)
|
||||
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
|
||||
require.NoError(t, err)
|
||||
|
||||
rf, _ := miner.SectorsRefs(ctx)
|
||||
rf, _ := s.miner.SectorsRefs(s.ctx)
|
||||
fmt.Printf("refs: %+v\n", rf)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
|
||||
testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
|
||||
@ -459,3 +338,40 @@ func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath strin
|
||||
}
|
||||
return rdata
|
||||
}
|
||||
|
||||
type dealsScaffold struct {
|
||||
ctx context.Context
|
||||
client *impl.FullNodeAPI
|
||||
miner TestStorageNode
|
||||
blockMiner *BlockMiner
|
||||
}
|
||||
|
||||
func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
|
||||
n, sn := b(t, OneFull, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
return connectAndStartMining(t, b, blocktime, client, miner)
|
||||
}
|
||||
|
||||
func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
|
||||
ctx := context.Background()
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
blockMiner := NewBlockMiner(ctx, t, miner, blocktime)
|
||||
blockMiner.MineBlocks()
|
||||
|
||||
return &dealsScaffold{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
miner: miner,
|
||||
blockMiner: blockMiner,
|
||||
}
|
||||
}
|
||||
|
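With the new `dealsScaffold` and `setupOneClientOneMiner` helpers above, the per-test mining goroutines disappear. A sketch of what a test built on the scaffold might look like; it relies on the package's own `APIBuilder` and `MakeDeal` helpers shown in this diff, and the test name and arguments are purely illustrative:

```go
// Illustrative only, not part of the change: would live alongside the
// existing deal tests in this package.
func TestExampleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop() // stop the background block production started by the scaffold

	// The deal helpers now take the scaffold's context, client and miner directly.
	MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, 0)
}
```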
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -157,7 +158,11 @@ func (ts *testSuite) testVersion(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, v.Version, build.BuildVersion)
|
||||
versions := strings.Split(v.Version, "+")
|
||||
if len(versions) <= 0 {
|
||||
t.Fatal("empty version")
|
||||
}
|
||||
require.Equal(t, versions[0], build.BuildVersion)
|
||||
}
|
||||
|
||||
func (ts *testSuite) testSearchMsg(t *testing.T) {
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||
@ -211,6 +212,7 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int,
|
||||
upgradeHeight abi.ChainEpoch) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@ -428,3 +430,175 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
|
||||
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
|
||||
require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
|
||||
}
|
||||
|
||||
func TestTerminate(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
nSectors := uint64(2)
|
||||
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(1)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}})
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
ssz, err := miner.ActorSectorSize(ctx, maddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
|
||||
|
||||
fmt.Printf("Seal a sector\n")
|
||||
|
||||
pledgeSectors(t, ctx, miner, 1, 0, nil)
|
||||
|
||||
fmt.Printf("wait for power\n")
|
||||
|
||||
{
|
||||
// Wait until proven.
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
|
||||
fmt.Printf("End for head.Height > %d\n", waitUntil)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > waitUntil {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nSectors++
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
|
||||
|
||||
fmt.Println("Terminate a sector")
|
||||
|
||||
toTerminate := abi.SectorNumber(3)
|
||||
|
||||
err = miner.SectorTerminate(ctx, toTerminate)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgTriggerred := false
|
||||
loop:
|
||||
for {
|
||||
si, err := miner.SectorsStatus(ctx, toTerminate, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("state: ", si.State, msgTriggerred)
|
||||
|
||||
switch sealing.SectorState(si.State) {
|
||||
case sealing.Terminating:
|
||||
if !msgTriggerred {
|
||||
{
|
||||
p, err := miner.SectorTerminatePending(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, p, 1)
|
||||
require.Equal(t, abi.SectorNumber(3), p[0].Number)
|
||||
}
|
||||
|
||||
c, err := miner.SectorTerminateFlush(ctx)
|
||||
require.NoError(t, err)
|
||||
if c != nil {
|
||||
msgTriggerred = true
|
||||
fmt.Println("terminate message:", c)
|
||||
|
||||
{
|
||||
p, err := miner.SectorTerminatePending(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, p, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed:
|
||||
break loop
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
// check power decreased
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
|
||||
|
||||
// check in terminated set
|
||||
{
|
||||
parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(parts), 0)
|
||||
|
||||
bflen := func(b bitfield.BitField) uint64 {
|
||||
l, err := b.Count()
|
||||
require.NoError(t, err)
|
||||
return l
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(1), bflen(parts[0].AllSectors))
|
||||
require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
|
||||
}
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
|
||||
}
|
||||
|
@ -2,11 +2,9 @@ package build
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/filecoin-project/lotus/lib/addrutil"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
rice "github.com/GeertJohan/go.rice"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@ -17,24 +15,16 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var out []peer.AddrInfo
|
||||
|
||||
b := rice.MustFindBox("bootstrap")
|
||||
err := b.Walk("", func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to walk box: %w", err)
|
||||
|
||||
if BootstrappersFile != "" {
|
||||
spi := b.MustString(BootstrappersFile)
|
||||
if spi == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(path, ".pi") {
|
||||
return nil
|
||||
}
|
||||
spi := b.MustString(path)
|
||||
if spi == "" {
|
||||
return nil
|
||||
}
|
||||
pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
|
||||
out = append(out, pi...)
|
||||
return err
|
||||
})
|
||||
return out, err
|
||||
return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
build/bootstrap/calibnet.pi (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWK1QYsm6iqyhgH7vqsbeoNoKHbT368h1JLHS1qYN36oyc
|
||||
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWKDyJZoPsNak1iYNN1GGmvGnvhyVbWBL6iusYfP3RpgYs
|
||||
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWJRSTnzABB6MYYEBbSTT52phQntVD1PpRTMh1xt9mh6yH
|
||||
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWQLi3kY6HnMYLUtwCe26zWMdNhniFgHVNn1DioQc7NiWv
|
@ -14,7 +14,7 @@ func MaybeGenesis() []byte {
|
||||
log.Warnf("loading built-in genesis: %s", err)
|
||||
return nil
|
||||
}
|
||||
genBytes, err := builtinGen.Bytes("devnet.car")
|
||||
genBytes, err := builtinGen.Bytes(GenesisFile)
|
||||
if err != nil {
|
||||
log.Warnf("loading built-in genesis: %s", err)
|
||||
}
|
||||
|
build/genesis/calibnet.car (new binary file; content not shown)
@ -3,13 +3,13 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
const BootstrappersFile = ""
|
||||
const GenesisFile = ""
|
||||
|
||||
const UpgradeBreezeHeight = -1
|
||||
const BreezeGasTampingDuration = 0
|
||||
|
||||
@ -18,8 +18,8 @@ const UpgradeIgnitionHeight = -2
|
||||
const UpgradeRefuelHeight = -3
|
||||
const UpgradeTapeHeight = -4
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(10)
|
||||
var UpgradeLiftoffHeight = abi.ChainEpoch(-5)
|
||||
const UpgradeActorsV2Height = 10
|
||||
const UpgradeLiftoffHeight = -5
|
||||
|
||||
const UpgradeKumquatHeight = 15
|
||||
const UpgradeCalicoHeight = 20
|
||||
@ -36,11 +36,6 @@ func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
|
||||
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
|
||||
UpgradeActorsV2Height = math.MaxInt64
|
||||
UpgradeLiftoffHeight = 11
|
||||
}
|
||||
|
||||
BuildType |= Build2k
|
||||
}
|
||||
|
||||
|
build/params_calibnet.go (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
// +build calibnet
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const BootstrappersFile = "calibnet.pi"
|
||||
const GenesisFile = "calibnet.car"
|
||||
|
||||
const UpgradeBreezeHeight = -1
|
||||
const BreezeGasTampingDuration = 120
|
||||
|
||||
const UpgradeSmokeHeight = -2
|
||||
|
||||
const UpgradeIgnitionHeight = -3
|
||||
const UpgradeRefuelHeight = -4
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(30)
|
||||
|
||||
const UpgradeTapeHeight = 60
|
||||
|
||||
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
|
||||
// Miners, clients, developers, custodians all need time to prepare.
|
||||
// We still have upgrades and state changes to do, but can happen after signaling timing here.
|
||||
const UpgradeLiftoffHeight = -5
|
||||
|
||||
const UpgradeKumquatHeight = 90
|
||||
|
||||
const UpgradeCalicoHeight = 92000
|
||||
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
|
||||
|
||||
// 2020-12-17T19:00:00Z
|
||||
const UpgradeClausHeight = 161386
|
||||
|
||||
// 2021-01-17T19:00:00Z
|
||||
const UpgradeOrangeHeight = 250666
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 30))
|
||||
policy.SetSupportedProofTypes(
|
||||
abi.RegisteredSealProof_StackedDrg512MiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg32GiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg64GiBV1,
|
||||
)
|
||||
|
||||
SetAddressNetwork(address.Testnet)
|
||||
|
||||
Devnet = true
|
||||
|
||||
BuildType = BuildCalibnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
||||
const PropagationDelaySecs = uint64(6)
|
||||
|
||||
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
|
||||
const BootstrapPeerThreshold = 4
|
@ -1,11 +1,11 @@
|
||||
// +build !debug
|
||||
// +build !2k
|
||||
// +build !testground
|
||||
// +build !calibnet
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -19,7 +19,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
UpgradeSmokeHeight: DrandMainnet,
|
||||
}
|
||||
|
||||
const BootstrappersFile = "mainnet.pi"
|
||||
const GenesisFile = "mainnet.car"
|
||||
|
||||
const UpgradeBreezeHeight = 41280
|
||||
|
||||
const BreezeGasTampingDuration = 120
|
||||
|
||||
const UpgradeSmokeHeight = 51000
|
||||
@ -27,7 +31,7 @@ const UpgradeSmokeHeight = 51000
|
||||
const UpgradeIgnitionHeight = 94000
|
||||
const UpgradeRefuelHeight = 130800
|
||||
|
||||
var UpgradeActorsV2Height = abi.ChainEpoch(138720)
|
||||
const UpgradeActorsV2Height = 138720
|
||||
|
||||
const UpgradeTapeHeight = 140760
|
||||
|
||||
@ -53,11 +57,9 @@ func init() {
|
||||
SetAddressNetwork(address.Mainnet)
|
||||
}
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
|
||||
UpgradeActorsV2Height = math.MaxInt64
|
||||
}
|
||||
|
||||
Devnet = false
|
||||
|
||||
BuildType = BuildMainnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
|
||||
// Consensus / Network
|
||||
|
||||
const AllowableClockDriftSecs = uint64(1)
|
||||
const NewestNetworkVersion = network.Version8
|
||||
const NewestNetworkVersion = network.Version9
|
||||
const ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
// Epochs
|
||||
|
@ -97,11 +97,14 @@ var (
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
NewestNetworkVersion = network.Version8
|
||||
NewestNetworkVersion = network.Version9
|
||||
ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
Devnet = true
|
||||
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
|
||||
|
||||
BootstrappersFile = ""
|
||||
GenesisFile = ""
|
||||
)
|
||||
|
||||
const BootstrapPeerThreshold = 1
|
||||
|
@ -10,26 +10,32 @@ var CurrentCommit string
|
||||
var BuildType int
|
||||
|
||||
const (
|
||||
BuildDefault = 0
|
||||
Build2k = 0x1
|
||||
BuildDebug = 0x3
|
||||
BuildDefault = 0
|
||||
BuildMainnet = 0x1
|
||||
Build2k = 0x2
|
||||
BuildDebug = 0x3
|
||||
BuildCalibnet = 0x4
|
||||
)
|
||||
|
||||
func buildType() string {
|
||||
switch BuildType {
|
||||
case BuildDefault:
|
||||
return ""
|
||||
case BuildDebug:
|
||||
return "+debug"
|
||||
case BuildMainnet:
|
||||
return "+mainnet"
|
||||
case Build2k:
|
||||
return "+2k"
|
||||
case BuildDebug:
|
||||
return "+debug"
|
||||
case BuildCalibnet:
|
||||
return "+calibnet"
|
||||
default:
|
||||
return "+huh?"
|
||||
}
|
||||
}
|
||||
|
||||
// BuildVersion is the local build version, set by build system
|
||||
const BuildVersion = "1.4.0"
|
||||
const BuildVersion = "1.4.1"
|
||||
|
||||
func UserVersion() string {
|
||||
return BuildVersion + buildType() + CurrentCommit
|
||||
@ -84,7 +90,7 @@ func VersionForType(nodeType NodeType) (Version, error) {
|
||||
// semver versions of the rpc api exposed
|
||||
var (
|
||||
FullAPIVersion = newVer(1, 0, 0)
|
||||
MinerAPIVersion = newVer(1, 0, 0)
|
||||
MinerAPIVersion = newVer(1, 0, 1)
|
||||
WorkerAPIVersion = newVer(1, 0, 0)
|
||||
)
|
||||
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -42,6 +43,10 @@ var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
|
||||
|
||||
const MinSectorExpiration = miner0.MinSectorExpiration
|
||||
|
||||
// Not used / checked in v0
|
||||
var DeclarationsMax = miner2.DeclarationsMax
|
||||
var AddressedSectorsMax = miner2.AddressedSectorsMax
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
switch act.Code {
|
||||
case builtin0.StorageMinerActorCodeID:
|
||||
|
@ -56,7 +56,11 @@ func (s *server) HandleStream(stream inet.Stream) {
|
||||
}
|
||||
|
||||
_ = stream.SetDeadline(time.Now().Add(WriteResDeadline))
|
||||
if err := cborutil.WriteCborRPC(stream, resp); err != nil {
|
||||
buffered := bufio.NewWriter(stream)
|
||||
if err = cborutil.WriteCborRPC(buffered, resp); err == nil {
|
||||
err = buffered.Flush()
|
||||
}
|
||||
if err != nil {
|
||||
_ = stream.SetDeadline(time.Time{})
|
||||
log.Warnw("failed to write back response for handle stream",
|
||||
"err", err, "peer", stream.Conn().RemotePeer())
|
||||
|
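
The HandleStream change above pushes the CBOR response through a bufio.Writer and only counts the write as successful once Flush succeeds, so the response leaves in a single buffered write. A minimal, generic sketch of the same pattern (encoding/json stands in for the CBOR-RPC encoder here, and a plain bytes.Buffer replaces the libp2p stream):

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// writeBuffered encodes resp into a bufio.Writer and only reports success
// once Flush has also succeeded, mirroring the shape of the change above.
func writeBuffered(dst io.Writer, resp interface{}) error {
	buffered := bufio.NewWriter(dst)
	err := json.NewEncoder(buffered).Encode(resp)
	if err == nil {
		err = buffered.Flush()
	}
	return err
}

func main() {
	var out bytes.Buffer
	if err := writeBuffered(&out, map[string]string{"status": "ok"}); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Printf("wrote %d bytes in a single flush\n", out.Len())
}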
@ -2,6 +2,7 @@ package market
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -129,6 +130,11 @@ func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Addres
|
||||
return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt)
|
||||
}
|
||||
|
||||
// GetReserved returns the amount that is currently reserved for the address
|
||||
func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount {
|
||||
return fm.getFundedAddress(addr).getReserved()
|
||||
}
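
The reserved amount reported here feeds the "available to withdraw" arithmetic used by the CLI later in this change: available = escrow - locked - reserved, floored at zero. A small sketch of that calculation with go-state-types (the sample figures are made up):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// withdrawable mirrors the avail = escrow - locked - reserved calculation,
// clamped at zero, that callers perform with FundManager.GetReserved.
func withdrawable(escrow, locked, reserved abi.TokenAmount) abi.TokenAmount {
	avail := big.Sub(big.Sub(escrow, locked), reserved)
	if avail.LessThan(big.Zero()) {
		avail = big.Zero()
	}
	return avail
}

func main() {
	escrow := abi.NewTokenAmount(100)
	locked := abi.NewTokenAmount(30)
	reserved := abi.NewTokenAmount(50)
	fmt.Println(withdrawable(escrow, locked, reserved)) // 20
}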
|
||||
|
||||
// FundedAddressState keeps track of the state of an address with funds in the
|
||||
// datastore
|
||||
type FundedAddressState struct {
|
||||
@ -147,7 +153,7 @@ type fundedAddress struct {
|
||||
env *fundManagerEnvironment
|
||||
str *Store
|
||||
|
||||
lk sync.Mutex
|
||||
lk sync.RWMutex
|
||||
state *FundedAddressState
|
||||
|
||||
// Note: These request queues are ephemeral, they are not saved to store
|
||||
@ -183,6 +189,13 @@ func (a *fundedAddress) start() {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *fundedAddress) getReserved() abi.TokenAmount {
|
||||
a.lk.RLock()
|
||||
defer a.lk.RUnlock()
|
||||
|
||||
return a.state.AmtReserved
|
||||
}
|
||||
|
||||
func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
|
||||
return a.requestAndWait(ctx, wallet, amt, &a.reservations)
|
||||
}
|
||||
@ -501,7 +514,13 @@ func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid c
|
||||
// request with an error
|
||||
newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt)
|
||||
if newWithdrawalAmt.GreaterThan(netAvail) {
|
||||
err := xerrors.Errorf("insufficient funds for withdrawal of %d", amt)
|
||||
msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt))
|
||||
msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)",
|
||||
types.FIL(types.BigSub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved))
|
||||
if !withdrawalAmt.IsZero() {
|
||||
msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt))
|
||||
}
|
||||
err := xerrors.Errorf(msg)
|
||||
a.debugf("%s", err)
|
||||
req.Complete(cid.Undef, err)
|
||||
continue
|
||||
|
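
A worked example of the message arithmetic above, with made-up figures: if available = 100 FIL and reserved = 60 FIL, the net available is 40 FIL; with 25 FIL already queued for withdrawal, a further request of 20 FIL fails because 25 + 20 > 40, and the error reads roughly "insufficient funds for withdrawal of 20 FIL: net available (15 FIL) = available (100 FIL) - reserved (60 FIL) - queued withdrawals (25 FIL)".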
@ -639,7 +639,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
|
||||
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
|
||||
msg, err := sm.cs.GetCMessage(mcid)
|
||||
if err != nil {
|
||||
return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
|
||||
@ -656,7 +656,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty
|
||||
return head, r, foundMsg, nil
|
||||
}
|
||||
|
||||
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, LookbackNoLimit)
|
||||
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit)
|
||||
|
||||
if err != nil {
|
||||
log.Warnf("failed to look back through chain for message %s", mcid)
|
||||
|
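
For callers, the new parameter just bounds how far back the chain walk may go. A sketch under that assumption (stmgr.LookbackNoLimit is the unbounded sentinel the old call used; 2880 epochs is an assumed one-day window at 30-second epochs):

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// findMessage shows the caller-side choice the new parameter introduces:
// pass stmgr.LookbackNoLimit to search the whole chain, or a finite epoch
// count to bound the walk.
func findMessage(ctx context.Context, sm *stmgr.StateManager, mcid cid.Cid, bounded bool) error {
	lookback := abi.ChainEpoch(stmgr.LookbackNoLimit)
	if bounded {
		lookback = abi.ChainEpoch(2880) // assumption: about one day at 30s epochs
	}

	ts, receipt, foundMsg, err := sm.SearchForMessage(ctx, mcid, lookback)
	if err != nil {
		return err
	}
	if receipt == nil {
		return fmt.Errorf("message %s not found within the lookback window", mcid)
	}

	fmt.Printf("message executed as %s in tipset %s\n", foundMsg, ts.Key())
	return nil
}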
@ -363,7 +363,7 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
|
||||
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
|
||||
// internal state as our new head, if and only if it is heavier than the current
|
||||
// head.
|
||||
// head and does not exceed the maximum fork length.
|
||||
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
cs.heaviestLk.Lock()
|
||||
defer cs.heaviestLk.Unlock()
|
||||
@ -380,6 +380,15 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
||||
// TODO: don't do this for initial sync. Now that we don't have a
|
||||
// difference between 'bootstrap sync' and 'caught up' sync, we need
|
||||
// some other heuristic.
|
||||
|
||||
exceeds, err := cs.exceedsForkLength(cs.heaviest, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exceeds {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cs.takeHeaviestTipSet(ctx, ts)
|
||||
} else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
|
||||
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
|
||||
@ -387,6 +396,67 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
|
||||
// `synced` is the head of the chain we are currently synced to and `external`
|
||||
// is the incoming tipset potentially belonging to a forked chain. It assumes
|
||||
// the external chain has already been validated and is available in the ChainStore.
|
||||
// The "fast forward" case is covered in this logic as a valid fork of length 0.
|
||||
//
|
||||
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
|
||||
// `syncFork()` counts the length on both sides of the fork at the moment (we
|
||||
// need to settle on that) but here we just enforce it on the `synced` side.
|
||||
func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) {
|
||||
if synced == nil || external == nil {
|
||||
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
|
||||
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
|
||||
// functions having to handle the nil case on their own).
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
// `forkLength`: number of tipsets we need to walk back from our `synced`
|
||||
// chain to the common ancestor with the new `external` head in order to
|
||||
// adopt the fork.
|
||||
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
|
||||
// First walk back as many tipsets in the external chain to match the
|
||||
// `synced` height to compare them. If we go past the `synced` height
|
||||
// the subsequent match will fail but it will still be useful to get
|
||||
// closer to the `synced` head parent's height in the next loop.
|
||||
for external.Height() > synced.Height() {
|
||||
if external.Height() == 0 {
|
||||
// We reached the genesis of the external chain without a match;
|
||||
// this is considered a fork outside the allowed limit (of "infinite"
|
||||
// length).
|
||||
return true, nil
|
||||
}
|
||||
external, err = cs.LoadTipSet(external.Parents())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Now check if we arrived at the common ancestor.
|
||||
if synced.Equals(external) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If we didn't, go back *one* tipset on the `synced` side (incrementing
|
||||
// the `forkLength`).
|
||||
if synced.Height() == 0 {
|
||||
// Same check as the `external` side, if we reach the start (genesis)
|
||||
// there is no common ancestor.
|
||||
return true, nil
|
||||
}
|
||||
synced, err = cs.LoadTipSet(synced.Parents())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// We traversed the fork length allowed without finding a common ancestor.
|
||||
return true, nil
|
||||
}
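
To make the walk above concrete, here is a self-contained toy version of the same two-phase search (IDs, integer heights and parent pointers stand in for tipsets and ChainStore.LoadTipSet, and the threshold plays the role of build.ForkLengthThreshold). Reaching genesis on either side without a match counts as exceeding the limit, and the fast-forward case is a fork of length 0:

package main

import "fmt"

// toyTipSet is a minimal stand-in for *types.TipSet in this sketch.
type toyTipSet struct {
	id     string
	height int
	parent *toyTipSet
}

// exceedsForkLength mirrors the walk above on the toy chain: bring external
// down to synced's height, check for a common ancestor, otherwise step
// synced back one tipset and try again, up to threshold steps.
func exceedsForkLength(synced, external *toyTipSet, threshold int) bool {
	for forkLength := 0; forkLength < threshold; forkLength++ {
		for external.height > synced.height {
			if external.parent == nil {
				return true // reached external genesis with no match
			}
			external = external.parent
		}
		if external == synced {
			return false // common ancestor found within the threshold
		}
		if synced.parent == nil {
			return true // reached synced genesis with no match
		}
		synced = synced.parent
	}
	return true // exhausted the threshold without a common ancestor
}

func main() {
	gen := &toyTipSet{id: "gen", height: 0}
	a1 := &toyTipSet{id: "a1", height: 1, parent: gen}
	a2 := &toyTipSet{id: "a2", height: 2, parent: a1} // synced head
	a3 := &toyTipSet{id: "a3", height: 3, parent: a2} // descends from synced head
	b2 := &toyTipSet{id: "b2", height: 2, parent: a1} // fork off a1
	b3 := &toyTipSet{id: "b3", height: 3, parent: b2} // external head on the fork

	fmt.Println(exceedsForkLength(a2, a3, 1)) // false: fast forward, fork length 0
	fmt.Println(exceedsForkLength(a2, b3, 2)) // false: ancestor a1 found after one step back
	fmt.Println(exceedsForkLength(a2, b3, 1)) // true: threshold too small for this fork
}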
|
||||
|
||||
// ForceHeadSilent forces a chain head tipset without triggering a reorg
|
||||
// operation.
|
||||
//
|
||||
@ -524,9 +594,13 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
|
||||
// FlushValidationCache removes all results of block validation from the
|
||||
// chain metadata store. Usually the first step after a new chain import.
|
||||
func (cs *ChainStore) FlushValidationCache() error {
|
||||
return FlushValidationCache(cs.ds)
|
||||
}
|
||||
|
||||
func FlushValidationCache(ds datastore.Batching) error {
|
||||
log.Infof("clearing block validation cache...")
|
||||
|
||||
dsWalk, err := cs.ds.Query(query.Query{
|
||||
dsWalk, err := ds.Query(query.Query{
|
||||
// Potential TODO: the validation cache is not a namespace on its own
|
||||
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
|
||||
// in turn does not work with the filter, which can match only on `foo/bar`
|
||||
@ -546,7 +620,7 @@ func (cs *ChainStore) FlushValidationCache() error {
|
||||
return xerrors.Errorf("failed to run key listing query: %w", err)
|
||||
}
|
||||
|
||||
batch, err := cs.ds.Batch()
|
||||
batch, err := ds.Batch()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open a DS batch: %w", err)
|
||||
}
|
||||
|
@ -250,18 +250,6 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
|
||||
|
||||
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
|
||||
|
||||
if from == syncer.self {
|
||||
// TODO: this is kindof a hack...
|
||||
log.Debug("got block from ourselves")
|
||||
|
||||
if err := syncer.Sync(ctx, fts.TipSet()); err != nil {
|
||||
log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
|
||||
// the blockstore
|
||||
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
|
||||
@ -688,6 +676,10 @@ func blockSanityChecks(h *types.BlockHeader) error {
|
||||
return xerrors.Errorf("block had nil bls aggregate signature")
|
||||
}
|
||||
|
||||
if h.Miner.Protocol() != address.ID {
|
||||
return xerrors.Errorf("block had non-ID miner address")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
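
For reference, the new miner-address check distinguishes address protocols; a small go-address sketch of what it accepts and rejects (the concrete addresses are constructed locally, not taken from the chain):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
)

func main() {
	// An ID-form address (protocol 0) is what blockSanityChecks now
	// requires for BlockHeader.Miner.
	idAddr, err := address.NewIDAddress(1000)
	if err != nil {
		panic(err)
	}
	fmt.Println(idAddr.Protocol() == address.ID) // true: would pass the check

	// Any other protocol, e.g. an actor address, would be rejected as a
	// "non-ID miner address".
	actorAddr, err := address.NewActorAddress([]byte("example-actor"))
	if err != nil {
		panic(err)
	}
	fmt.Println(actorAddr.Protocol() == address.ID) // false: would fail the check
}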
|
||||
|
||||
|
@ -117,6 +117,13 @@ func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
|
||||
// get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2)
|
||||
bop = <-stc
|
||||
if bop.ts.Equals(c2) {
|
||||
// there's a small race and we might get c2 first.
|
||||
// But we should still end on c1.
|
||||
bop.done()
|
||||
bop = <-stc
|
||||
}
|
||||
|
||||
if !bop.ts.Equals(c1) {
|
||||
t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts)
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ func (f FIL) Unitless() string {
|
||||
var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"}
|
||||
|
||||
func (f FIL) Short() string {
|
||||
n := BigInt(f)
|
||||
n := BigInt(f).Abs()
|
||||
|
||||
dn := uint64(1)
|
||||
var prefix string
|
||||
@ -70,7 +70,7 @@ func (f FIL) UnmarshalText(text []byte) error {
|
||||
}
|
||||
|
||||
func ParseFIL(s string) (FIL, error) {
|
||||
suffix := strings.TrimLeft(s, ".1234567890")
|
||||
suffix := strings.TrimLeft(s, "-.1234567890")
|
||||
s = s[:len(s)-len(suffix)]
|
||||
var attofil bool
|
||||
if suffix != "" {
|
||||
|
@ -57,6 +57,52 @@ func TestFilShort(t *testing.T) {
|
||||
{fil: "0.000221234", expect: "221.234 μFIL"},
|
||||
{fil: "0.0002212344", expect: "221.234 μFIL"},
|
||||
{fil: "0.00022123444", expect: "221.234 μFIL"},
|
||||
|
||||
{fil: "-1", expect: "-1 FIL"},
|
||||
{fil: "-1.1", expect: "-1.1 FIL"},
|
||||
{fil: "-12", expect: "-12 FIL"},
|
||||
{fil: "-123", expect: "-123 FIL"},
|
||||
{fil: "-123456", expect: "-123456 FIL"},
|
||||
{fil: "-123.23", expect: "-123.23 FIL"},
|
||||
{fil: "-123456.234", expect: "-123456.234 FIL"},
|
||||
{fil: "-123456.2341234", expect: "-123456.234 FIL"},
|
||||
{fil: "-123456.234123445", expect: "-123456.234 FIL"},
|
||||
|
||||
{fil: "-0.1", expect: "-100 mFIL"},
|
||||
{fil: "-0.01", expect: "-10 mFIL"},
|
||||
{fil: "-0.001", expect: "-1 mFIL"},
|
||||
|
||||
{fil: "-0.0001", expect: "-100 μFIL"},
|
||||
{fil: "-0.00001", expect: "-10 μFIL"},
|
||||
{fil: "-0.000001", expect: "-1 μFIL"},
|
||||
|
||||
{fil: "-0.0000001", expect: "-100 nFIL"},
|
||||
{fil: "-0.00000001", expect: "-10 nFIL"},
|
||||
{fil: "-0.000000001", expect: "-1 nFIL"},
|
||||
|
||||
{fil: "-0.0000000001", expect: "-100 pFIL"},
|
||||
{fil: "-0.00000000001", expect: "-10 pFIL"},
|
||||
{fil: "-0.000000000001", expect: "-1 pFIL"},
|
||||
|
||||
{fil: "-0.0000000000001", expect: "-100 fFIL"},
|
||||
{fil: "-0.00000000000001", expect: "-10 fFIL"},
|
||||
{fil: "-0.000000000000001", expect: "-1 fFIL"},
|
||||
|
||||
{fil: "-0.0000000000000001", expect: "-100 aFIL"},
|
||||
{fil: "-0.00000000000000001", expect: "-10 aFIL"},
|
||||
{fil: "-0.000000000000000001", expect: "-1 aFIL"},
|
||||
|
||||
{fil: "-0.0000012", expect: "-1.2 μFIL"},
|
||||
{fil: "-0.00000123", expect: "-1.23 μFIL"},
|
||||
{fil: "-0.000001234", expect: "-1.234 μFIL"},
|
||||
{fil: "-0.0000012344", expect: "-1.234 μFIL"},
|
||||
{fil: "-0.00000123444", expect: "-1.234 μFIL"},
|
||||
|
||||
{fil: "-0.0002212", expect: "-221.2 μFIL"},
|
||||
{fil: "-0.00022123", expect: "-221.23 μFIL"},
|
||||
{fil: "-0.000221234", expect: "-221.234 μFIL"},
|
||||
{fil: "-0.0002212344", expect: "-221.234 μFIL"},
|
||||
{fil: "-0.00022123444", expect: "-221.234 μFIL"},
|
||||
} {
|
||||
s := s
|
||||
t.Run(s.fil, func(t *testing.T) {
|
||||
|
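
A short round-trip of what the new test cases above exercise, using the exported helpers from chain/types (the output matches the table):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	// ParseFIL now accepts a leading minus sign...
	f, err := types.ParseFIL("-0.0000012")
	if err != nil {
		panic(err)
	}

	// ...and Short() scales by the absolute value, so negative amounts get
	// the same unit prefix as their positive counterparts.
	fmt.Println(f.Short()) // -1.2 μFIL
}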
@ -43,9 +43,11 @@ import (
|
||||
|
||||
const MaxCallDepth = 4096
|
||||
|
||||
var log = logging.Logger("vm")
|
||||
var actorLog = logging.Logger("actors")
|
||||
var gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
|
||||
var (
|
||||
log = logging.Logger("vm")
|
||||
actorLog = logging.Logger("actors")
|
||||
gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
|
||||
)
|
||||
|
||||
// stat counters
|
||||
var (
|
||||
@ -72,8 +74,10 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad
|
||||
return aast.PubkeyAddress()
|
||||
}
|
||||
|
||||
var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
|
||||
var _ blockstore.Viewer = (*gasChargingBlocks)(nil)
|
||||
var (
|
||||
_ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
|
||||
_ blockstore.Viewer = (*gasChargingBlocks)(nil)
|
||||
)
|
||||
|
||||
type gasChargingBlocks struct {
|
||||
chargeGas func(GasCharge)
|
||||
@ -194,9 +198,11 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim
|
||||
return vm.VM.makeRuntime(ctx, msg, nil)
|
||||
}
|
||||
|
||||
type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
|
||||
type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
|
||||
type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
|
||||
type (
|
||||
CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
|
||||
NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
|
||||
LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
|
||||
)
|
||||
|
||||
type VM struct {
|
||||
cstate *state.StateTree
|
||||
@ -265,7 +271,6 @@ type ApplyRet struct {
|
||||
|
||||
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
||||
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
|
||||
|
||||
defer atomic.AddUint64(&StatSends, 1)
|
||||
|
||||
st := vm.cstate
|
||||
@ -563,7 +568,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
gasUsed = 0
|
||||
}
|
||||
|
||||
burn, err := vm.shouldBurn(st, msg, errcode)
|
||||
burn, err := vm.ShouldBurn(st, msg, errcode)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("deciding whether should burn failed: %w", err)
|
||||
}
|
||||
@ -606,7 +611,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (vm *VM) shouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
|
||||
func (vm *VM) ShouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
|
||||
// Check to see if we should burn funds. We avoid burning on successful
|
||||
// window post. This won't catch _indirect_ window post calls, but this
|
||||
// is the best we can get for now.
|
||||
@ -737,7 +742,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err
|
||||
close(freeBufs)
|
||||
}()
|
||||
|
||||
var batch = <-freeBufs
|
||||
batch := <-freeBufs
|
||||
batchCp := func(blk block.Block) error {
|
||||
numBlocks++
|
||||
totalCopySize += len(blk.RawData())
|
||||
|
124  cli/chain.go
@ -10,6 +10,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -56,6 +57,7 @@ var chainCmd = &cli.Command{
|
||||
chainGasPriceCmd,
|
||||
chainInspectUsage,
|
||||
chainDecodeCmd,
|
||||
chainEncodeCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1106,8 +1108,8 @@ var slashConsensusFault = &cli.Command{
|
||||
ArgsUsage: "[blockCid1 blockCid2]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "miner",
|
||||
Usage: "Miner address",
|
||||
Name: "from",
|
||||
Usage: "optionally specify the account to report consensus from",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "extra",
|
||||
@ -1142,9 +1144,25 @@ var slashConsensusFault = &cli.Command{
|
||||
return xerrors.Errorf("getting block 2: %w", err)
|
||||
}
|
||||
|
||||
def, err := api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
if b1.Miner != b2.Miner {
|
||||
return xerrors.Errorf("block1.miner:%s block2.miner:%s", b1.Miner, b2.Miner)
|
||||
}
|
||||
|
||||
var fromAddr address.Address
|
||||
if from := cctx.String("from"); from == "" {
|
||||
defaddr, err := api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr = defaddr
|
||||
} else {
|
||||
addr, err := address.NewFromString(from)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr = addr
|
||||
}
|
||||
|
||||
bh1, err := cborutil.Dump(b1)
|
||||
@ -1186,18 +1204,9 @@ var slashConsensusFault = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
if cctx.String("miner") == "" {
|
||||
return xerrors.Errorf("--miner flag is required")
|
||||
}
|
||||
|
||||
maddr, err := address.NewFromString(cctx.String("miner"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
From: def,
|
||||
To: b2.Miner,
|
||||
From: fromAddr,
|
||||
Value: types.NewInt(0),
|
||||
Method: builtin.MethodsMiner.ReportConsensusFault,
|
||||
Params: enc,
|
||||
@ -1320,3 +1329,86 @@ var chainDecodeParamsCmd = &cli.Command{
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var chainEncodeCmd = &cli.Command{
|
||||
Name: "encode",
|
||||
Usage: "encode various types",
|
||||
Subcommands: []*cli.Command{
|
||||
chainEncodeParamsCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var chainEncodeParamsCmd = &cli.Command{
|
||||
Name: "params",
|
||||
Usage: "Encodes the given JSON params",
|
||||
ArgsUsage: "[toAddr method params]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "tipset",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "encoding",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
if cctx.Args().Len() != 3 {
|
||||
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
|
||||
}
|
||||
|
||||
to, err := address.NewFromString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing toAddr: %w", err)
|
||||
}
|
||||
|
||||
method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing method id: %w", err)
|
||||
}
|
||||
|
||||
ts, err := LoadTipSet(ctx, cctx, api)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
act, err := api.StateGetActor(ctx, to, ts.Key())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting actor: %w", err)
|
||||
}
|
||||
|
||||
methodMeta, found := stmgr.MethodsMap[act.Code][abi.MethodNum(method)]
|
||||
if !found {
|
||||
return fmt.Errorf("method %d not found on actor %s", method, act.Code)
|
||||
}
|
||||
|
||||
p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
|
||||
|
||||
if err := json.Unmarshal([]byte(cctx.Args().Get(2)), p); err != nil {
|
||||
return fmt.Errorf("unmarshaling input into params type: %w", err)
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if err := p.MarshalCBOR(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch cctx.String("encoding") {
|
||||
case "base64":
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))
|
||||
case "hex":
|
||||
fmt.Println(hex.EncodeToString(buf.Bytes()))
|
||||
default:
|
||||
return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
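
The same encoding can be produced programmatically. A hedged sketch, using market.WithdrawBalanceParams from specs-actors v2 purely as a convenient params type that already has cbor-gen marshalling (the address and amount are arbitrary):

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
)

func main() {
	addr, err := address.NewIDAddress(1000)
	if err != nil {
		panic(err)
	}

	// Roughly the equivalent of feeding {"ProviderOrClientAddress":"f01000","Amount":"12345"}
	// to `lotus chain encode params`: marshal the typed params to CBOR...
	params := market2.WithdrawBalanceParams{
		ProviderOrClientAddress: addr,
		Amount:                  abi.NewTokenAmount(12345),
	}

	var buf bytes.Buffer
	if err := params.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// ...and print it in either of the encodings the command supports.
	fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))
	fmt.Println(hex.EncodeToString(buf.Bytes()))
}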
|
||||
|
@ -91,7 +91,7 @@ var clientCmd = &cli.Command{
|
||||
WithCategory("retrieval", clientRetrieveCmd),
|
||||
WithCategory("util", clientCommPCmd),
|
||||
WithCategory("util", clientCarGenCmd),
|
||||
WithCategory("util", clientInfoCmd),
|
||||
WithCategory("util", clientBalancesCmd),
|
||||
WithCategory("util", clientListTransfers),
|
||||
WithCategory("util", clientRestartTransfer),
|
||||
WithCategory("util", clientCancelTransfer),
|
||||
@ -1732,9 +1732,9 @@ var clientGetDealCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var clientInfoCmd = &cli.Command{
|
||||
Name: "info",
|
||||
Usage: "Print storage market client information",
|
||||
var clientBalancesCmd = &cli.Command{
|
||||
Name: "balances",
|
||||
Usage: "Print storage market client balances",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "client",
|
||||
@ -1751,7 +1751,7 @@ var clientInfoCmd = &cli.Command{
|
||||
|
||||
var addr address.Address
|
||||
if clientFlag := cctx.String("client"); clientFlag != "" {
|
||||
ca, err := address.NewFromString("client")
|
||||
ca, err := address.NewFromString(clientFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1770,10 +1770,22 @@ var clientInfoCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Client Market Info:\n")
|
||||
reserved, err := api.MarketGetReserved(ctx, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Locked Funds:\t%s\n", types.FIL(balance.Locked))
|
||||
fmt.Printf("Escrowed Funds:\t%s\n", types.FIL(balance.Escrow))
|
||||
avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved)
|
||||
if avail.LessThan(big.Zero()) {
|
||||
avail = big.Zero()
|
||||
}
|
||||
|
||||
fmt.Printf("Client Market Balance for address %s:\n", addr)
|
||||
|
||||
fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow))
|
||||
fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked))
|
||||
fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved))
|
||||
fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail))
|
||||
|
||||
return nil
|
||||
},
|
||||
@ -1943,6 +1955,11 @@ var clientListTransfers = &cli.Command{
|
||||
Name: "list-transfers",
|
||||
Usage: "List ongoing data transfers for deals",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Aliases: []string{"v"},
|
||||
Usage: "print verbose transfer details",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
@ -1974,6 +1991,7 @@ var clientListTransfers = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
verbose := cctx.Bool("verbose")
|
||||
completed := cctx.Bool("completed")
|
||||
color := cctx.Bool("color")
|
||||
watch := cctx.Bool("watch")
|
||||
@ -1989,7 +2007,7 @@ var clientListTransfers = &cli.Command{
|
||||
|
||||
tm.MoveCursor(1, 1)
|
||||
|
||||
OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
|
||||
OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed)
|
||||
|
||||
tm.Flush()
|
||||
|
||||
@ -2014,13 +2032,13 @@ var clientListTransfers = &cli.Command{
|
||||
}
|
||||
}
|
||||
}
|
||||
OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
|
||||
OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// OutputDataTransferChannels generates table output for a list of channels
|
||||
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) {
|
||||
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) {
|
||||
sort.Slice(channels, func(i, j int) bool {
|
||||
return channels[i].TransferID < channels[j].TransferID
|
||||
})
|
||||
@ -2050,7 +2068,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
||||
tablewriter.Col("Voucher"),
|
||||
tablewriter.NewLineCol("Message"))
|
||||
for _, channel := range sendingChannels {
|
||||
w.Write(toChannelOutput(color, "Sending To", channel))
|
||||
w.Write(toChannelOutput(color, "Sending To", channel, verbose))
|
||||
}
|
||||
w.Flush(out) //nolint:errcheck
|
||||
|
||||
@ -2064,7 +2082,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
||||
tablewriter.Col("Voucher"),
|
||||
tablewriter.NewLineCol("Message"))
|
||||
for _, channel := range receivingChannels {
|
||||
w.Write(toChannelOutput(color, "Receiving From", channel))
|
||||
w.Write(toChannelOutput(color, "Receiving From", channel, verbose))
|
||||
}
|
||||
w.Flush(out) //nolint:errcheck
|
||||
}
|
||||
@ -2085,9 +2103,13 @@ func channelStatusString(useColor bool, status datatransfer.Status) string {
|
||||
}
|
||||
}
|
||||
|
||||
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
|
||||
rootCid := ellipsis(channel.BaseCID.String(), 8)
|
||||
otherParty := ellipsis(channel.OtherPeer.String(), 8)
|
||||
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} {
|
||||
rootCid := channel.BaseCID.String()
|
||||
otherParty := channel.OtherPeer.String()
|
||||
if !verbose {
|
||||
rootCid = ellipsis(rootCid, 8)
|
||||
otherParty = ellipsis(otherParty, 8)
|
||||
}
|
||||
|
||||
initiated := "N"
|
||||
if channel.IsInitiator {
|
||||
@ -2095,7 +2117,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
|
||||
}
|
||||
|
||||
voucher := channel.Voucher
|
||||
if len(voucher) > 40 {
|
||||
if len(voucher) > 40 && !verbose {
|
||||
voucher = ellipsis(voucher, 37)
|
||||
}
|
||||
|
||||
|
@ -362,15 +362,15 @@ var mpoolReplaceCmd = &cli.Command{
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "gas-feecap",
|
||||
Usage: "gas feecap for new message",
|
||||
Usage: "gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "gas-premium",
|
||||
Usage: "gas price for new message",
|
||||
Usage: "gas price for new message (pay to miner, attoFIL/GasUnit)",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "gas-limit",
|
||||
Usage: "gas price for new message",
|
||||
Usage: "gas limit for new message (GasUnit)",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auto",
|
||||
@ -378,7 +378,7 @@ var mpoolReplaceCmd = &cli.Command{
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "max-fee",
|
||||
Usage: "Spend up to X FIL for this message (applicable for auto mode)",
|
||||
Usage: "Spend up to X attoFIL for this message (applicable for auto mode)",
|
||||
},
|
||||
},
|
||||
ArgsUsage: "<from nonce> | <message-cid>",
|
||||
|
@ -473,12 +473,12 @@ var msigApproveCmd = &cli.Command{
|
||||
return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID"))
|
||||
}
|
||||
|
||||
if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value> [ <method> <params> ]"))
|
||||
if cctx.Args().Len() > 2 && cctx.Args().Len() < 5 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value>"))
|
||||
}
|
||||
|
||||
if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value>"))
|
||||
if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
|
||||
return ShowHelp(cctx, fmt.Errorf("usage: msig approve <msig addr> <message ID> <proposer address> <desination> <value> [ <method> <params> ]"))
|
||||
}
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
|
21  cli/send.go
@ -15,6 +15,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
@ -51,7 +52,7 @@ var sendCmd = &cli.Command{
|
||||
&cli.Uint64Flag{
|
||||
Name: "method",
|
||||
Usage: "specify method to invoke",
|
||||
Value: 0,
|
||||
Value: uint64(builtin.MethodSend),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "params-json",
|
||||
@ -61,6 +62,10 @@ var sendCmd = &cli.Command{
|
||||
Name: "params-hex",
|
||||
Usage: "specify invocation parameters in hex",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "force",
|
||||
Usage: "must be specified for the action to take effect if maybe SysErrInsufficientFunds etc",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 2 {
|
||||
@ -143,6 +148,20 @@ var sendCmd = &cli.Command{
|
||||
Params: params,
|
||||
}
|
||||
|
||||
if !cctx.Bool("force") {
|
||||
// Funds insufficient check
|
||||
fromBalance, err := api.WalletBalance(ctx, msg.From)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value)
|
||||
|
||||
if fromBalance.LessThan(totalCost) {
|
||||
fmt.Printf("WARNING: From balance %s less than total cost %s\n", types.FIL(fromBalance), types.FIL(totalCost))
|
||||
return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned")
|
||||
}
|
||||
}
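
A worked example of the guard above, with illustrative numbers: with GasFeeCap = 10^9 attoFIL per gas unit, GasLimit = 10^9 gas units and Value = 1 FIL, totalCost = 10^9 × 10^9 attoFIL + 1 FIL = 2 FIL, so a sender balance of 1.5 FIL prints the warning and the send is refused unless --force is passed.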
|
||||
|
||||
if cctx.IsSet("nonce") {
|
||||
msg.Nonce = cctx.Uint64("nonce")
|
||||
sm, err := api.WalletSignMessage(ctx, fromAddr, msg)
|
||||
|
@ -1617,7 +1617,7 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
|
||||
return nil, fmt.Errorf("unknown method %d for actor %s", method, act)
|
||||
}
|
||||
|
||||
paramObj := methodMeta.Params
|
||||
paramObj := methodMeta.Params.Elem()
|
||||
if paramObj.NumField() != len(args) {
|
||||
return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField())
|
||||
}
|
||||
|
161  cli/wallet.go
@ -509,6 +509,7 @@ var walletMarket = &cli.Command{
|
||||
Usage: "Interact with market balances",
|
||||
Subcommands: []*cli.Command{
|
||||
walletMarketWithdraw,
|
||||
walletMarketAdd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -518,13 +519,13 @@ var walletMarketWithdraw = &cli.Command{
|
||||
ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"f"},
|
||||
Name: "wallet",
|
||||
Usage: "Specify address to withdraw funds to, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"w"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)",
|
||||
Usage: "Market address to withdraw from (account or miner actor address, defaults to --wallet address)",
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
},
|
||||
@ -536,6 +537,123 @@ var walletMarketWithdraw = &cli.Command{
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
var wallet address.Address
|
||||
if cctx.String("wallet") != "" {
|
||||
wallet, err = address.NewFromString(cctx.String("wallet"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing from address: %w", err)
|
||||
}
|
||||
} else {
|
||||
wallet, err = api.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting default wallet address: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
addr := wallet
|
||||
if cctx.String("address") != "" {
|
||||
addr, err = address.NewFromString(cctx.String("address"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing market address: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Work out if there are enough unreserved, unlocked funds to withdraw
|
||||
bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err)
|
||||
}
|
||||
|
||||
reserved, err := api.MarketGetReserved(ctx, addr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market reserved amount for address %s: %w", addr.String(), err)
|
||||
}
|
||||
|
||||
avail := big.Subtract(big.Subtract(bal.Escrow, bal.Locked), reserved)
|
||||
|
||||
notEnoughErr := func(msg string) error {
|
||||
return xerrors.Errorf("%s; "+
|
||||
"available (%s) = escrow (%s) - locked (%s) - reserved (%s)",
|
||||
msg, types.FIL(avail), types.FIL(bal.Escrow), types.FIL(bal.Locked), types.FIL(reserved))
|
||||
}
|
||||
|
||||
if avail.IsZero() || avail.LessThan(big.Zero()) {
|
||||
avail = big.Zero()
|
||||
return notEnoughErr("no funds available to withdraw")
|
||||
}
|
||||
|
||||
// Default to withdrawing all available funds
|
||||
amt := avail
|
||||
|
||||
// If there was an amount argument, only withdraw that amount
|
||||
if cctx.Args().Present() {
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt = abi.TokenAmount(f)
|
||||
}
|
||||
|
||||
// Check the amount is positive
|
||||
if amt.IsZero() || amt.LessThan(big.Zero()) {
|
||||
return xerrors.Errorf("amount must be > 0")
|
||||
}
|
||||
|
||||
// Check there are enough available funds
|
||||
if amt.GreaterThan(avail) {
|
||||
msg := fmt.Sprintf("can't withdraw more funds than available; requested: %s", types.FIL(amt))
|
||||
return notEnoughErr(msg)
|
||||
}
|
||||
|
||||
fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), wallet.String())
|
||||
smsg, err := api.MarketWithdraw(ctx, wallet, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fund manager withdraw error: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var walletMarketAdd = &cli.Command{
|
||||
Name: "add",
|
||||
Usage: "Add funds to the Storage Market Actor",
|
||||
ArgsUsage: "<amount>",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "Specify address to move funds from, otherwise it will use the default wallet address",
|
||||
Aliases: []string{"f"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Market address to move funds to (account or miner actor address, defaults to --from address)",
|
||||
Aliases: []string{"a"},
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting node API: %w", err)
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
// Get amount param
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass amount to add")
|
||||
}
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt := abi.TokenAmount(f)
|
||||
|
||||
// Get from param
|
||||
var from address.Address
|
||||
if cctx.String("from") != "" {
|
||||
from, err = address.NewFromString(cctx.String("from"))
|
||||
@ -549,6 +667,7 @@ var walletMarketWithdraw = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
// Get address param
|
||||
addr := from
|
||||
if cctx.String("address") != "" {
|
||||
addr, err = address.NewFromString(cctx.String("address"))
|
||||
@ -557,38 +676,14 @@ var walletMarketWithdraw = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
|
||||
// Add balance to market actor
|
||||
fmt.Printf("Submitting Add Balance message for amount %s for address %s\n", types.FIL(amt), addr)
|
||||
smsg, err := api.MarketAddBalance(ctx, from, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err)
|
||||
return xerrors.Errorf("add balance error: %w", err)
|
||||
}
|
||||
|
||||
avail := big.Subtract(bal.Escrow, bal.Locked)
|
||||
amt := avail
|
||||
|
||||
if cctx.Args().Present() {
|
||||
f, err := types.ParseFIL(cctx.Args().First())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing 'amount' argument: %w", err)
|
||||
}
|
||||
|
||||
amt = abi.TokenAmount(f)
|
||||
}
|
||||
|
||||
if amt.GreaterThan(avail) {
|
||||
return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail))
|
||||
}
|
||||
|
||||
if avail.IsZero() {
|
||||
return xerrors.Errorf("zero unlocked funds available to withdraw")
|
||||
}
|
||||
|
||||
fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String())
|
||||
smsg, err := api.MarketWithdraw(ctx, from, addr, amt)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fund manager withdraw error: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
|
||||
fmt.Printf("AddBalance message cid: %s\n", smsg)
|
||||
|
||||
return nil
|
||||
},
|
||||
|
@ -27,6 +27,16 @@ func main() {
|
||||
Hidden: true,
|
||||
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "limit",
|
||||
Usage: "spam transaction count limit, <= 0 is no limit",
|
||||
Value: 0,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "rate",
|
||||
Usage: "spam transaction rate, count per second",
|
||||
Value: 5,
|
||||
},
|
||||
},
|
||||
Commands: []*cli.Command{runCmd},
|
||||
}
|
||||
@ -52,11 +62,17 @@ var runCmd = &cli.Command{
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return sendSmallFundsTxs(ctx, api, addr, 5)
|
||||
rate := cctx.Int("rate")
|
||||
if rate <= 0 {
|
||||
rate = 5
|
||||
}
|
||||
limit := cctx.Int("limit")
|
||||
|
||||
return sendSmallFundsTxs(ctx, api, addr, rate, limit)
|
||||
},
|
||||
}
|
||||
|
||||
func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error {
|
||||
func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate, limit int) error {
|
||||
var sendSet []address.Address
|
||||
for i := 0; i < 20; i++ {
|
||||
naddr, err := api.WalletNew(ctx, types.KTSecp256k1)
|
||||
@ -66,9 +82,14 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
|
||||
|
||||
sendSet = append(sendSet, naddr)
|
||||
}
|
||||
count := limit
|
||||
|
||||
tick := build.Clock.Ticker(time.Second / time.Duration(rate))
|
||||
for {
|
||||
if count <= 0 && limit > 0 {
|
||||
fmt.Printf("%d messages sent.\n", limit)
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-tick.C:
|
||||
msg := &types.Message{
|
||||
@ -81,6 +102,7 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count--
|
||||
fmt.Println("Message sent: ", smsg.Cid())
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
|
@ -40,6 +40,8 @@ import (
|
||||
var log = logging.Logger("lotus-bench")
|
||||
|
||||
type BenchResults struct {
|
||||
EnvVar map[string]string
|
||||
|
||||
SectorSize abi.SectorSize
|
||||
SectorNumber int
|
||||
|
||||
@ -446,6 +448,15 @@ var sealBenchCmd = &cli.Command{
|
||||
bo.VerifyWindowPostHot = verifyWindowpost2.Sub(verifyWindowpost1)
|
||||
}
|
||||
|
||||
bo.EnvVar = make(map[string]string)
|
||||
for _, envKey := range []string{"BELLMAN_NO_GPU", "FIL_PROOFS_MAXIMIZE_CACHING", "FIL_PROOFS_USE_GPU_COLUMN_BUILDER",
|
||||
"FIL_PROOFS_USE_GPU_TREE_BUILDER", "FIL_PROOFS_USE_MULTICORE_SDR", "BELLMAN_CUSTOM_GPU"} {
|
||||
envValue, found := os.LookupEnv(envKey)
|
||||
if found {
|
||||
bo.EnvVar[envKey] = envValue
|
||||
}
|
||||
}
|
||||
|
||||
if c.Bool("json-out") {
|
||||
data, err := json.MarshalIndent(bo, "", " ")
|
||||
if err != nil {
|
||||
@ -454,6 +465,10 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
fmt.Println(string(data))
|
||||
} else {
|
||||
fmt.Println("environment variable list:")
|
||||
for envKey, envValue := range bo.EnvVar {
|
||||
fmt.Printf("%s=%s\n", envKey, envValue)
|
||||
}
|
||||
fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber)
|
||||
if robench == "" {
|
||||
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece))
|
||||
|
@ -57,6 +57,7 @@ type gatewayDepsAPI interface {
|
||||
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
|
||||
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
||||
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error)
|
||||
StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error)
|
||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error)
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
|
||||
@ -299,6 +300,10 @@ func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKe
|
||||
return a.api.StateNetworkVersion(ctx, tsk)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
|
||||
return a.api.StateSearchMsgLimited(ctx, msg, a.stateWaitLookbackLimit)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
|
||||
return a.api.StateWaitMsgLimited(ctx, msg, confidence, a.stateWaitLookbackLimit)
|
||||
}
|
||||
|
@ -622,8 +622,8 @@ var actorControlSet = &cli.Command{
|
||||
|
||||
var actorSetOwnerCmd = &cli.Command{
|
||||
Name: "set-owner",
|
||||
Usage: "Set owner address",
|
||||
ArgsUsage: "[address]",
|
||||
Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)",
|
||||
ArgsUsage: "[newOwnerAddress senderAddress]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "really-do-it",
|
||||
@ -637,8 +637,8 @@ var actorSetOwnerCmd = &cli.Command{
|
||||
return nil
|
||||
}
|
||||
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass address of new owner address")
|
||||
if cctx.NArg() != 2 {
|
||||
return fmt.Errorf("must pass new owner address and sender address")
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
@ -660,7 +660,17 @@ var actorSetOwnerCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
|
||||
newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fa, err := address.NewFromString(cctx.Args().Get(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -675,13 +685,17 @@ var actorSetOwnerCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
sp, err := actors.SerializeParams(&newAddr)
|
||||
if fromAddrId != mi.Owner && fromAddrId != newAddrId {
|
||||
return xerrors.New("from address must either be the old owner or the new owner")
|
||||
}
|
||||
|
||||
sp, err := actors.SerializeParams(&newAddrId)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("serializing params: %w", err)
|
||||
}
|
||||
|
||||
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
|
||||
From: mi.Owner,
|
||||
From: fromAddrId,
|
||||
To: maddr,
|
||||
Method: miner.Methods.ChangeOwnerAddress,
|
||||
Value: big.Zero(),
|
||||
@ -691,7 +705,7 @@ var actorSetOwnerCmd = &cli.Command{
|
||||
return xerrors.Errorf("mpool push: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Propose Message CID:", smsg.Cid())
|
||||
fmt.Println("Message CID:", smsg.Cid())
|
||||
|
||||
// wait for it to get mined into a block
|
||||
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
|
||||
@ -701,34 +715,11 @@ var actorSetOwnerCmd = &cli.Command{
|
||||
|
||||
// check it executed successfully
|
||||
if wait.Receipt.ExitCode != 0 {
|
||||
fmt.Println("Propose owner change failed!")
|
||||
fmt.Println("owner change failed!")
|
||||
return err
|
||||
}
|
||||
|
||||
smsg, err = api.MpoolPushMessage(ctx, &types.Message{
|
||||
From: newAddr,
|
||||
To: maddr,
|
||||
Method: miner.Methods.ChangeOwnerAddress,
|
||||
Value: big.Zero(),
|
||||
Params: sp,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("mpool push: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Approve Message CID:", smsg.Cid())
|
||||
|
||||
// wait for it to get mined into a block
|
||||
wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check it executed successfully
|
||||
if wait.Receipt.ExitCode != 0 {
|
||||
fmt.Println("Approve owner change failed!")
|
||||
return err
|
||||
}
|
||||
fmt.Println("message succeeded!")
|
||||
|
||||
return nil
|
||||
},
|
||||
|
@ -222,7 +222,7 @@ func infoCmdAct(cctx *cli.Context) error {
|
||||
fmt.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short())
|
||||
fmt.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short())
|
||||
fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short())
|
||||
color.Green(" Available: %s", types.FIL(availBalance).Short())
|
||||
colorTokenAmount(" Available: %s\n", availBalance)
|
||||
|
||||
mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
@ -232,7 +232,7 @@ func infoCmdAct(cctx *cli.Context) error {
|
||||
|
||||
fmt.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short())
|
||||
fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short())
|
||||
color.Green(" Available: %s\n", types.FIL(big.Sub(mb.Escrow, mb.Locked)).Short())
|
||||
colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked))
|
||||
|
||||
wb, err := api.WalletBalance(ctx, mi.Worker)
|
||||
if err != nil {
|
||||
@ -253,7 +253,7 @@ func infoCmdAct(cctx *cli.Context) error {
|
||||
|
||||
fmt.Printf(" Control: %s\n", types.FIL(cbsum).Short())
|
||||
}
|
||||
fmt.Printf("Total Spendable: %s\n", color.YellowString(types.FIL(spendable).Short()))
|
||||
colorTokenAmount("Total Spendable: %s\n", spendable)
|
||||
|
||||
fmt.Println()
|
||||
|
||||
@ -298,6 +298,10 @@ var stateList = []stateMeta{
|
||||
{col: color.FgYellow, state: sealing.CommitWait},
|
||||
{col: color.FgYellow, state: sealing.FinalizeSector},
|
||||
|
||||
{col: color.FgCyan, state: sealing.Terminating},
|
||||
{col: color.FgCyan, state: sealing.TerminateWait},
|
||||
{col: color.FgCyan, state: sealing.TerminateFinality},
|
||||
{col: color.FgCyan, state: sealing.TerminateFailed},
|
||||
{col: color.FgCyan, state: sealing.Removing},
|
||||
{col: color.FgCyan, state: sealing.Removed},
|
||||
|
||||
@ -355,3 +359,13 @@ func sectorsInfo(ctx context.Context, napi api.StorageMiner) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func colorTokenAmount(format string, amount abi.TokenAmount) {
|
||||
if amount.GreaterThan(big.Zero()) {
|
||||
color.Green(format, types.FIL(amount).Short())
|
||||
} else if amount.Equals(big.Zero()) {
|
||||
color.Yellow(format, types.FIL(amount).Short())
|
||||
} else {
|
||||
color.Red(format, types.FIL(amount).Short())
|
||||
}
|
||||
}
|
||||
|
@ -451,7 +451,7 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose
|
||||
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
|
||||
|
||||
if verbose {
|
||||
_, _ = fmt.Fprintf(w, "Creation\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n")
|
||||
_, _ = fmt.Fprintf(w, "Creation\tVerified\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n")
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(w, "ProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\n")
|
||||
}
|
||||
@ -465,7 +465,7 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose
|
||||
fil := types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration()))))
|
||||
|
||||
if verbose {
|
||||
_, _ = fmt.Fprintf(w, "%s\t", deal.CreationTime.Time().Format(time.Stamp))
|
||||
_, _ = fmt.Fprintf(w, "%s\t%t\t", deal.CreationTime.Time().Format(time.Stamp), deal.Proposal.VerifiedDeal)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s", propcid, deal.DealID, storagemarket.DealStates[deal.State], deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)), fil, deal.Proposal.Duration())
|
||||
@ -744,6 +744,11 @@ var transfersListCmd = &cli.Command{
|
||||
Name: "list",
|
||||
Usage: "List ongoing data transfers for this miner",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Aliases: []string{"v"},
|
||||
Usage: "print verbose transfer details",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
@ -775,6 +780,7 @@ var transfersListCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
verbose := cctx.Bool("verbose")
|
||||
completed := cctx.Bool("completed")
|
||||
color := cctx.Bool("color")
|
||||
watch := cctx.Bool("watch")
|
||||
@ -790,7 +796,7 @@ var transfersListCmd = &cli.Command{
|
||||
|
||||
tm.MoveCursor(1, 1)
|
||||
|
||||
lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
|
||||
lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed)
|
||||
|
||||
tm.Flush()
|
||||
|
||||
@ -815,7 +821,7 @@ var transfersListCmd = &cli.Command{
|
||||
}
|
||||
}
|
||||
}
|
||||
lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
|
||||
lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
@ -446,7 +446,7 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
for parIdx, par := range partitions {
|
||||
sectors := make(map[abi.SectorNumber]struct{})
|
||||
|
||||
sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.AllSectors, types.EmptyTSK)
|
||||
sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ var sectorsCmd = &cli.Command{
|
||||
sectorsRefsCmd,
|
||||
sectorsUpdateCmd,
|
||||
sectorsPledgeCmd,
|
||||
sectorsTerminateCmd,
|
||||
sectorsRemoveCmd,
|
||||
sectorsMarkForUpgradeCmd,
|
||||
sectorsStartSealCmd,
|
||||
@ -396,9 +397,123 @@ var sectorsRefsCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var sectorsTerminateCmd = &cli.Command{
|
||||
Name: "terminate",
|
||||
Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)",
|
||||
ArgsUsage: "<sectorNum>",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "really-do-it",
|
||||
Usage: "pass this flag if you know what you are doing",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
sectorsTerminateFlushCmd,
|
||||
sectorsTerminatePendingCmd,
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Bool("really-do-it") {
|
||||
return xerrors.Errorf("pass --really-do-it to confirm this action")
|
||||
}
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
if cctx.Args().Len() != 1 {
|
||||
return xerrors.Errorf("must pass sector number")
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse sector number: %w", err)
|
||||
}
|
||||
|
||||
return nodeApi.SectorTerminate(ctx, abi.SectorNumber(id))
|
||||
},
|
||||
}
|
||||
|
||||
var sectorsTerminateFlushCmd = &cli.Command{
|
||||
Name: "flush",
|
||||
Usage: "Send a terminate message if there are sectors queued for termination",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
mcid, err := nodeApi.SectorTerminateFlush(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mcid == nil {
|
||||
return xerrors.New("no sectors were queued for termination")
|
||||
}
|
||||
|
||||
fmt.Println(mcid)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var sectorsTerminatePendingCmd = &cli.Command{
|
||||
Name: "pending",
|
||||
Usage: "List sector numbers of sectors pending termination",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer nCloser()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
pending, err := nodeApi.SectorTerminatePending(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
maddr, err := nodeApi.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dl, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting proving deadline info failed: %w", err)
|
||||
}
|
||||
|
||||
for _, id := range pending {
|
||||
loc, err := api.StateSectorPartition(ctx, maddr, id.Number, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("finding sector partition: %w", err)
|
||||
}
|
||||
|
||||
fmt.Print(id.Number)
|
||||
|
||||
if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain)
|
||||
loc.Deadline == dl.Index || // not in current
|
||||
(loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous
|
||||
fmt.Print(" (in proving window)")
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
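
A worked example of the deadline window check above (numbers are illustrative): with miner.WPoStPeriodDeadlines = 48 and the current proving deadline dl.Index = 10, a pending sector is flagged "(in proving window)" when its partition's deadline is 11 (next), 10 (current), or 9 (previous). The modular arithmetic handles wraparound, so with dl.Index = 47 the flagged deadlines are 0, 47, and 46.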
|
||||
|
||||
var sectorsRemoveCmd = &cli.Command{
|
||||
Name: "remove",
|
||||
Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)",
|
||||
Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))",
|
||||
ArgsUsage: "<sectorNum>",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
|
@ -1,14 +1,121 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/cheggaaa/pb.v1"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/backupds"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
var backupCmd = lcli.BackupCmd("repo", repo.FullNode, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) {
|
||||
return lcli.GetFullNodeAPI(cctx)
|
||||
})
|
||||
|
||||
func restore(cctx *cli.Context, r repo.Repo) error {
|
||||
bf, err := homedir.Expand(cctx.Path("restore"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("expand backup file path: %w", err)
|
||||
}
|
||||
|
||||
st, err := os.Stat(bf)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("stat backup file (%s): %w", bf, err)
|
||||
}
|
||||
|
||||
f, err := os.Open(bf)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opening backup file: %w", err)
|
||||
}
|
||||
defer f.Close() // nolint:errcheck
|
||||
|
||||
lr, err := r.Lock(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lr.Close() // nolint:errcheck
|
||||
|
||||
if cctx.IsSet("restore-config") {
|
||||
log.Info("Restoring config")
|
||||
|
||||
cf, err := homedir.Expand(cctx.String("restore-config"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("expanding config path: %w", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(cf)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("stat config file (%s): %w", cf, err)
|
||||
}
|
||||
|
||||
var cerr error
|
||||
err = lr.SetConfig(func(raw interface{}) {
|
||||
rcfg, ok := raw.(*config.FullNode)
|
||||
if !ok {
|
||||
cerr = xerrors.New("expected fullnode config")
|
||||
return
|
||||
}
|
||||
|
||||
ff, err := config.FromFile(cf, rcfg)
|
||||
if err != nil {
|
||||
cerr = xerrors.Errorf("loading config: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
*rcfg = *ff.(*config.FullNode)
|
||||
})
|
||||
if cerr != nil {
|
||||
return cerr
|
||||
}
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting config: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
log.Warn("--restore-config NOT SET, WILL USE DEFAULT VALUES")
|
||||
}
|
||||
|
||||
log.Info("Restoring metadata backup")
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bar := pb.New64(st.Size())
|
||||
br := bar.NewProxyReader(f)
|
||||
bar.ShowTimeLeft = true
|
||||
bar.ShowPercent = true
|
||||
bar.ShowSpeed = true
|
||||
bar.Units = pb.U_BYTES
|
||||
|
||||
bar.Start()
|
||||
err = backupds.RestoreInto(br, mds)
|
||||
bar.Finish()
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("restoring metadata: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Resetting chainstore metadata")
|
||||
|
||||
chainHead := dstore.NewKey("head")
|
||||
if err := mds.Delete(chainHead); err != nil {
|
||||
return xerrors.Errorf("clearing chain head: %w", err)
|
||||
}
|
||||
if err := store.FlushValidationCache(mds); err != nil {
|
||||
return xerrors.Errorf("clearing chain validation cache: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -144,6 +144,14 @@ var DaemonCmd = &cli.Command{
|
||||
Name: "api-max-req-size",
|
||||
Usage: "maximum API request size accepted by the JSON RPC server",
|
||||
},
|
||||
&cli.PathFlag{
|
||||
Name: "restore",
|
||||
Usage: "restore from backup file",
|
||||
},
|
||||
&cli.PathFlag{
|
||||
Name: "restore-config",
|
||||
Usage: "config file to use when restoring from backup",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
isLite := cctx.Bool("lite")
|
||||
@ -203,9 +211,11 @@ var DaemonCmd = &cli.Command{
|
||||
r.SetConfigPath(cctx.String("config"))
|
||||
}
|
||||
|
||||
if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists {
|
||||
err = r.Init(repo.FullNode)
|
||||
if err != nil && err != repo.ErrRepoExists {
|
||||
return xerrors.Errorf("repo init error: %w", err)
|
||||
}
|
||||
freshRepo := err != repo.ErrRepoExists
|
||||
|
||||
if !isLite {
|
||||
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil {
|
||||
@ -223,6 +233,15 @@ var DaemonCmd = &cli.Command{
|
||||
genBytes = build.MaybeGenesis()
|
||||
}
|
||||
|
||||
if cctx.IsSet("restore") {
|
||||
if !freshRepo {
|
||||
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
|
||||
}
|
||||
if err := restore(cctx, r); err != nil {
|
||||
return xerrors.Errorf("restoring from backup: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
chainfile := cctx.String("import-chain")
|
||||
snapshot := cctx.String("import-snapshot")
|
||||
if chainfile != "" || snapshot != "" {
|
||||
|
@ -18,7 +18,7 @@ func TestProtocolCodenames(t *testing.T) {
|
||||
t.Fatal("expected breeze codename")
|
||||
}
|
||||
|
||||
if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(height) != "actorsv2" {
|
||||
if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" {
|
||||
t.Fatal("expected actorsv2 codename")
|
||||
}
|
||||
|
||||
|
165
cmd/tvx/exec.go
@ -1,33 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/filecoin-project/go-address"
|
||||
cbornode "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/conformance"
|
||||
|
||||
"github.com/filecoin-project/test-vectors/schema"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/conformance"
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
)
|
||||
|
||||
var execFlags struct {
|
||||
file string
|
||||
out string
|
||||
driverOpts cli.StringSlice
|
||||
fallbackBlockstore bool
|
||||
}
|
||||
|
||||
const (
|
||||
optSaveBalances = "save-balances"
|
||||
)
|
||||
|
||||
var execCmd = &cli.Command{
|
||||
Name: "exec",
|
||||
Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or a ndjson stdin stream",
|
||||
Action: runExecLotus,
|
||||
Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, a directory, or a ndjson stdin stream",
|
||||
Action: runExec,
|
||||
Flags: []cli.Flag{
|
||||
&repoFlag,
|
||||
&cli.StringFlag{
|
||||
Name: "file",
|
||||
Usage: "input file; if not supplied, the vector will be read from stdin",
|
||||
Usage: "input file or directory; if not supplied, the vector will be read from stdin",
|
||||
TakesFile: true,
|
||||
Destination: &execFlags.file,
|
||||
},
|
||||
@ -36,10 +51,20 @@ var execCmd = &cli.Command{
|
||||
Usage: "sets the full node API as a fallback blockstore; use this if you're transplanting vectors and get block not found errors",
|
||||
Destination: &execFlags.fallbackBlockstore,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "out",
|
||||
Usage: "output directory where to save the results, only used when the input is a directory",
|
||||
Destination: &execFlags.out,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "driver-opt",
|
||||
Usage: "comma-separated list of driver options (EXPERIMENTAL; will change), supported: 'save-balances=<dst>', 'pipeline-basefee' (unimplemented); only available in single-file mode",
|
||||
Destination: &execFlags.driverOpts,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func runExecLotus(c *cli.Context) error {
|
||||
func runExec(c *cli.Context) error {
|
||||
if execFlags.fallbackBlockstore {
|
||||
if err := initialize(c); err != nil {
|
||||
return fmt.Errorf("fallback blockstore was enabled, but could not resolve lotus API endpoint: %w", err)
|
||||
@ -48,30 +73,97 @@ func runExecLotus(c *cli.Context) error {
|
||||
conformance.FallbackBlockstoreGetter = FullAPI
|
||||
}
|
||||
|
||||
if file := execFlags.file; file != "" {
|
||||
// we have a single test vector supplied as a file.
|
||||
file, err := os.Open(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open test vector: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
dec = json.NewDecoder(file)
|
||||
tv schema.TestVector
|
||||
)
|
||||
|
||||
if err = dec.Decode(&tv); err != nil {
|
||||
return fmt.Errorf("failed to decode test vector: %w", err)
|
||||
}
|
||||
|
||||
return executeTestVector(tv)
|
||||
path := execFlags.file
|
||||
if path == "" {
|
||||
return execVectorsStdin()
|
||||
}
|
||||
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
// we're in directory mode; ensure the out directory exists.
|
||||
outdir := execFlags.out
|
||||
if outdir == "" {
|
||||
return fmt.Errorf("no output directory provided")
|
||||
}
|
||||
if err := ensureDir(outdir); err != nil {
|
||||
return err
|
||||
}
|
||||
return execVectorDir(path, outdir)
|
||||
}
|
||||
|
||||
// process tipset vector options.
|
||||
if err := processTipsetOpts(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = execVectorFile(new(conformance.LogReporter), path)
|
||||
return err
|
||||
}
|
||||
|
||||
func processTipsetOpts() error {
|
||||
for _, opt := range execFlags.driverOpts.Value() {
|
||||
switch ss := strings.Split(opt, "="); {
|
||||
case ss[0] == optSaveBalances:
|
||||
filename := ss[1]
|
||||
log.Printf("saving balances after each tipset in: %s", filename)
|
||||
balancesFile, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := bufio.NewWriter(balancesFile)
|
||||
cb := func(bs blockstore.Blockstore, params *conformance.ExecuteTipsetParams, res *conformance.ExecuteTipsetResult) {
|
||||
cst := cbornode.NewCborStore(bs)
|
||||
st, err := state.LoadStateTree(cst, res.PostStateRoot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = st.ForEach(func(addr address.Address, actor *types.Actor) error {
|
||||
_, err := fmt.Fprintln(w, params.ExecEpoch, addr, actor.Balance)
|
||||
return err
|
||||
})
|
||||
_ = w.Flush()
|
||||
}
|
||||
conformance.TipsetVectorOpts.OnTipsetApplied = append(conformance.TipsetVectorOpts.OnTipsetApplied, cb)
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func execVectorDir(path string, outdir string) error {
|
||||
files, err := filepath.Glob(filepath.Join(path, "*"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to glob input directory %s: %w", path, err)
|
||||
}
|
||||
for _, f := range files {
|
||||
outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out"
|
||||
outpath := filepath.Join(outdir, outfile)
|
||||
outw, err := os.Create(outpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %s: %w", outpath, err)
|
||||
}
|
||||
|
||||
log.Printf("processing vector %s; sending output to %s", f, outpath)
|
||||
log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output.
|
||||
_, _ = execVectorFile(new(conformance.LogReporter), f)
|
||||
log.SetOutput(os.Stderr)
|
||||
_ = outw.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func execVectorsStdin() error {
|
||||
r := new(conformance.LogReporter)
|
||||
for dec := json.NewDecoder(os.Stdin); ; {
|
||||
var tv schema.TestVector
|
||||
switch err := dec.Decode(&tv); err {
|
||||
case nil:
|
||||
if err = executeTestVector(tv); err != nil {
|
||||
if _, err = executeTestVector(r, tv); err != nil {
|
||||
return err
|
||||
}
|
||||
case io.EOF:
|
||||
@ -84,19 +176,30 @@ func runExecLotus(c *cli.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
func executeTestVector(tv schema.TestVector) error {
|
||||
func execVectorFile(r conformance.Reporter, path string) (diffs []string, error error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open test vector: %w", err)
|
||||
}
|
||||
|
||||
var tv schema.TestVector
|
||||
if err = json.NewDecoder(file).Decode(&tv); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode test vector: %w", err)
|
||||
}
|
||||
return executeTestVector(r, tv)
|
||||
}
|
||||
|
||||
func executeTestVector(r conformance.Reporter, tv schema.TestVector) (diffs []string, err error) {
|
||||
log.Println("executing test vector:", tv.Meta.ID)
|
||||
|
||||
for _, v := range tv.Pre.Variants {
|
||||
r := new(conformance.LogReporter)
|
||||
|
||||
switch class, v := tv.Class, v; class {
|
||||
case "message":
|
||||
conformance.ExecuteMessageVector(r, &tv, &v)
|
||||
diffs, err = conformance.ExecuteMessageVector(r, &tv, &v)
|
||||
case "tipset":
|
||||
conformance.ExecuteTipsetVector(r, &tv, &v)
|
||||
diffs, err = conformance.ExecuteTipsetVector(r, &tv, &v)
|
||||
default:
|
||||
return fmt.Errorf("test vector class %s not supported", class)
|
||||
return nil, fmt.Errorf("test vector class %s not supported", class)
|
||||
}
|
||||
|
||||
if r.Failed() {
|
||||
@ -106,5 +209,5 @@ func executeTestVector(tv schema.TestVector) error {
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return diffs, err
|
||||
}
|
||||
|
@ -1,8 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/filecoin-project/test-vectors/schema"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@ -21,6 +27,7 @@ type extractOpts struct {
|
||||
retain string
|
||||
precursor string
|
||||
ignoreSanityChecks bool
|
||||
squash bool
|
||||
}
|
||||
|
||||
var extractFlags extractOpts
|
||||
@ -62,13 +69,13 @@ var extractCmd = &cli.Command{
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "tsk",
|
||||
Usage: "tipset key to extract into a vector",
|
||||
Usage: "tipset key to extract into a vector, or range of tipsets in tsk1..tsk2 form",
|
||||
Destination: &extractFlags.tsk,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "out",
|
||||
Aliases: []string{"o"},
|
||||
Usage: "file to write test vector to",
|
||||
Usage: "file to write test vector to, or directory to write the batch to",
|
||||
Destination: &extractFlags.file,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@ -93,6 +100,12 @@ var extractCmd = &cli.Command{
|
||||
Value: false,
|
||||
Destination: &extractFlags.ignoreSanityChecks,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "squash",
|
||||
Usage: "when extracting a tipset range, squash all tipsets into a single vector",
|
||||
Value: false,
|
||||
Destination: &extractFlags.squash,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@ -106,3 +119,43 @@ func runExtract(_ *cli.Context) error {
|
||||
return fmt.Errorf("unsupported vector class")
|
||||
}
|
||||
}
|
||||
|
||||
// writeVector writes the vector into the specified file, or to stdout if
|
||||
// file is empty.
|
||||
func writeVector(vector *schema.TestVector, file string) (err error) {
|
||||
output := io.WriteCloser(os.Stdout)
|
||||
if file := file; file != "" {
|
||||
dir := filepath.Dir(file)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("unable to create directory %s: %w", dir, err)
|
||||
}
|
||||
output, err = os.Create(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close() //nolint:errcheck
|
||||
defer log.Printf("wrote test vector to file: %s", file)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(output)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(&vector)
|
||||
}
|
||||
|
||||
// writeVectors writes each vector to a different file under the specified
|
||||
// directory.
|
||||
func writeVectors(dir string, vectors ...*schema.TestVector) error {
|
||||
// verify the output directory exists.
|
||||
if err := ensureDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
// write each vector to its file.
|
||||
for _, v := range vectors {
|
||||
id := v.Meta.ID
|
||||
path := filepath.Join(dir, fmt.Sprintf("%s.json", id))
|
||||
if err := writeVector(v, path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -4,12 +4,9 @@ import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -316,28 +313,7 @@ func doExtractMessage(opts extractOpts) error {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return writeVector(vector, opts.file)
|
||||
}
|
||||
|
||||
func writeVector(vector schema.TestVector, file string) (err error) {
|
||||
output := io.WriteCloser(os.Stdout)
|
||||
if file := file; file != "" {
|
||||
dir := filepath.Dir(file)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("unable to create directory %s: %w", dir, err)
|
||||
}
|
||||
output, err = os.Create(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close() //nolint:errcheck
|
||||
defer log.Printf("wrote test vector to file: %s", file)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(output)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(&vector)
|
||||
return writeVector(&vector, opts.file)
|
||||
}
|
||||
|
||||
// resolveFromChain queries the chain for the provided message, using the block CID to
|
||||
|
@ -6,10 +6,12 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/filecoin-project/test-vectors/schema"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/conformance"
|
||||
)
|
||||
@ -17,170 +19,259 @@ import (
|
||||
func doExtractTipset(opts extractOpts) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if opts.tsk == "" {
|
||||
return fmt.Errorf("tipset key cannot be empty")
|
||||
}
|
||||
|
||||
if opts.retain != "accessed-cids" {
|
||||
return fmt.Errorf("tipset extraction only supports 'accessed-cids' state retention")
|
||||
}
|
||||
|
||||
ts, err := lcli.ParseTipSetRef(ctx, FullAPI, opts.tsk)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tipset: %w", err)
|
||||
if opts.tsk == "" {
|
||||
return fmt.Errorf("tipset key cannot be empty")
|
||||
}
|
||||
|
||||
log.Printf("tipset block count: %d", len(ts.Blocks()))
|
||||
|
||||
var blocks []schema.Block
|
||||
for _, b := range ts.Blocks() {
|
||||
msgs, err := FullAPI.ChainGetBlockMessages(ctx, b.Cid())
|
||||
ss := strings.Split(opts.tsk, "..")
|
||||
switch len(ss) {
|
||||
case 1: // extracting a single tipset.
|
||||
ts, err := lcli.ParseTipSetRef(ctx, FullAPI, opts.tsk)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get block messages (cid: %s): %w", b.Cid(), err)
|
||||
return fmt.Errorf("failed to fetch tipset: %w", err)
|
||||
}
|
||||
v, err := extractTipsets(ctx, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeVector(v, opts.file)
|
||||
|
||||
case 2: // extracting a range of tipsets.
|
||||
left, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tipset %s: %w", ss[0], err)
|
||||
}
|
||||
right, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch tipset %s: %w", ss[1], err)
|
||||
}
|
||||
|
||||
log.Printf("block %s has %d messages", b.Cid(), len(msgs.Cids))
|
||||
// resolve the tipset range.
|
||||
tss, err := resolveTipsetRange(ctx, left, right)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
packed := make([]schema.Base64EncodedBytes, 0, len(msgs.Cids))
|
||||
for _, m := range msgs.BlsMessages {
|
||||
b, err := m.Serialize()
|
||||
// are we squashing all tipsets into a single multi-tipset vector?
|
||||
if opts.squash {
|
||||
vector, err := extractTipsets(ctx, tss...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to serialize message: %w", err)
|
||||
return err
|
||||
}
|
||||
packed = append(packed, b)
|
||||
return writeVector(vector, opts.file)
|
||||
}
|
||||
for _, m := range msgs.SecpkMessages {
|
||||
b, err := m.Message.Serialize()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to serialize message: %w", err)
|
||||
}
|
||||
packed = append(packed, b)
|
||||
|
||||
// we are generating a single-tipset vector per tipset.
|
||||
vectors, err := extractIndividualTipsets(ctx, tss...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blocks = append(blocks, schema.Block{
|
||||
MinerAddr: b.Miner,
|
||||
WinCount: b.ElectionProof.WinCount,
|
||||
Messages: packed,
|
||||
})
|
||||
return writeVectors(opts.file, vectors...)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unrecognized tipset format")
|
||||
}
|
||||
}
|
||||
|
||||
func resolveTipsetRange(ctx context.Context, left *types.TipSet, right *types.TipSet) (tss []*types.TipSet, err error) {
|
||||
// start from the right tipset and walk back the chain until the left tipset, inclusive.
|
||||
for curr := right; curr.Key() != left.Parents(); {
|
||||
tss = append(tss, curr)
|
||||
curr, err = FullAPI.ChainGetTipSet(ctx, curr.Parents())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get tipset %s (height: %d): %w", curr.Parents(), curr.Height()-1, err)
|
||||
}
|
||||
}
|
||||
// reverse the slice.
|
||||
for i, j := 0, len(tss)-1; i < j; i, j = i+1, j-1 {
|
||||
tss[i], tss[j] = tss[j], tss[i]
|
||||
}
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func extractIndividualTipsets(ctx context.Context, tss ...*types.TipSet) (vectors []*schema.TestVector, err error) {
|
||||
for _, ts := range tss {
|
||||
v, err := extractTipsets(ctx, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vectors = append(vectors, v)
|
||||
}
|
||||
return vectors, nil
|
||||
}
|
||||
|
||||
func extractTipsets(ctx context.Context, tss ...*types.TipSet) (*schema.TestVector, error) {
|
||||
var (
|
||||
// create a read-through store that uses ChainGetObject to fetch unknown CIDs.
|
||||
pst = NewProxyingStores(ctx, FullAPI)
|
||||
g = NewSurgeon(ctx, FullAPI, pst)
|
||||
|
||||
// recordingRand will record randomness so we can embed it in the test vector.
|
||||
recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI)
|
||||
)
|
||||
|
||||
tbs, ok := pst.Blockstore.(TracingBlockstore)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
|
||||
}
|
||||
|
||||
driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
|
||||
DisableVMFlush: true,
|
||||
})
|
||||
|
||||
base := tss[0]
|
||||
last := tss[len(tss)-1]
|
||||
|
||||
// this is the root of the state tree we start with.
|
||||
root := ts.ParentState()
|
||||
root := base.ParentState()
|
||||
log.Printf("base state tree root CID: %s", root)
|
||||
|
||||
basefee := ts.Blocks()[0].ParentBaseFee
|
||||
log.Printf("basefee: %s", basefee)
|
||||
|
||||
tipset := schema.Tipset{
|
||||
BaseFee: *basefee.Int,
|
||||
Blocks: blocks,
|
||||
codename := GetProtocolCodename(base.Height())
|
||||
nv, err := FullAPI.StateNetworkVersion(ctx, base.Key())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// recordingRand will record randomness so we can embed it in the test vector.
|
||||
recordingRand := conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI)
|
||||
version, err := FullAPI.Version(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf("using state retention strategy: %s", extractFlags.retain)
|
||||
ntwkName, err := FullAPI.StateNetworkName(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tbs, ok := pst.Blockstore.(TracingBlockstore)
|
||||
if !ok {
|
||||
return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
|
||||
vector := schema.TestVector{
|
||||
Class: schema.ClassTipset,
|
||||
Meta: &schema.Metadata{
|
||||
ID: fmt.Sprintf("@%d..@%d", base.Height(), last.Height()),
|
||||
Gen: []schema.GenerationData{
|
||||
{Source: fmt.Sprintf("network:%s", ntwkName)},
|
||||
{Source: "github.com/filecoin-project/lotus", Version: version.String()}},
|
||||
// will be completed by extra tipset stamps.
|
||||
},
|
||||
Selector: schema.Selector{
|
||||
schema.SelectorMinProtocolVersion: codename,
|
||||
},
|
||||
Pre: &schema.Preconditions{
|
||||
Variants: []schema.Variant{
|
||||
{ID: codename, Epoch: int64(base.Height()), NetworkVersion: uint(nv)},
|
||||
},
|
||||
StateTree: &schema.StateTree{
|
||||
RootCID: base.ParentState(),
|
||||
},
|
||||
},
|
||||
Post: &schema.Postconditions{
|
||||
StateTree: new(schema.StateTree),
|
||||
},
|
||||
}
|
||||
|
||||
tbs.StartTracing()
|
||||
|
||||
params := conformance.ExecuteTipsetParams{
|
||||
Preroot: ts.ParentState(),
|
||||
ParentEpoch: ts.Height() - 1,
|
||||
Tipset: &tipset,
|
||||
ExecEpoch: ts.Height(),
|
||||
Rand: recordingRand,
|
||||
}
|
||||
result, err := driver.ExecuteTipset(pst.Blockstore, pst.Datastore, params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute tipset: %w", err)
|
||||
roots := []cid.Cid{base.ParentState()}
|
||||
for i, ts := range tss {
|
||||
log.Printf("tipset %s block count: %d", ts.Key(), len(ts.Blocks()))
|
||||
|
||||
var blocks []schema.Block
|
||||
for _, b := range ts.Blocks() {
|
||||
msgs, err := FullAPI.ChainGetBlockMessages(ctx, b.Cid())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get block messages (cid: %s): %w", b.Cid(), err)
|
||||
}
|
||||
|
||||
log.Printf("block %s has %d messages", b.Cid(), len(msgs.Cids))
|
||||
|
||||
packed := make([]schema.Base64EncodedBytes, 0, len(msgs.Cids))
|
||||
for _, m := range msgs.BlsMessages {
|
||||
b, err := m.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize message: %w", err)
|
||||
}
|
||||
packed = append(packed, b)
|
||||
}
|
||||
for _, m := range msgs.SecpkMessages {
|
||||
b, err := m.Message.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize message: %w", err)
|
||||
}
|
||||
packed = append(packed, b)
|
||||
}
|
||||
blocks = append(blocks, schema.Block{
|
||||
MinerAddr: b.Miner,
|
||||
WinCount: b.ElectionProof.WinCount,
|
||||
Messages: packed,
|
||||
})
|
||||
}
|
||||
|
||||
basefee := base.Blocks()[0].ParentBaseFee
|
||||
log.Printf("tipset basefee: %s", basefee)
|
||||
|
||||
tipset := schema.Tipset{
|
||||
BaseFee: *basefee.Int,
|
||||
Blocks: blocks,
|
||||
EpochOffset: int64(i),
|
||||
}
|
||||
|
||||
params := conformance.ExecuteTipsetParams{
|
||||
Preroot: roots[len(roots)-1],
|
||||
ParentEpoch: ts.Height() - 1,
|
||||
Tipset: &tipset,
|
||||
ExecEpoch: ts.Height(),
|
||||
Rand: recordingRand,
|
||||
}
|
||||
|
||||
result, err := driver.ExecuteTipset(pst.Blockstore, pst.Datastore, params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute tipset: %w", err)
|
||||
}
|
||||
|
||||
roots = append(roots, result.PostStateRoot)
|
||||
|
||||
// update the vector.
|
||||
vector.ApplyTipsets = append(vector.ApplyTipsets, tipset)
|
||||
vector.Post.ReceiptsRoots = append(vector.Post.ReceiptsRoots, result.ReceiptsRoot)
|
||||
|
||||
for _, res := range result.AppliedResults {
|
||||
vector.Post.Receipts = append(vector.Post.Receipts, &schema.Receipt{
|
||||
ExitCode: int64(res.ExitCode),
|
||||
ReturnValue: res.Return,
|
||||
GasUsed: res.GasUsed,
|
||||
})
|
||||
}
|
||||
|
||||
vector.Meta.Gen = append(vector.Meta.Gen, schema.GenerationData{
|
||||
Source: "tipset:" + ts.Key().String(),
|
||||
})
|
||||
}
|
||||
|
||||
accessed := tbs.FinishTracing()
|
||||
|
||||
//
|
||||
// ComputeBaseFee(ctx, baseTs)
|
||||
|
||||
// write a CAR with the accessed state into a buffer.
|
||||
var (
|
||||
out = new(bytes.Buffer)
|
||||
gw = gzip.NewWriter(out)
|
||||
)
|
||||
if err := g.WriteCARIncluding(gw, accessed, ts.ParentState(), result.PostStateRoot); err != nil {
|
||||
return err
|
||||
if err := g.WriteCARIncluding(gw, accessed, roots...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = gw.Flush(); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
if err = gw.Close(); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
codename := GetProtocolCodename(ts.Height())
|
||||
nv, err := FullAPI.StateNetworkVersion(ctx, ts.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vector.Randomness = recordingRand.Recorded()
|
||||
vector.Post.StateTree.RootCID = roots[len(roots)-1]
|
||||
vector.CAR = out.Bytes()
|
||||
|
||||
version, err := FullAPI.Version(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ntwkName, err := FullAPI.StateNetworkName(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vector := schema.TestVector{
|
||||
Class: schema.ClassTipset,
|
||||
Meta: &schema.Metadata{
|
||||
ID: opts.id,
|
||||
Gen: []schema.GenerationData{
|
||||
{Source: fmt.Sprintf("network:%s", ntwkName)},
|
||||
{Source: fmt.Sprintf("tipset:%s", ts.Key())},
|
||||
{Source: "github.com/filecoin-project/lotus", Version: version.String()}},
|
||||
},
|
||||
Selector: schema.Selector{
|
||||
schema.SelectorMinProtocolVersion: codename,
|
||||
},
|
||||
Randomness: recordingRand.Recorded(),
|
||||
CAR: out.Bytes(),
|
||||
Pre: &schema.Preconditions{
|
||||
Variants: []schema.Variant{
|
||||
{ID: codename, Epoch: int64(ts.Height()), NetworkVersion: uint(nv)},
|
||||
},
|
||||
BaseFee: basefee.Int,
|
||||
StateTree: &schema.StateTree{
|
||||
RootCID: ts.ParentState(),
|
||||
},
|
||||
},
|
||||
ApplyTipsets: []schema.Tipset{tipset},
|
||||
Post: &schema.Postconditions{
|
||||
StateTree: &schema.StateTree{
|
||||
RootCID: result.PostStateRoot,
|
||||
},
|
||||
ReceiptsRoots: []cid.Cid{result.ReceiptsRoot},
|
||||
},
|
||||
}
|
||||
|
||||
for _, res := range result.AppliedResults {
|
||||
vector.Post.Receipts = append(vector.Post.Receipts, &schema.Receipt{
|
||||
ExitCode: int64(res.ExitCode),
|
||||
ReturnValue: res.Return,
|
||||
GasUsed: res.GasUsed,
|
||||
})
|
||||
}
|
||||
|
||||
return writeVector(vector, opts.file)
|
||||
return &vector, nil
|
||||
}
|
||||
|
@ -113,3 +113,19 @@ func destroy(_ *cli.Context) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureDir(path string) error {
|
||||
switch fi, err := os.Stat(path); {
|
||||
case os.IsNotExist(err):
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", path, err)
|
||||
}
|
||||
case err == nil:
|
||||
if !fi.IsDir() {
|
||||
return fmt.Errorf("path %s is not a directory: %w", path, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("failed to stat directory %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -202,7 +202,7 @@ func runSimulateCmd(_ *cli.Context) error {
|
||||
},
|
||||
}
|
||||
|
||||
if err := writeVector(vector, simulateFlags.out); err != nil {
|
||||
if err := writeVector(&vector, simulateFlags.out); err != nil {
|
||||
return fmt.Errorf("failed to write vector: %w", err)
|
||||
}
|
||||
|
||||
|
@ -149,3 +149,14 @@ func (pb *proxyingBlockstore) Put(block blocks.Block) error {
|
||||
pb.lk.Unlock()
|
||||
return pb.Blockstore.Put(block)
|
||||
}
|
||||
|
||||
func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error {
|
||||
pb.lk.Lock()
|
||||
if pb.tracing {
|
||||
for _, b := range blocks {
|
||||
pb.traced[b.Cid()] = struct{}{}
|
||||
}
|
||||
}
|
||||
pb.lk.Unlock()
|
||||
return pb.Blockstore.PutMany(blocks)
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"github.com/filecoin-project/test-vectors/schema"
|
||||
)
|
||||
|
||||
var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant){
|
||||
var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant) ([]string, error){
|
||||
schema.ClassMessage: ExecuteMessageVector,
|
||||
schema.ClassTipset: ExecuteTipsetVector,
|
||||
}
|
||||
@ -133,7 +133,7 @@ func TestConformance(t *testing.T) {
|
||||
for _, variant := range vector.Pre.Variants {
|
||||
variant := variant
|
||||
t.Run(variant.ID, func(t *testing.T) {
|
||||
invokee(t, &vector, &variant)
|
||||
_, _ = invokee(t, &vector, &variant) //nolint:errcheck
|
||||
})
|
||||
}
|
||||
})
|
||||
|
@ -71,6 +71,9 @@ type ExecuteTipsetResult struct {
|
||||
AppliedMessages []*types.Message
|
||||
// AppliedResults stores the results of AppliedMessages, in the same order.
|
||||
AppliedResults []*vm.ApplyRet
|
||||
|
||||
// PostBaseFee returns the basefee after applying this tipset.
|
||||
PostBaseFee abi.TokenAmount
|
||||
}
|
||||
|
||||
type ExecuteTipsetParams struct {
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/fatih/color"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -38,8 +39,19 @@ var FallbackBlockstoreGetter interface {
|
||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
||||
}
|
||||
|
||||
var TipsetVectorOpts struct {
|
||||
// PipelineBaseFee pipelines the basefee in multi-tipset vectors from one
|
||||
// tipset to another. Basefees in the vector are ignored, except for that of
|
||||
// the first tipset. UNUSED.
|
||||
PipelineBaseFee bool
|
||||
|
||||
// OnTipsetApplied contains callback functions called after a tipset has been
|
||||
// applied.
|
||||
OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult)
|
||||
}
|
||||
|
||||
// ExecuteMessageVector executes a message-class test vector.
|
||||
func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) {
|
||||
func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
|
||||
var (
|
||||
ctx = context.Background()
|
||||
baseEpoch = variant.Epoch
|
||||
@ -88,14 +100,16 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema
|
||||
// Once all messages are applied, assert that the final state root matches
|
||||
// the expected postcondition root.
|
||||
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
|
||||
r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
|
||||
dumpThreeWayStateDiff(r, vector, bs, root)
|
||||
r.FailNow()
|
||||
ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
|
||||
r.Errorf(ierr.Error())
|
||||
err = multierror.Append(err, ierr)
|
||||
diffs = dumpThreeWayStateDiff(r, vector, bs, root)
|
||||
}
|
||||
return diffs, err
|
||||
}
|
||||
|
||||
// ExecuteTipsetVector executes a tipset-class test vector.
|
||||
func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) {
|
||||
func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
|
||||
var (
|
||||
ctx = context.Background()
|
||||
baseEpoch = abi.ChainEpoch(variant.Epoch)
|
||||
@ -107,6 +121,7 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
|
||||
bs, err := LoadBlockstore(vector.CAR)
|
||||
if err != nil {
|
||||
r.Fatalf("failed to load the vector CAR: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create a new Driver.
|
||||
@ -118,15 +133,22 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
|
||||
for i, ts := range vector.ApplyTipsets {
|
||||
ts := ts // capture
|
||||
execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset)
|
||||
ret, err := driver.ExecuteTipset(bs, tmpds, ExecuteTipsetParams{
|
||||
params := ExecuteTipsetParams{
|
||||
Preroot: root,
|
||||
ParentEpoch: prevEpoch,
|
||||
Tipset: &ts,
|
||||
ExecEpoch: execEpoch,
|
||||
Rand: NewReplayingRand(r, vector.Randomness),
|
||||
})
|
||||
}
|
||||
ret, err := driver.ExecuteTipset(bs, tmpds, params)
|
||||
if err != nil {
|
||||
r.Fatalf("failed to apply tipset %d: %s", i, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// invoke callbacks.
|
||||
for _, cb := range TipsetVectorOpts.OnTipsetApplied {
|
||||
cb(bs, ¶ms, ret)
|
||||
}
|
||||
|
||||
for j, v := range ret.AppliedResults {
|
||||
@ -136,7 +158,9 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
|
||||
|
||||
// Compare the receipts root.
|
||||
if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
|
||||
r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
|
||||
ierr := fmt.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
|
||||
r.Errorf(ierr.Error())
|
||||
err = multierror.Append(err, ierr)
|
||||
}
|
||||
|
||||
prevEpoch = execEpoch
|
||||
@ -146,10 +170,12 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
|
||||
// Once all messages are applied, assert that the final state root matches
|
||||
// the expected postcondition root.
|
||||
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
|
||||
r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
|
||||
dumpThreeWayStateDiff(r, vector, bs, root)
|
||||
r.FailNow()
|
||||
ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
|
||||
r.Errorf(ierr.Error())
|
||||
err = multierror.Append(err, ierr)
|
||||
diffs = dumpThreeWayStateDiff(r, vector, bs, root)
|
||||
}
|
||||
return diffs, err
|
||||
}
|
||||
|
||||
// AssertMsgResult compares a message result. It takes the expected receipt
|
||||
@ -169,7 +195,7 @@ func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet,
|
||||
}
|
||||
}
|
||||
|
||||
func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
|
||||
func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) []string {
|
||||
// check if statediff exists; if not, skip.
|
||||
if err := exec.Command("statediff", "--help").Run(); err != nil {
|
||||
r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found")
|
||||
@ -178,7 +204,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
|
||||
r.Log("$ cd statediff")
|
||||
r.Log("$ go generate ./...")
|
||||
r.Log("$ go install ./cmd/statediff")
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
tmpCar, err := writeStateToTempCAR(bs,
|
||||
@ -188,6 +214,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
|
||||
)
|
||||
if err != nil {
|
||||
r.Fatalf("failed to write temporary state CAR: %s", err)
|
||||
return nil
|
||||
}
|
||||
defer os.RemoveAll(tmpCar) //nolint:errcheck
|
||||
|
||||
@ -202,28 +229,43 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
|
||||
d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
|
||||
)
|
||||
|
||||
printDiff := func(left, right cid.Cid) {
|
||||
diff := func(left, right cid.Cid) string {
|
||||
cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String())
|
||||
b, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
r.Fatalf("statediff failed: %s", err)
|
||||
}
|
||||
r.Log(string(b))
|
||||
return string(b)
|
||||
}
|
||||
|
||||
bold := color.New(color.Bold).SprintfFunc()
|
||||
|
||||
r.Log(bold("-----BEGIN STATEDIFF-----"))
|
||||
|
||||
// run state diffs.
|
||||
r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
|
||||
|
||||
r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
|
||||
printDiff(vector.Post.StateTree.RootCID, actual)
|
||||
diffA := diff(vector.Post.StateTree.RootCID, actual)
|
||||
r.Log(bold("----------BEGIN STATEDIFF A----------"))
|
||||
r.Log(diffA)
|
||||
r.Log(bold("----------END STATEDIFF A----------"))
|
||||
|
||||
r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
|
||||
printDiff(vector.Pre.StateTree.RootCID, actual)
|
||||
diffB := diff(vector.Pre.StateTree.RootCID, actual)
|
||||
r.Log(bold("----------BEGIN STATEDIFF B----------"))
|
||||
r.Log(diffB)
|
||||
r.Log(bold("----------END STATEDIFF B----------"))
|
||||
|
||||
r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
|
||||
printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
|
||||
diffC := diff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
|
||||
r.Log(bold("----------BEGIN STATEDIFF C----------"))
|
||||
r.Log(diffC)
|
||||
r.Log(bold("----------END STATEDIFF C----------"))
|
||||
|
||||
r.Log(bold("-----END STATEDIFF-----"))
|
||||
|
||||
return []string{diffA, diffB, diffC}
|
||||
}
|
||||
|
||||
// writeStateToTempCAR writes the provided roots to a temporary CAR that'll be
|
||||
|
@ -99,6 +99,9 @@
|
||||
* [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration)
|
||||
* [SectorSetSealDelay](#SectorSetSealDelay)
|
||||
* [SectorStartSealing](#SectorStartSealing)
|
||||
* [SectorTerminate](#SectorTerminate)
|
||||
* [SectorTerminateFlush](#SectorTerminateFlush)
|
||||
* [SectorTerminatePending](#SectorTerminatePending)
|
||||
* [Sectors](#Sectors)
|
||||
* [SectorsList](#SectorsList)
|
||||
* [SectorsListInStates](#SectorsListInStates)
|
||||
@ -193,7 +196,8 @@ Response:
|
||||
```json
|
||||
{
|
||||
"PreCommitControl": null,
|
||||
"CommitControl": null
|
||||
"CommitControl": null,
|
||||
"TerminateControl": null
|
||||
}
|
||||
```
|
||||
|
||||
@ -1475,7 +1479,9 @@ Inputs:
|
||||
Response: `{}`
|
||||
|
||||
### SectorRemove
|
||||
There are not yet any comments for this method.
|
||||
SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
|
||||
be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
|
||||
|
||||
|
||||
Perms: admin
|
||||
|
||||
@ -1535,6 +1541,43 @@ Inputs:
|
||||
|
||||
Response: `{}`
|
||||
|
||||
### SectorTerminate
SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
automatically removes it from storage


Perms: admin

Inputs:
```json
[
  9
]
```

Response: `{}`

### SectorTerminateFlush
SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
Returns null if message wasn't sent


Perms: admin

Inputs: `null`

Response: `null`

### SectorTerminatePending
SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message


Perms: admin

Inputs: `null`

Response: `null`

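Taken together, these three methods cover the whole termination flow: queue a sector, force the batch out, and inspect what is still waiting. A minimal Go sketch of that flow, using the same `lcli` helpers as the miner CLI code earlier in this diff (`terminateAndFlush` is a hypothetical name, not part of the Lotus API):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	lcli "github.com/filecoin-project/lotus/cli"
)

// terminateAndFlush queues a sector for termination, immediately flushes the
// termination batch, and prints whatever is still pending for the next batch.
func terminateAndFlush(cctx *cli.Context, num abi.SectorNumber) error {
	nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
	if err != nil {
		return err
	}
	defer closer()
	ctx := lcli.ReqContext(cctx)

	// Add the sector to the termination batch; it is removed from storage later.
	if err := nodeApi.SectorTerminate(ctx, num); err != nil {
		return xerrors.Errorf("queueing termination: %w", err)
	}

	// Send the batched terminate message now instead of waiting for the batcher.
	mcid, err := nodeApi.SectorTerminateFlush(ctx)
	if err != nil {
		return err
	}
	if mcid != nil {
		fmt.Println("terminate message:", mcid)
	}

	// Anything listed here will go out with the next batch message.
	pending, err := nodeApi.SectorTerminatePending(ctx)
	if err != nil {
		return err
	}
	for _, id := range pending {
		fmt.Println("still pending:", id.Number)
	}
	return nil
}
```

Flushing is optional: queued terminations are batched and sent automatically, so the flush call only forces the message out early.
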
## Sectors
|
||||
|
||||
|
||||
|
@ -68,6 +68,8 @@
|
||||
* [LogList](#LogList)
|
||||
* [LogSetLevel](#LogSetLevel)
|
||||
* [Market](#Market)
|
||||
* [MarketAddBalance](#MarketAddBalance)
|
||||
* [MarketGetReserved](#MarketGetReserved)
|
||||
* [MarketReleaseFunds](#MarketReleaseFunds)
|
||||
* [MarketReserveFunds](#MarketReserveFunds)
|
||||
* [MarketWithdraw](#MarketWithdraw)
|
||||
@ -175,6 +177,7 @@
|
||||
* [StateReadState](#StateReadState)
|
||||
* [StateReplay](#StateReplay)
|
||||
* [StateSearchMsg](#StateSearchMsg)
|
||||
* [StateSearchMsgLimited](#StateSearchMsgLimited)
|
||||
* [StateSectorExpiration](#StateSectorExpiration)
|
||||
* [StateSectorGetInfo](#StateSectorGetInfo)
|
||||
* [StateSectorPartition](#StateSectorPartition)
|
||||
@ -1653,6 +1656,43 @@ Response: `{}`
|
||||
## Market
|
||||
|
||||
|
||||
### MarketAddBalance
|
||||
MarketAddBalance adds funds to the market actor
|
||||
|
||||
|
||||
Perms: sign
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
"f01234",
|
||||
"f01234",
|
||||
"0"
|
||||
]
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
}
|
||||
```
|
||||
|
||||
### MarketGetReserved
|
||||
MarketGetReserved gets the amount of funds that are currently reserved for the address
|
||||
|
||||
|
||||
Perms: sign
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
"f01234"
|
||||
]
|
||||
```
|
||||
|
||||
Response: `"0"`
|
||||
|
||||
### MarketReleaseFunds
|
||||
MarketReleaseFunds releases funds reserved by MarketReserveFunds
|
||||
|
||||
@ -4308,7 +4348,7 @@ Inputs:
|
||||
]
|
||||
```
|
||||
|
||||
Response: `8`
|
||||
Response: `9`
|
||||
|
||||
### StateReadState
|
||||
StateReadState returns the indicated actor's state.
|
||||
@ -4471,6 +4511,46 @@ Response:
|
||||
}
|
||||
```
|
||||
|
||||
### StateSearchMsgLimited
|
||||
StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
{
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
},
|
||||
10101
|
||||
]
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"Message": {
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
},
|
||||
"Receipt": {
|
||||
"ExitCode": 0,
|
||||
"Return": "Ynl0ZSBhcnJheQ==",
|
||||
"GasUsed": 9
|
||||
},
|
||||
"ReturnDec": {},
|
||||
"TipSet": [
|
||||
{
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
},
|
||||
{
|
||||
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
|
||||
}
|
||||
],
|
||||
"Height": 10101
|
||||
}
|
||||
```
|
||||
|
||||
### StateSectorExpiration
|
||||
StateSectorExpiration returns epoch at which given sector will expire
|
||||
|
||||
|
66
extern/sector-storage/ffiwrapper/sealer_cgo.go
vendored
@ -45,6 +45,10 @@ func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error
|
||||
}
|
||||
|
||||
func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
|
||||
// TODO: allow tuning those:
|
||||
chunk := abi.PaddedPieceSize(4 << 20)
|
||||
parallel := runtime.NumCPU()
|
||||
|
||||
var offset abi.UnpaddedPieceSize
|
||||
for _, size := range existingPieceSizes {
|
||||
offset += size
|
||||
@ -108,10 +112,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi
|
||||
|
||||
pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw)
|
||||
|
||||
chunk := abi.PaddedPieceSize(4 << 20)
|
||||
throttle := make(chan []byte, parallel)
|
||||
piecePromises := make([]func() (abi.PieceInfo, error), 0)
|
||||
|
||||
buf := make([]byte, chunk.Unpadded())
|
||||
var pieceCids []abi.PieceInfo
|
||||
for i := 0; i < parallel; i++ {
|
||||
if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize {
|
||||
break // won't use this many buffers
|
||||
}
|
||||
throttle <- make([]byte, chunk.Unpadded())
|
||||
}
|
||||
|
||||
for {
|
||||
var read int
|
||||
@ -132,13 +142,39 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi
|
||||
break
|
||||
}
|
||||
|
||||
c, err := sb.pieceCid(sector.ProofType, buf[:read])
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err)
|
||||
}
|
||||
pieceCids = append(pieceCids, abi.PieceInfo{
|
||||
Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(),
|
||||
PieceCID: c,
|
||||
done := make(chan struct {
|
||||
cid.Cid
|
||||
error
|
||||
}, 1)
|
||||
pbuf := <-throttle
|
||||
copy(pbuf, buf[:read])
|
||||
|
||||
go func(read int) {
|
||||
defer func() {
|
||||
throttle <- pbuf
|
||||
}()
|
||||
|
||||
c, err := sb.pieceCid(sector.ProofType, pbuf[:read])
|
||||
done <- struct {
|
||||
cid.Cid
|
||||
error
|
||||
}{c, err}
|
||||
}(read)
|
||||
|
||||
piecePromises = append(piecePromises, func() (abi.PieceInfo, error) {
|
||||
select {
|
||||
case e := <-done:
|
||||
if e.error != nil {
|
||||
return abi.PieceInfo{}, e.error
|
||||
}
|
||||
|
||||
return abi.PieceInfo{
|
||||
Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(),
|
||||
PieceCID: e.Cid,
|
||||
}, nil
|
||||
case <-ctx.Done():
|
||||
return abi.PieceInfo{}, ctx.Err()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@ -155,8 +191,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi
|
||||
}
|
||||
stagedFile = nil
|
||||
|
||||
if len(pieceCids) == 1 {
|
||||
return pieceCids[0], nil
|
||||
if len(piecePromises) == 1 {
|
||||
return piecePromises[0]()
|
||||
}
|
||||
|
||||
pieceCids := make([]abi.PieceInfo, len(piecePromises))
|
||||
for i, promise := range piecePromises {
|
||||
pieceCids[i], err = promise()
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, err
|
||||
}
|
||||
}
|
||||
|
||||
pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids)
|
||||
|
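The AddPiece changes above replace the sequential pieceCid computation with a throttled, promise-style pipeline: a buffered channel bounds the number of in-flight hashing goroutines, and one closure per chunk lets the results be collected in input order (which is also why the single-chunk case can just return `piecePromises[0]()`). A stripped-down sketch of that pattern, with hypothetical names and a trivial checksum standing in for `pieceCid`:

```go
package main

import (
	"context"
	"fmt"
)

// processChunks mirrors the structure used by AddPiece: a buffered channel
// throttles how many chunks are in flight (the sealer recycles byte buffers
// through it), each chunk is processed in its own goroutine, and a "promise"
// closure per chunk returns the results in input order.
func processChunks(ctx context.Context, chunks [][]byte, parallel int) ([]int, error) {
	type result struct {
		sum int
		err error
	}

	throttle := make(chan struct{}, parallel)
	for i := 0; i < parallel; i++ {
		throttle <- struct{}{}
	}

	promises := make([]func() (int, error), 0, len(chunks))
	for _, chunk := range chunks {
		chunk := chunk
		done := make(chan result, 1)
		<-throttle // wait for a free slot before spawning another worker
		go func() {
			defer func() { throttle <- struct{}{} }() // release the slot
			sum := 0
			for _, b := range chunk {
				sum += int(b)
			}
			done <- result{sum: sum}
		}()
		promises = append(promises, func() (int, error) {
			select {
			case r := <-done:
				return r.sum, r.err
			case <-ctx.Done():
				return 0, ctx.Err()
			}
		})
	}

	// Resolve the promises in order; this is where errors surface.
	out := make([]int, len(promises))
	for i, p := range promises {
		v, err := p()
		if err != nil {
			return nil, err
		}
		out[i] = v
	}
	return out, nil
}

func main() {
	sums, err := processChunks(context.Background(), [][]byte{{1, 2}, {3, 4}}, 2)
	fmt.Println(sums, err) // [3 7] <nil>
}
```
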
87
extern/sector-storage/ffiwrapper/sealer_test.go
vendored
@ -33,6 +33,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -622,3 +623,89 @@ func TestGenerateUnsealedCID(t *testing.T) {
|
||||
[][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)},
|
||||
)
|
||||
}
|
||||
|
||||
func TestAddPiece512M(t *testing.T) {
|
||||
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
|
||||
|
||||
cdir, err := ioutil.TempDir("", "sbtest-c-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
miner := abi.ActorID(123)
|
||||
|
||||
sp := &basicfs.Provider{
|
||||
Root: cdir,
|
||||
}
|
||||
sb, err := New(sp)
|
||||
if err != nil {
|
||||
t.Fatalf("%+v", err)
|
||||
}
|
||||
cleanup := func() {
|
||||
if t.Failed() {
|
||||
fmt.Printf("not removing %s\n", cdir)
|
||||
return
|
||||
}
|
||||
if err := os.RemoveAll(cdir); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
r := rand.New(rand.NewSource(0x7e5))
|
||||
|
||||
c, err := sb.AddPiece(context.TODO(), storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: miner,
|
||||
Number: 0,
|
||||
},
|
||||
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
|
||||
}, nil, sz, io.LimitReader(r, int64(sz)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.Equal(t, "baga6ea4seaqhyticusemlcrjhvulpfng4nint6bu3wpe5s3x4bnuj2rs47hfacy", c.PieceCID.String())
|
||||
}
|
||||
|
||||
func BenchmarkAddPiece512M(b *testing.B) {
|
||||
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
|
||||
b.SetBytes(int64(sz))
|
||||
|
||||
cdir, err := ioutil.TempDir("", "sbtest-c-")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
miner := abi.ActorID(123)
|
||||
|
||||
sp := &basicfs.Provider{
|
||||
Root: cdir,
|
||||
}
|
||||
sb, err := New(sp)
|
||||
if err != nil {
|
||||
b.Fatalf("%+v", err)
|
||||
}
|
||||
cleanup := func() {
|
||||
if b.Failed() {
|
||||
fmt.Printf("not removing %s\n", cdir)
|
||||
return
|
||||
}
|
||||
if err := os.RemoveAll(cdir); err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
b.Cleanup(cleanup)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
c, err := sb.AddPiece(context.TODO(), storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: miner,
|
||||
Number: abi.SectorNumber(i),
|
||||
},
|
||||
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
|
||||
}, nil, sz, io.LimitReader(&nullreader.Reader{}, int64(sz)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
fmt.Println(c)
|
||||
}
|
||||
}
|
||||
|
34
extern/sector-storage/fsutil/filesize_unix.go
vendored
@ -2,6 +2,7 @@ package fsutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@ -11,19 +12,32 @@ type SizeInfo struct {
|
||||
OnDisk int64
|
||||
}
|
||||
|
||||
// FileSize returns bytes used by a file on disk
|
||||
// FileSize returns bytes used by a file or directory on disk
|
||||
// NOTE: We care about the allocated bytes, not file or directory size
|
||||
func FileSize(path string) (SizeInfo, error) {
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(path, &stat); err != nil {
|
||||
if err == syscall.ENOENT {
|
||||
var size int64
|
||||
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
stat, ok := info.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return xerrors.New("FileInfo.Sys of wrong type")
|
||||
}
|
||||
|
||||
// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
|
||||
// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
|
||||
size += int64(stat.Blocks) * 512 // nolint NOTE: int64 cast is needed on osx
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return SizeInfo{}, os.ErrNotExist
|
||||
}
|
||||
return SizeInfo{}, xerrors.Errorf("stat: %w", err)
|
||||
return SizeInfo{}, xerrors.Errorf("filepath.Walk err: %w", err)
|
||||
}
|
||||
|
||||
// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
|
||||
// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
|
||||
return SizeInfo{
|
||||
int64(stat.Blocks) * 512, // nolint NOTE: int64 cast is needed on osx
|
||||
}, nil
|
||||
return SizeInfo{size}, nil
|
||||
}
|
||||
|
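The rewritten `FileSize` above reports allocated bytes (`stat.Blocks * 512` summed over a walk) rather than logical size. A small unix-only sketch, not part of the repo, showing why the two differ for a sparse file:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

func main() {
	// Create a sparse file: seek 1 GiB in and write a single byte.
	f, err := ioutil.TempFile("", "sparse-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name()) //nolint:errcheck

	if _, err := f.Seek(1<<30, 0); err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte{1}); err != nil {
		panic(err)
	}
	_ = f.Close()

	fi, err := os.Stat(f.Name())
	if err != nil {
		panic(err)
	}
	st := fi.Sys().(*syscall.Stat_t)

	// Logical size is ~1 GiB; allocated size is only a few 512-byte blocks.
	fmt.Println("logical size:  ", fi.Size())
	fmt.Println("allocated size:", int64(st.Blocks)*512)
}
```
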
2
extern/sector-storage/mock/mock.go
vendored
@ -347,7 +347,7 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea
|
||||
}
|
||||
|
||||
func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
|
||||
if len(mgr.sectors[sectorID.ID].pieces) > 1 || offset != 0 {
|
||||
if offset != 0 {
|
||||
panic("implme")
|
||||
}
|
||||
|
||||
|
1
extern/sector-storage/sched_worker.go
vendored
@ -57,6 +57,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error {
|
||||
log.Warnw("duplicated worker added", "id", wid)
|
||||
|
||||
// this is ok, we're already handling this worker in a different goroutine
|
||||
sh.workersLk.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
4
extern/sector-storage/stores/index.go
vendored
@ -155,6 +155,10 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS
|
||||
i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u)
|
||||
}
|
||||
|
||||
i.stores[si.ID].info.Weight = si.Weight
|
||||
i.stores[si.ID].info.CanSeal = si.CanSeal
|
||||
i.stores[si.ID].info.CanStore = si.CanStore
|
||||
|
||||
return nil
|
||||
}
|
||||
i.stores[si.ID] = &storageEntry{
|
||||
|
95
extern/storage-sealing/cbor_gen.go
vendored
@ -475,7 +475,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte{183}); err != nil {
|
||||
if _, err := w.Write([]byte{184, 25}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -928,6 +928,50 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.TerminateMessage (cid.Cid) (struct)
|
||||
if len("TerminateMessage") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"TerminateMessage\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminateMessage"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("TerminateMessage")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.TerminateMessage == nil {
|
||||
if _, err := w.Write(cbg.CborNull); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteCidBuf(scratch, w, *t.TerminateMessage); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.TerminateMessage: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// t.TerminatedAt (abi.ChainEpoch) (int64)
|
||||
if len("TerminatedAt") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"TerminatedAt\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminatedAt"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("TerminatedAt")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.TerminatedAt >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TerminatedAt)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TerminatedAt-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.LastErr (string) (string)
|
||||
if len("LastErr") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"LastErr\" was too long")
|
||||
@ -1441,6 +1485,55 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error {
|
||||
|
||||
t.Return = ReturnState(sval)
|
||||
}
|
||||
// t.TerminateMessage (cid.Cid) (struct)
|
||||
case "TerminateMessage":
|
||||
|
||||
{
|
||||
|
||||
b, err := br.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := br.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := cbg.ReadCid(br)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.TerminateMessage: %w", err)
|
||||
}
|
||||
|
||||
t.TerminateMessage = &c
|
||||
}
|
||||
|
||||
}
|
||||
// t.TerminatedAt (abi.ChainEpoch) (int64)
|
||||
case "TerminatedAt":
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.TerminatedAt = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.LastErr (string) (string)
|
||||
case "LastErr":
|
||||
|
||||
|
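The generated cbor_gen.go hunk above adds two new SectorInfo fields to the map encoding: TerminateMessage as a nullable CID and TerminatedAt as a signed epoch. For the signed field it follows the standard CBOR convention sketched below (a toy encoder, not the cbor-gen API; it only handles values whose magnitude is below 24, which fit in the initial byte):

```go
package main

import "fmt"

// encodeInt64Head shows the rule used for t.TerminatedAt: CBOR has no signed
// major type, so n >= 0 is written as MajUnsignedInt(n) and n < 0 as
// MajNegativeInt(-n-1).
func encodeInt64Head(v int64) byte {
	if v >= 0 {
		return byte(0<<5) | byte(v) // major type 0: unsigned
	}
	return byte(1<<5) | byte(-v-1) // major type 1: negative
}

func main() {
	fmt.Printf("10  -> 0x%02x\n", encodeInt64Head(10))  // 0x0a
	fmt.Printf("-10 -> 0x%02x\n", encodeInt64Head(-10)) // 0x29
}
```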
26  extern/storage-sealing/fsm.go  vendored
@ -148,6 +148,21 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorFaultReported{}, FaultReported),
on(SectorFaulty{}, Faulty),
),
Terminating: planOne(
on(SectorTerminating{}, TerminateWait),
on(SectorTerminateFailed{}, TerminateFailed),
),
TerminateWait: planOne(
on(SectorTerminated{}, TerminateFinality),
on(SectorTerminateFailed{}, TerminateFailed),
),
TerminateFinality: planOne(
on(SectorTerminateFailed{}, TerminateFailed),
// SectorRemove (global)
),
TerminateFailed: planOne(
// SectorTerminating (global)
),
Removing: planOne(
on(SectorRemoved{}, Removed),
on(SectorRemoveFailed{}, RemoveFailed),
@ -328,6 +343,14 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
// Post-seal
case Proving:
return m.handleProvingSector, processed, nil
case Terminating:
return m.handleTerminating, processed, nil
case TerminateWait:
return m.handleTerminateWait, processed, nil
case TerminateFinality:
return m.handleTerminateFinality, processed, nil
case TerminateFailed:
return m.handleTerminateFailed, processed, nil
case Removing:
return m.handleRemoving, processed, nil
case Removed:
@ -409,8 +432,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
return err
}

m.unsealedInfoMap.lk.Lock()
// m.unsealedInfoMap.lk.Lock() taken early in .New to prevent races
defer m.unsealedInfoMap.lk.Unlock()

for _, sector := range trackedSectors {
if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil {
log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err)
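Taken together, the new planner entries and plan() cases above define a linear termination pipeline. A self-contained sketch summarizing the intended transitions (illustrative only; the real flow is driven by statemachine events, and the Removing/retry edges go through global events as noted in the comments):

```go
package main

import "fmt"

// Illustrative transition table for the new termination states wired up
// above. The real planner is event-driven (planOne/on over statemachine
// events); this map only summarizes where each state can go next.
type state string

var next = map[state][]state{
	"Terminating":       {"TerminateWait", "TerminateFailed"},
	"TerminateWait":     {"TerminateFinality", "TerminateFailed"},
	"TerminateFinality": {"Removing", "TerminateFailed"}, // Removing via the global SectorRemove event
	"TerminateFailed":   {"Terminating"},                 // retried via the global SectorTerminate event
}

func main() {
	for s, ns := range next {
		fmt.Println(s, "->", ns)
	}
}
```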
26  extern/storage-sealing/fsm_events.go  vendored
@ -314,6 +314,32 @@ func (evt SectorFaultReported) apply(state *SectorInfo) {

type SectorFaultedFinal struct{}

// Terminating

type SectorTerminate struct{}

func (evt SectorTerminate) applyGlobal(state *SectorInfo) bool {
state.State = Terminating
return true
}

type SectorTerminating struct{ Message *cid.Cid }

func (evt SectorTerminating) apply(state *SectorInfo) {
state.TerminateMessage = evt.Message
}

type SectorTerminated struct{ TerminatedAt abi.ChainEpoch }

func (evt SectorTerminated) apply(state *SectorInfo) {
state.TerminatedAt = evt.TerminatedAt
}

type SectorTerminateFailed struct{ error }

func (evt SectorTerminateFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorTerminateFailed) apply(*SectorInfo) {}

// External events

type SectorRemove struct{}
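The events above follow the package convention that an event mutates SectorInfo when applied; SectorTerminate additionally uses applyGlobal so it can fire from any state. A minimal illustrative mirror of that pattern (toy types, not the statemachine package):

```go
package main

import "fmt"

// Toy mirror of the pattern: per-state events implement apply, global
// events implement applyGlobal and return true to force the state switch.
type sectorInfo struct {
	State        string
	TerminatedAt int64
}

type sectorTerminate struct{}

func (sectorTerminate) applyGlobal(s *sectorInfo) bool {
	s.State = "Terminating"
	return true
}

type sectorTerminated struct{ at int64 }

func (e sectorTerminated) apply(s *sectorInfo) { s.TerminatedAt = e.at }

func main() {
	si := &sectorInfo{State: "Proving"}
	sectorTerminate{}.applyGlobal(si)
	sectorTerminated{at: 12345}.apply(si)
	fmt.Printf("%+v\n", *si) // {State:Terminating TerminatedAt:12345}
}
```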
31  extern/storage-sealing/sealing.go  vendored
@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
statemachine "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-storage/storage"
@ -60,6 +61,8 @@ type SealingAPI interface {
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error)
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
@ -94,12 +97,15 @@ type Sealing struct {

stats SectorStats

terminator *TerminateBatcher

getConfig GetSealingConfigFunc
}

type FeeConfig struct {
MaxPreCommitGasFee abi.TokenAmount
MaxCommitGasFee abi.TokenAmount
MaxTerminateGasFee abi.TokenAmount
}

type UnsealedSectorMap struct {
@ -136,6 +142,8 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
notifee: notifee,
addrSel: as,

terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc),

getConfig: gc,

stats: SectorStats{
@ -145,6 +153,8 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds

s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})

s.unsealedInfoMap.lk.Lock() // released after initialized in .Run()

return s
}

@ -158,7 +168,14 @@ func (m *Sealing) Run(ctx context.Context) error {
}

func (m *Sealing) Stop(ctx context.Context) error {
return m.sectors.Stop(ctx)
if err := m.terminator.Stop(ctx); err != nil {
return err
}

if err := m.sectors.Stop(ctx); err != nil {
return err
}
return nil
}

func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
@ -263,6 +280,18 @@ func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error {
return m.sectors.Send(uint64(sid), SectorRemove{})
}

func (m *Sealing) Terminate(ctx context.Context, sid abi.SectorNumber) error {
return m.sectors.Send(uint64(sid), SectorTerminate{})
}

func (m *Sealing) TerminateFlush(ctx context.Context) (*cid.Cid, error) {
return m.terminator.Flush(ctx)
}

func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) {
return m.terminator.Pending(ctx)
}

// Caller should NOT hold m.unsealedInfoMap.lk
func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
// locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else
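A hedged usage sketch of the three new entry points above, written against a hypothetical interface rather than *Sealing itself (the package name, helper name and printed output are illustrative; timing of what TerminatePending reports depends on when the batcher picks the sector up):

```go
package sealingutil

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
)

// terminator captures only the new methods added above; a concrete
// implementation such as *Sealing would satisfy it.
type terminator interface {
	Terminate(ctx context.Context, sid abi.SectorNumber) error
	TerminateFlush(ctx context.Context) (*cid.Cid, error)
	TerminatePending(ctx context.Context) ([]abi.SectorID, error)
}

func terminateNow(ctx context.Context, t terminator, sid abi.SectorNumber) error {
	if err := t.Terminate(ctx, sid); err != nil { // queue the SectorTerminate event
		return err
	}
	pending, err := t.TerminatePending(ctx) // sectors still waiting in the batcher
	if err != nil {
		return err
	}
	fmt.Printf("%d termination(s) currently queued\n", len(pending))

	msg, err := t.TerminateFlush(ctx) // ask the batcher to send immediately
	if err != nil {
		return err
	}
	if msg != nil {
		fmt.Printf("terminate message sent: %s\n", msg)
	}
	return nil
}
```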
11  extern/storage-sealing/sector_state.go  vendored
@ -30,6 +30,10 @@ var ExistSectorStateList = map[SectorState]struct{}{
Faulty: {},
FaultReported: {},
FaultedFinal: {},
Terminating: {},
TerminateWait: {},
TerminateFinality: {},
TerminateFailed: {},
Removing: {},
RemoveFailed: {},
Removed: {},
@ -69,6 +73,11 @@ const (
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
FaultedFinal SectorState = "FaultedFinal" // fault declared on chain

Terminating SectorState = "Terminating"
TerminateWait SectorState = "TerminateWait"
TerminateFinality SectorState = "TerminateFinality"
TerminateFailed SectorState = "TerminateFailed"

Removing SectorState = "Removing"
RemoveFailed SectorState = "RemoveFailed"
Removed SectorState = "Removed"
@ -78,7 +87,7 @@ func toStatState(st SectorState) statSectorState {
switch st {
case Empty, WaitDeals, Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector:
return sstSealing
case Proving, Removed, Removing:
case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
return sstProving
}
|
||||
|
20
extern/storage-sealing/states_failed.go
vendored
20
extern/storage-sealing/states_failed.go
vendored
@ -224,9 +224,9 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
|
||||
case *ErrBadCommD:
|
||||
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)})
|
||||
case *ErrExpiredTicket:
|
||||
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)})
|
||||
return ctx.Send(SectorTicketExpired{xerrors.Errorf("ticket expired error, removing sector: %w", err)})
|
||||
case *ErrBadTicket:
|
||||
return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket: %w", err)})
|
||||
return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket, removing sector: %w", err)})
|
||||
case *ErrInvalidDeals:
|
||||
log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
|
||||
return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed})
|
||||
@ -309,6 +309,22 @@ func (m *Sealing) handleRemoveFailed(ctx statemachine.Context, sector SectorInfo
|
||||
return ctx.Send(SectorRemove{})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleTerminateFailed(ctx statemachine.Context, sector SectorInfo) error {
|
||||
// ignoring error as it's most likely an API error - `pci` will be nil, and we'll go back to
|
||||
// the Terminating state after cooldown. If the API is still failing, we'll get back here
|
||||
// with the error in SectorInfo log.
|
||||
pci, _ := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
|
||||
if pci != nil {
|
||||
return nil // pause the fsm, needs manual user action
|
||||
}
|
||||
|
||||
if err := failedCooldown(ctx, sector); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.Send(SectorTerminate{})
|
||||
}
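handleTerminateFailed above leans on failedCooldown before re-issuing SectorTerminate. A hypothetical sketch of what such a cooldown helper can look like (the real helper lives elsewhere in this package and may differ; the one-minute figure is an assumption):

```go
package sealingutil

import (
	"context"
	"time"
)

// failedCooldownSketch waits a fixed back-off before a failed sector state
// is retried, bailing out early if the surrounding context is cancelled.
// Purely illustrative; not the package's actual failedCooldown.
func failedCooldownSketch(ctx context.Context) error {
	const retryWait = time.Minute // assumed back-off

	select {
	case <-time.After(retryWait):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```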
|
||||
|
||||
func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo) error {
|
||||
// First make very sure the sector isn't committed
|
||||
si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
|
||||
|
88
extern/storage-sealing/states_proving.go
vendored
88
extern/storage-sealing/states_proving.go
vendored
@ -1,9 +1,14 @@
|
||||
package sealing
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/go-statemachine"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error {
|
||||
@ -31,6 +36,89 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf
|
||||
return ctx.Send(SectorFaultedFinal{})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleTerminating(ctx statemachine.Context, sector SectorInfo) error {
|
||||
// First step of sector termination
|
||||
// * See if sector is live
|
||||
// * If not, goto removing
|
||||
// * Add to termination queue
|
||||
// * Wait for message to land on-chain
|
||||
// * Check for correct termination
|
||||
// * wait for expiration (+winning lookback?)
|
||||
|
||||
si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting sector info: %w", err)})
|
||||
}
|
||||
|
||||
if si == nil {
|
||||
// either already terminated or not committed yet
|
||||
|
||||
pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("checking precommit presence: %w", err)})
|
||||
}
|
||||
if pci != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("sector was precommitted but not proven, remove instead of terminating")})
|
||||
}
|
||||
|
||||
return ctx.Send(SectorRemove{})
|
||||
}
|
||||
|
||||
termCid, terminated, err := m.terminator.AddTermination(ctx.Context(), m.minerSectorID(sector.SectorNumber))
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("queueing termination: %w", err)})
|
||||
}
|
||||
|
||||
if terminated {
|
||||
return ctx.Send(SectorTerminating{Message: nil})
|
||||
}
|
||||
|
||||
return ctx.Send(SectorTerminating{Message: &termCid})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleTerminateWait(ctx statemachine.Context, sector SectorInfo) error {
|
||||
if sector.TerminateMessage == nil {
|
||||
return xerrors.New("entered TerminateWait with nil TerminateMessage")
|
||||
}
|
||||
|
||||
mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.TerminateMessage)
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("waiting for terminate message to land on chain: %w", err)})
|
||||
}
|
||||
|
||||
if mw.Receipt.ExitCode != exitcode.Ok {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("terminate message failed to execute: exit %d: %w", mw.Receipt.ExitCode, err)})
|
||||
}
|
||||
|
||||
return ctx.Send(SectorTerminated{TerminatedAt: mw.Height})
|
||||
}
|
||||
|
||||
func (m *Sealing) handleTerminateFinality(ctx statemachine.Context, sector SectorInfo) error {
|
||||
for {
|
||||
tok, epoch, err := m.api.ChainHead(ctx.Context())
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting chain head: %w", err)})
|
||||
}
|
||||
|
||||
nv, err := m.api.StateNetworkVersion(ctx.Context(), tok)
|
||||
if err != nil {
|
||||
return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting network version: %w", err)})
|
||||
}
|
||||
|
||||
if epoch >= sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv) {
|
||||
return ctx.Send(SectorRemove{})
|
||||
}
|
||||
|
||||
toWait := time.Duration(epoch-sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv)) * time.Duration(build.BlockDelaySecs) * time.Second
|
||||
select {
|
||||
case <-time.After(toWait):
|
||||
continue
|
||||
case <-ctx.Context().Done():
|
||||
return ctx.Context().Err()
|
||||
}
|
||||
}
|
||||
}
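handleTerminateFinality above keeps a terminated sector around until the winning-PoSt lookback has elapsed past TerminatedAt, and only then sends SectorRemove. A small worked example of the removal condition (epoch >= TerminatedAt + lookback) with assumed numbers; the real lookback and block delay come from the policy and build packages:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		terminatedAt   = 100_000 // epoch the terminate message landed (assumed)
		lookback       = 10      // assumed winning-PoSt sector-set lookback, in epochs
		blockDelaySecs = 30      // assumed seconds per epoch
		head           = 100_004 // assumed current chain epoch
	)

	removableAt := terminatedAt + lookback
	if head >= removableAt {
		fmt.Println("safe to remove now")
		return
	}
	wait := time.Duration(removableAt-head) * blockDelaySecs * time.Second
	fmt.Printf("removable at epoch %d, roughly %s from now\n", removableAt, wait)
}
```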
|
||||
|
||||
func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error {
|
||||
if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
|
||||
return ctx.Send(SectorRemoveFailed{err})
|
||||
|
351
extern/storage-sealing/terminate_batch.go
vendored
Normal file
351
extern/storage-sealing/terminate_batch.go
vendored
Normal file
@ -0,0 +1,351 @@
|
||||
package sealing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO: config
|
||||
|
||||
TerminateBatchMax uint64 = 100 // adjust based on real-world gas numbers, actors limit at 10k
|
||||
TerminateBatchMin uint64 = 1
|
||||
TerminateBatchWait = 5 * time.Minute
|
||||
)
|
||||
|
||||
type TerminateBatcherApi interface {
|
||||
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error)
|
||||
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
|
||||
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
|
||||
StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error)
|
||||
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
|
||||
}
|
||||
|
||||
type TerminateBatcher struct {
|
||||
api TerminateBatcherApi
|
||||
maddr address.Address
|
||||
mctx context.Context
|
||||
addrSel AddrSel
|
||||
feeCfg FeeConfig
|
||||
|
||||
todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField
|
||||
|
||||
waiting map[abi.SectorNumber][]chan cid.Cid
|
||||
|
||||
notify, stop, stopped chan struct{}
|
||||
force chan chan *cid.Cid
|
||||
lk sync.Mutex
|
||||
}
|
||||
|
||||
func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig) *TerminateBatcher {
|
||||
b := &TerminateBatcher{
|
||||
api: api,
|
||||
maddr: maddr,
|
||||
mctx: mctx,
|
||||
addrSel: addrSel,
|
||||
feeCfg: feeCfg,
|
||||
|
||||
todo: map[SectorLocation]*bitfield.BitField{},
|
||||
waiting: map[abi.SectorNumber][]chan cid.Cid{},
|
||||
|
||||
notify: make(chan struct{}, 1),
|
||||
force: make(chan chan *cid.Cid),
|
||||
stop: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
}
|
||||
|
||||
go b.run()
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TerminateBatcher) run() {
|
||||
var forceRes chan *cid.Cid
|
||||
var lastMsg *cid.Cid
|
||||
|
||||
for {
|
||||
if forceRes != nil {
|
||||
forceRes <- lastMsg
|
||||
forceRes = nil
|
||||
}
|
||||
lastMsg = nil
|
||||
|
||||
var sendAboveMax, sendAboveMin bool
|
||||
select {
|
||||
case <-b.stop:
|
||||
close(b.stopped)
|
||||
return
|
||||
case <-b.notify:
|
||||
sendAboveMax = true
|
||||
case <-time.After(TerminateBatchWait):
|
||||
sendAboveMin = true
|
||||
case fr := <-b.force: // user triggered
|
||||
forceRes = fr
|
||||
}
|
||||
|
||||
var err error
|
||||
lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin)
|
||||
if err != nil {
|
||||
log.Warnw("TerminateBatcher processBatch error", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
|
||||
dl, err := b.api.StateMinerProvingDeadline(b.mctx, b.maddr, nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting proving deadline info failed: %w", err)
|
||||
}
|
||||
|
||||
b.lk.Lock()
|
||||
defer b.lk.Unlock()
|
||||
params := miner2.TerminateSectorsParams{}
|
||||
|
||||
var total uint64
|
||||
for loc, sectors := range b.todo {
|
||||
n, err := sectors.Count()
|
||||
if err != nil {
|
||||
log.Errorw("TerminateBatcher: failed to count sectors to terminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// don't send terminations for currently challenged sectors
|
||||
if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain)
|
||||
loc.Deadline == dl.Index || // not in current
|
||||
(loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous
|
||||
continue
|
||||
}
|
||||
|
||||
if n < 1 {
|
||||
log.Warnw("TerminateBatcher: zero sectors in bucket", "deadline", loc.Deadline, "partition", loc.Partition)
|
||||
continue
|
||||
}
|
||||
|
||||
toTerminate, err := sectors.Copy()
|
||||
if err != nil {
|
||||
log.Warnw("TerminateBatcher: copy sectors bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if total+n > uint64(miner.AddressedSectorsMax) {
|
||||
n = uint64(miner.AddressedSectorsMax) - total
|
||||
|
||||
toTerminate, err = toTerminate.Slice(0, n)
|
||||
if err != nil {
|
||||
log.Warnw("TerminateBatcher: slice toTerminate bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s, err := bitfield.SubtractBitField(*sectors, toTerminate)
|
||||
if err != nil {
|
||||
log.Warnw("TerminateBatcher: sectors-toTerminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err)
|
||||
continue
|
||||
}
|
||||
*sectors = s
|
||||
}
|
||||
|
||||
total += n
|
||||
|
||||
params.Terminations = append(params.Terminations, miner2.TerminationDeclaration{
|
||||
Deadline: loc.Deadline,
|
||||
Partition: loc.Partition,
|
||||
Sectors: toTerminate,
|
||||
})
|
||||
|
||||
if total >= uint64(miner.AddressedSectorsMax) {
|
||||
break
|
||||
}
|
||||
|
||||
if len(params.Terminations) >= miner.DeclarationsMax {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(params.Terminations) == 0 {
|
||||
return nil, nil // nothing to do
|
||||
}
|
||||
|
||||
if notif && total < TerminateBatchMax {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if after && total < TerminateBatchMin {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
enc := new(bytes.Buffer)
|
||||
if err := params.MarshalCBOR(enc); err != nil {
|
||||
return nil, xerrors.Errorf("couldn't serialize TerminateSectors params: %w", err)
|
||||
}
|
||||
|
||||
mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("couldn't get miner info: %w", err)
|
||||
}
|
||||
|
||||
from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, b.feeCfg.MaxTerminateGasFee, b.feeCfg.MaxTerminateGasFee)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("no good address found: %w", err)
|
||||
}
|
||||
|
||||
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), b.feeCfg.MaxTerminateGasFee, enc.Bytes())
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("sending message failed: %w", err)
|
||||
}
|
||||
log.Infow("Sent TerminateSectors message", "cid", mcid, "from", from, "terminations", len(params.Terminations))
|
||||
|
||||
for _, t := range params.Terminations {
|
||||
delete(b.todo, SectorLocation{
|
||||
Deadline: t.Deadline,
|
||||
Partition: t.Partition,
|
||||
})
|
||||
|
||||
err := t.Sectors.ForEach(func(sn uint64) error {
|
||||
for _, ch := range b.waiting[abi.SectorNumber(sn)] {
|
||||
ch <- mcid // buffered
|
||||
}
|
||||
delete(b.waiting, abi.SectorNumber(sn))
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("sectors foreach: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &mcid, nil
|
||||
}
|
||||
|
||||
// register termination, wait for batch message, return message CID
|
||||
// can return cid.Undef,true if the sector is already terminated on-chain
|
||||
func (b *TerminateBatcher) AddTermination(ctx context.Context, s abi.SectorID) (mcid cid.Cid, terminated bool, err error) {
|
||||
maddr, err := address.NewIDAddress(uint64(s.Miner))
|
||||
if err != nil {
|
||||
return cid.Undef, false, err
|
||||
}
|
||||
|
||||
loc, err := b.api.StateSectorPartition(ctx, maddr, s.Number, nil)
|
||||
if err != nil {
|
||||
return cid.Undef, false, xerrors.Errorf("getting sector location: %w", err)
|
||||
}
|
||||
if loc == nil {
|
||||
return cid.Undef, false, xerrors.New("sector location not found")
|
||||
}
|
||||
|
||||
{
|
||||
// check if maybe already terminated
|
||||
parts, err := b.api.StateMinerPartitions(ctx, maddr, loc.Deadline, nil)
|
||||
if err != nil {
|
||||
return cid.Cid{}, false, xerrors.Errorf("getting partitions: %w", err)
|
||||
}
|
||||
live, err := parts[loc.Partition].LiveSectors.IsSet(uint64(s.Number))
|
||||
if err != nil {
|
||||
return cid.Cid{}, false, xerrors.Errorf("checking if sector is in live set: %w", err)
|
||||
}
|
||||
if !live {
|
||||
// already terminated
|
||||
return cid.Undef, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
b.lk.Lock()
|
||||
bf, ok := b.todo[*loc]
|
||||
if !ok {
|
||||
n := bitfield.New()
|
||||
bf = &n
|
||||
b.todo[*loc] = bf
|
||||
}
|
||||
bf.Set(uint64(s.Number))
|
||||
|
||||
sent := make(chan cid.Cid, 1)
|
||||
b.waiting[s.Number] = append(b.waiting[s.Number], sent)
|
||||
|
||||
select {
|
||||
case b.notify <- struct{}{}:
|
||||
default: // already have a pending notification, don't need more
|
||||
}
|
||||
b.lk.Unlock()
|
||||
|
||||
select {
|
||||
case c := <-sent:
|
||||
return c, false, nil
|
||||
case <-ctx.Done():
|
||||
return cid.Undef, false, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *TerminateBatcher) Flush(ctx context.Context) (*cid.Cid, error) {
|
||||
resCh := make(chan *cid.Cid, 1)
|
||||
select {
|
||||
case b.force <- resCh:
|
||||
select {
|
||||
case res := <-resCh:
|
||||
return res, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *TerminateBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) {
|
||||
b.lk.Lock()
|
||||
defer b.lk.Unlock()
|
||||
|
||||
mid, err := address.IDFromAddress(b.maddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([]abi.SectorID, 0)
|
||||
for _, bf := range b.todo {
|
||||
err := bf.ForEach(func(id uint64) error {
|
||||
res = append(res, abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(id),
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
if res[i].Miner != res[j].Miner {
|
||||
return res[i].Miner < res[j].Miner
|
||||
}
|
||||
|
||||
return res[i].Number < res[j].Number
|
||||
})
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (b *TerminateBatcher) Stop(ctx context.Context) error {
|
||||
close(b.stop)
|
||||
|
||||
select {
|
||||
case <-b.stopped:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
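terminate_batch.go above introduces the batching pattern used for TerminateSectors messages: callers register work, a single goroutine coalesces it, and a one-slot notify channel plus a force channel decide when a batch goes out. A stripped-down, illustrative sketch of that loop with a toy item type (the real batcher also skips deadlines that are currently or imminently challenged and caps batches at the actors' AddressedSectorsMax and DeclarationsMax limits):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type batcher struct {
	lk     sync.Mutex
	todo   []int
	notify chan struct{} // capacity 1: at most one pending wake-up
	force  chan chan int // user-triggered flush, replies with the batch size
}

func newBatcher() *batcher {
	b := &batcher{notify: make(chan struct{}, 1), force: make(chan chan int)}
	go b.run()
	return b
}

func (b *batcher) add(n int) {
	b.lk.Lock()
	b.todo = append(b.todo, n)
	b.lk.Unlock()
	select {
	case b.notify <- struct{}{}:
	default: // a wake-up is already pending, no need for another
	}
}

func (b *batcher) flush() int {
	res := make(chan int, 1)
	b.force <- res
	return <-res
}

func (b *batcher) run() {
	const batchWait = 5 * time.Minute // mirrors TerminateBatchWait
	for {
		var forceRes chan int
		select {
		case <-b.notify: // new work arrived (the real batcher only sends once the batch is big enough)
		case <-time.After(batchWait): // periodically send even small batches
		case forceRes = <-b.force: // user asked for an immediate send
		}
		b.lk.Lock()
		sent := len(b.todo) // a real batcher would build and send the message here
		b.todo = nil
		b.lk.Unlock()
		if forceRes != nil {
			forceRes <- sent
		}
	}
}

func main() {
	b := newBatcher()
	b.add(1)
	b.add(2)
	fmt.Println("flushed:", b.flush())
}
```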
|
4
extern/storage-sealing/types.go
vendored
4
extern/storage-sealing/types.go
vendored
@ -103,6 +103,10 @@ type SectorInfo struct {
|
||||
// Recovery
|
||||
Return ReturnState
|
||||
|
||||
// Termination
|
||||
TerminateMessage *cid.Cid
|
||||
TerminatedAt abi.ChainEpoch
|
||||
|
||||
// Debug
|
||||
LastErr string
|
||||
|
||||
|
12
go.mod
12
go.mod
@ -14,7 +14,7 @@ require (
|
||||
github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
|
||||
github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07
|
||||
github.com/coreos/go-systemd/v22 v22.0.0
|
||||
github.com/coreos/go-systemd/v22 v22.1.0
|
||||
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
|
||||
github.com/dgraph-io/badger/v2 v2.2007.2
|
||||
github.com/docker/go-units v0.4.0
|
||||
@ -31,9 +31,9 @@ require (
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
|
||||
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
|
||||
github.com/filecoin-project/go-data-transfer v1.2.3
|
||||
github.com/filecoin-project/go-data-transfer v1.2.7
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
|
||||
github.com/filecoin-project/go-fil-markets v1.0.10
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2
|
||||
github.com/filecoin-project/go-multistore v0.0.3
|
||||
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
|
||||
@ -70,7 +70,7 @@ require (
|
||||
github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459
|
||||
github.com/ipfs/go-filestore v1.0.0
|
||||
github.com/ipfs/go-fs-lock v0.0.6
|
||||
github.com/ipfs/go-graphsync v0.5.1
|
||||
github.com/ipfs/go-graphsync v0.5.2
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.3
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.5
|
||||
github.com/ipfs/go-ipfs-ds-help v1.0.0
|
||||
@ -104,7 +104,7 @@ require (
|
||||
github.com/libp2p/go-libp2p-mplex v0.3.0
|
||||
github.com/libp2p/go-libp2p-noise v0.1.2
|
||||
github.com/libp2p/go-libp2p-peerstore v0.2.6
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.1
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.9.0
|
||||
github.com/libp2p/go-libp2p-record v0.1.3
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
|
||||
@ -124,7 +124,7 @@ require (
|
||||
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/raulk/clock v1.1.0
|
||||
github.com/raulk/go-watchdog v0.0.1
|
||||
github.com/raulk/go-watchdog v1.0.1
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/supranational/blst v0.1.1
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
|
37
go.sum
37
go.sum
@ -129,6 +129,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@ -145,6 +146,8 @@ github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbL
|
||||
github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@ -155,8 +158,8 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
@ -255,8 +258,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
|
||||
github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno=
|
||||
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
|
||||
github.com/filecoin-project/go-data-transfer v1.2.3 h1:rM/HgGOOMsKvmeQjY7CVR3v7Orxf04LJSSczSpGlhg4=
|
||||
github.com/filecoin-project/go-data-transfer v1.2.3/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY=
|
||||
github.com/filecoin-project/go-data-transfer v1.2.7 h1:WE5Cpp9eMt5BDoWOVR64QegSn6bwHQaDzyyjVU377Y0=
|
||||
github.com/filecoin-project/go-data-transfer v1.2.7/go.mod h1:mvjZ+C3NkBX10JP4JMu27DCjUouHFjHwUGh+Xc4yvDA=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
|
||||
@ -264,8 +267,8 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.10 h1:1QunPsgApTLNXVlaXoPMxyrMtOsMLPOQq3RUjGRmgVI=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.10/go.mod h1:tcXby9CsTNuHu19dH05YZ5pNDsoYcQXSrbkxzVeMJrY=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2 h1:5FVdDmF9GvW6Xllql9OGiJXEZjh/tu590BXSQH2W/vU=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
@ -394,6 +397,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@ -559,10 +564,8 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
|
||||
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
|
||||
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
|
||||
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
|
||||
github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM=
|
||||
github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
|
||||
github.com/ipfs/go-graphsync v0.5.1 h1:4fXBRvRKicTgTmCFMmEua/H5jvmAOLgU9Z7PCPWt2ec=
|
||||
github.com/ipfs/go-graphsync v0.5.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
|
||||
github.com/ipfs/go-graphsync v0.5.2 h1:USD+daaSC+7pLHCxROThSaF6SF7WYXF03sjrta0rCfA=
|
||||
github.com/ipfs/go-graphsync v0.5.2/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
|
||||
@ -925,8 +928,8 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1
|
||||
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.0 h1:YNVRyXqBgv9i4RG88jzoTtkSOaSB45CqHkL29NNBZb4=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.0/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.1 h1:j4umIg5nyus+sqNfU+FWvb9aeYFQH/A+nDFhWj+8yy8=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.1/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E=
|
||||
@ -1238,6 +1241,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
|
||||
github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE=
|
||||
github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
|
||||
@ -1315,8 +1320,8 @@ github.com/prometheus/procfs v0.1.0 h1:jhMy6QXfi3y2HEzFoyuCj40z4OZIIHHPtFyCMftmv
|
||||
github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y=
|
||||
github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0=
|
||||
github.com/raulk/go-watchdog v0.0.1 h1:q0ad0fanW8uaLRTvxQ0RfdADBiKa6CL6NMByhB0vpBs=
|
||||
github.com/raulk/go-watchdog v0.0.1/go.mod h1:dIvQcKy0laxuHGda1ms8/2T9wE3ZJRbz9bxEO7c0q1M=
|
||||
github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4=
|
||||
github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
@ -1368,6 +1373,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
|
||||
@ -1428,6 +1435,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
|
||||
github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=
|
||||
github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
|
||||
|
@ -183,6 +183,8 @@ func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
|
||||
require.True(t, ok)
|
||||
|
||||
cancel()
|
||||
// pull one value out to avoid race
|
||||
_, _ = <-ch
|
||||
|
||||
v, ok = <-ch
|
||||
require.Equal(t, cid.Undef, v)
|
||||
|
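The test tweak above ("pull one value out to avoid race") reflects a common Go pattern: after cancelling the context, the producing goroutine may already be committed to sending one more value, so the test drains it before asserting that the channel is closed. A minimal, generic illustration (toy producer, not the blockstore code):

```go
package main

import (
	"context"
	"fmt"
)

func produce(ctx context.Context) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; ; i++ {
			if ctx.Err() != nil {
				return
			}
			select {
			case ch <- i: // may win the race even after cancel() was called
			case <-ctx.Done():
				return
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := produce(ctx)

	fmt.Println(<-ch) // 0
	cancel()

	_, _ = <-ch // drain the value the goroutine may already be sending
	v, ok := <-ch
	fmt.Println(v, ok) // 0 false: channel is now closed
}
```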
@ -12,22 +12,22 @@ var log = logging.Logger("markets")
|
||||
|
||||
// StorageClientLogger logs events from the storage client
|
||||
func StorageClientLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
|
||||
log.Infow("storage event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
|
||||
log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
|
||||
}
|
||||
|
||||
// StorageProviderLogger logs events from the storage provider
|
||||
func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
|
||||
log.Infow("storage event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
|
||||
log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
|
||||
}
|
||||
|
||||
// RetrievalClientLogger logs events from the retrieval client
|
||||
func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
|
||||
log.Infow("retrieval event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
|
||||
log.Infow("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
|
||||
}
|
||||
|
||||
// RetrievalProviderLogger logs events from the retrieval provider
|
||||
func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
|
||||
log.Infow("retrieval event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
|
||||
log.Infow("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
|
||||
}
|
||||
|
||||
// DataTransferLogger logs events from the data transfer module
|
||||
|
53
markets/storageadapter/api.go
Normal file
53
markets/storageadapter/api.go
Normal file
@ -0,0 +1,53 @@
|
||||
package storageadapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
"github.com/filecoin-project/lotus/api/apibstore"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type apiWrapper struct {
|
||||
api interface {
|
||||
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
|
||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
||||
}
|
||||
}
|
||||
|
||||
func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) {
|
||||
store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(ca.api)))
|
||||
|
||||
preAct, err := ca.api.StateGetActor(ctx, actor, pre)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting pre actor: %w", err)
|
||||
}
|
||||
curAct, err := ca.api.StateGetActor(ctx, actor, cur)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting cur actor: %w", err)
|
||||
}
|
||||
|
||||
preSt, err := miner.Load(store, preAct)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("loading miner actor: %w", err)
|
||||
}
|
||||
curSt, err := miner.Load(store, curAct)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("loading miner actor: %w", err)
|
||||
}
|
||||
|
||||
diff, err := miner.DiffPreCommits(preSt, curSt)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("diff precommits: %w", err)
|
||||
}
|
||||
|
||||
return diff, err
|
||||
}
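diffPreCommits above is consumed later in this PR (in OnDealSectorPreCommitted) to spot a deal whose precommit already landed while the node was not watching. A hedged sketch of that consumption pattern against the miner.PreCommitChanges shape used above (illustrative helper name; error handling trimmed):

```go
package storageadapter

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// findDealSector mirrors the loop used in OnDealSectorPreCommitted below:
// scan the precommits added between the publish tipset and the current head
// for the deal we care about, returning the sector that carries it.
func findDealSector(diff *miner.PreCommitChanges, dealID abi.DealID) (abi.SectorNumber, bool) {
	for _, info := range diff.Added {
		for _, d := range info.Info.DealIDs {
			if d == dealID {
				return info.Info.SectorNumber, true
			}
		}
	}
	return 0, false
}
```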
|
@ -34,9 +34,8 @@ import (
|
||||
)
|
||||
|
||||
type ClientNodeAdapter struct {
|
||||
full.StateAPI
|
||||
full.ChainAPI
|
||||
full.MpoolAPI
|
||||
*clientApi
|
||||
*apiWrapper
|
||||
|
||||
fundmgr *market.FundManager
|
||||
ev *events.Events
|
||||
@ -46,14 +45,14 @@ type ClientNodeAdapter struct {
|
||||
type clientApi struct {
|
||||
full.ChainAPI
|
||||
full.StateAPI
|
||||
full.MpoolAPI
|
||||
}
|
||||
|
||||
func NewClientNodeAdapter(stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode {
|
||||
capi := &clientApi{chain, stateapi}
|
||||
capi := &clientApi{chain, stateapi, mpool}
|
||||
return &ClientNodeAdapter{
|
||||
StateAPI: stateapi,
|
||||
ChainAPI: chain,
|
||||
MpoolAPI: mpool,
|
||||
clientApi: capi,
|
||||
apiWrapper: &apiWrapper{api: capi},
|
||||
|
||||
fundmgr: fundmgr,
|
||||
ev: events.NewEvents(context.TODO(), capi),
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
@ -18,47 +19,49 @@ type getCurrentDealInfoAPI interface {
|
||||
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
|
||||
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error)
|
||||
StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error)
|
||||
|
||||
diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error)
|
||||
}
|
||||
|
||||
// GetCurrentDealInfo gets current information on a deal, and corrects the deal ID as needed
|
||||
func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, *api.MarketDeal, error) {
|
||||
func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, *api.MarketDeal, types.TipSetKey, error) {
|
||||
marketDeal, dealErr := api.StateMarketStorageDeal(ctx, dealID, ts.Key())
|
||||
if dealErr == nil {
|
||||
equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal)
|
||||
if err != nil {
|
||||
return dealID, nil, err
|
||||
return dealID, nil, types.EmptyTSK, err
|
||||
}
|
||||
if equal {
|
||||
return dealID, marketDeal, nil
|
||||
return dealID, marketDeal, types.EmptyTSK, nil
|
||||
}
|
||||
dealErr = xerrors.Errorf("Deal proposals did not match")
|
||||
}
|
||||
if publishCid == nil {
|
||||
return dealID, nil, dealErr
|
||||
return dealID, nil, types.EmptyTSK, dealErr
|
||||
}
|
||||
// attempt deal id correction
|
||||
lookup, err := api.StateSearchMsg(ctx, *publishCid)
|
||||
if err != nil {
|
||||
return dealID, nil, err
|
||||
return dealID, nil, types.EmptyTSK, err
|
||||
}
|
||||
|
||||
if lookup.Receipt.ExitCode != exitcode.Ok {
|
||||
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", *publishCid, lookup.Receipt.ExitCode)
|
||||
return dealID, nil, types.EmptyTSK, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", *publishCid, lookup.Receipt.ExitCode)
|
||||
}
|
||||
|
||||
var retval market.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
|
||||
return dealID, nil, xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err)
|
||||
return dealID, nil, types.EmptyTSK, xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err)
|
||||
}
|
||||
|
||||
if len(retval.IDs) != 1 {
|
||||
// market currently only ever sends messages with 1 deal
|
||||
return dealID, nil, xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal")
|
||||
return dealID, nil, types.EmptyTSK, xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal")
|
||||
}
|
||||
|
||||
if retval.IDs[0] == dealID {
|
||||
// DealID did not change, so we are stuck with the original lookup error
|
||||
return dealID, nil, dealErr
|
||||
return dealID, nil, lookup.TipSet, dealErr
|
||||
}
|
||||
|
||||
dealID = retval.IDs[0]
|
||||
@ -67,13 +70,13 @@ func GetCurrentDealInfo(ctx context.Context, ts *types.TipSet, api getCurrentDea
|
||||
if err == nil {
|
||||
equal, err := checkDealEquality(ctx, ts, api, proposal, marketDeal.Proposal)
|
||||
if err != nil {
|
||||
return dealID, nil, err
|
||||
return dealID, nil, types.EmptyTSK, err
|
||||
}
|
||||
if !equal {
|
||||
return dealID, nil, xerrors.Errorf("Deal proposals did not match")
|
||||
return dealID, nil, types.EmptyTSK, xerrors.Errorf("Deal proposals did not match")
|
||||
}
|
||||
}
|
||||
return dealID, marketDeal, err
|
||||
return dealID, marketDeal, lookup.TipSet, err
|
||||
}
|
||||
|
||||
func checkDealEquality(ctx context.Context, ts *types.TipSet, api getCurrentDealInfoAPI, p1, p2 market.DealProposal) (bool, error) {
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
test "github.com/filecoin-project/lotus/chain/events/state/mock"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -209,7 +210,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
|
||||
MarketDeals: marketDeals,
|
||||
}
|
||||
|
||||
dealID, marketDeal, err := GetCurrentDealInfo(ctx, ts, api, startDealID, proposal, data.publishCid)
|
||||
dealID, marketDeal, _, err := GetCurrentDealInfo(ctx, ts, api, startDealID, proposal, data.publishCid)
|
||||
require.Equal(t, data.expectedDealID, dealID)
|
||||
require.Equal(t, data.expectedMarketDeal, marketDeal)
|
||||
if data.expectedError == nil {
|
||||
@ -236,6 +237,10 @@ type mockGetCurrentDealInfoAPI struct {
|
||||
MarketDeals map[marketDealKey]*api.MarketDeal
|
||||
}
|
||||
|
||||
func (mapi *mockGetCurrentDealInfoAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) {
|
||||
return &miner.PreCommitChanges{}, nil
|
||||
}
|
||||
|
||||
func (mapi *mockGetCurrentDealInfoAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, ts types.TipSetKey) (*api.MarketDeal, error) {
|
||||
deal, ok := mapi.MarketDeals[marketDealKey{dealID, ts}]
|
||||
if !ok {
|
||||
|
@ -5,16 +5,18 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/events"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type sectorCommittedEventsAPI interface {
|
||||
@ -32,7 +34,7 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev

    // First check if the deal is already active, and if so, bail out
    checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) {
        isActive, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid)
        di, isActive, publishTs, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid)
        if err != nil {
            // Note: the error returned from here will end up being returned
            // from OnDealSectorPreCommitted so no need to call the callback
@ -46,6 +48,36 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev
            return true, false, nil
        }

        // Check that precommits which landed between when the deal was published
        // and now don't already contain the deal we care about.
        // (this can happen when the precommit lands very quickly (in tests), or
        // when the client node was down after the deal was published, and when
        // the precommit containing it landed on chain)

        if publishTs == types.EmptyTSK {
            lookup, err := api.StateSearchMsg(ctx, *publishCid)
            if err != nil {
                return false, false, err
            }
            if lookup != nil { // can be nil in tests
                publishTs = lookup.TipSet
            }
        }

        diff, err := api.diffPreCommits(ctx, provider, publishTs, ts.Key())
        if err != nil {
            return false, false, err
        }

        for _, info := range diff.Added {
            for _, d := range info.Info.DealIDs {
                if d == di {
                    cb(info.Info.SectorNumber, false, nil)
                    return true, false, nil
                }
            }
        }

        // Not yet active, start matching against incoming messages
        return false, true, nil
    }
@ -88,7 +120,7 @@ func OnDealSectorPreCommitted(ctx context.Context, api getCurrentDealInfoAPI, ev

            // When the deal is published, the deal ID may change, so get the
            // current deal ID from the publish message CID
            dealID, _, err = GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
            dealID, _, _, err = GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
            if err != nil {
                return false, err
            }
@ -130,7 +162,7 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event

    // First check if the deal is already active, and if so, bail out
    checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) {
        isActive, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid)
        _, isActive, _, err := checkIfDealAlreadyActive(ctx, api, ts, dealID, proposal, publishCid)
        if err != nil {
            // Note: the error returned from here will end up being returned
            // from OnDealSectorCommitted so no need to call the callback
@ -186,7 +218,7 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event
            }

            // Get the deal info
            _, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
            _, sd, _, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
            if err != nil {
                return false, xerrors.Errorf("failed to look up deal on chain: %w", err)
            }
@ -216,22 +248,22 @@ func OnDealSectorCommitted(ctx context.Context, api getCurrentDealInfoAPI, event
    return nil
}

func checkIfDealAlreadyActive(ctx context.Context, api getCurrentDealInfoAPI, ts *types.TipSet, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (bool, error) {
    _, sd, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
func checkIfDealAlreadyActive(ctx context.Context, api getCurrentDealInfoAPI, ts *types.TipSet, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid) (abi.DealID, bool, types.TipSetKey, error) {
    di, sd, publishTs, err := GetCurrentDealInfo(ctx, ts, api, dealID, proposal, publishCid)
    if err != nil {
        // TODO: This may be fine for some errors
        return false, xerrors.Errorf("failed to look up deal on chain: %w", err)
        return 0, false, types.EmptyTSK, xerrors.Errorf("failed to look up deal on chain: %w", err)
    }

    // Sector with deal is already active
    if sd.State.SectorStartEpoch > 0 {
        return true, nil
        return 0, true, publishTs, nil
    }

    // Sector was slashed
    if sd.State.SlashEpoch > 0 {
        return false, xerrors.Errorf("deal %d was slashed at epoch %d", dealID, sd.State.SlashEpoch)
        return 0, false, types.EmptyTSK, xerrors.Errorf("deal %d was slashed at epoch %d", dealID, sd.State.SlashEpoch)
    }

    return false, nil
    return di, false, publishTs, nil
}
@ -161,8 +161,7 @@ func TestOnDealSectorPreCommitted(t *testing.T) {
                deals: map[abi.DealID]*api.MarketDeal{},
            },
        },
        expectedCBCallCount: 1,
        expectedCBError: errors.New("handling applied event: something went wrong"),
        expectedCBCallCount: 0,
        expectedError: errors.New("failed to set up called handler: something went wrong"),
    },
    "proposed deal epoch timeout": {
@ -41,6 +41,7 @@ var log = logging.Logger("storageadapter")

type ProviderNodeAdapter struct {
    api.FullNode
    *apiWrapper

    // this goes away with the data transfer module
    dag dtypes.StagingDAG
@ -55,7 +56,8 @@ type ProviderNodeAdapter struct {
func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
    return func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
        na := &ProviderNodeAdapter{
            FullNode: full,
            FullNode: full,
            apiWrapper: &apiWrapper{api: full},

            dag: dag,
            secb: secb,
@ -69,6 +69,7 @@ type SealingConfig struct {
type MinerFeeConfig struct {
    MaxPreCommitGasFee types.FIL
    MaxCommitGasFee types.FIL
    MaxTerminateGasFee types.FIL
    MaxWindowPoStGasFee types.FIL
    MaxPublishDealsFee types.FIL
    MaxMarketBalanceAddFee types.FIL
@ -211,6 +212,7 @@ func DefaultStorageMiner() *StorageMiner {
        Fees: MinerFeeConfig{
            MaxPreCommitGasFee: types.MustParseFIL("0.025"),
            MaxCommitGasFee: types.MustParseFIL("0.05"),
            MaxTerminateGasFee: types.MustParseFIL("0.5"),
            MaxWindowPoStGasFee: types.MustParseFIL("5"),
            MaxPublishDealsFee: types.MustParseFIL("0.05"),
            MaxMarketBalanceAddFee: types.MustParseFIL("0.007"),

@ -93,13 +93,15 @@ type gasMeta struct {
    limit int64
}

// finds 55th percentile instead of median to put negative pressure on gas price
func medianGasPremium(prices []gasMeta, blocks int) abi.TokenAmount {
    sort.Slice(prices, func(i, j int) bool {
        // sort desc by price
        return prices[i].price.GreaterThan(prices[j].price)
    })

    at := build.BlockGasTarget * int64(blocks) / 2
    at := build.BlockGasTarget * int64(blocks) / 2 // 50th
    at += build.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further
    prev1, prev2 := big.Zero(), big.Zero()
    for _, price := range prices {
        prev1, prev2 = price.price, prev1
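As a quick check of where the new cutoff lands (a sketch, not part of the diff; the numeric value of build.BlockGasTarget is a stand-in): the two lines above place the threshold at T*blocks/2 + T*blocks/40 = 21/40 of the aggregate gas target, i.e. just past the midpoint of the descending, gas-weighted price ordering, which is what biases the returned premium slightly downward.

package main

import "fmt"

func main() {
    const blockGasTarget = int64(5_000_000_000) // stand-in for build.BlockGasTarget
    blocks := int64(5)                          // arbitrary number of blocks

    at := blockGasTarget * blocks / 2        // midpoint of the aggregate gas target ("50th")
    at += blockGasTarget * blocks / (2 * 20) // plus 5% of that half

    // Prints "cutoff at 52.5% of the aggregate gas target".
    fmt.Printf("cutoff at %.1f%% of the aggregate gas target\n",
        float64(at)/float64(blockGasTarget*blocks)*100)
}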
@ -227,6 +229,9 @@ func gasEstimateGasLimit(
    pending, ts := mpool.PendingFor(fromA)
    priorMsgs := make([]types.ChainMsg, 0, len(pending))
    for _, m := range pending {
        if m.Message.Nonce == msg.Nonce {
            break
        }
        priorMsgs = append(priorMsgs, m)
    }

@ -55,6 +55,7 @@ type StateModuleAPI interface {
    StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
    StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error)
    StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
    StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
    StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
    StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
}
@ -589,8 +590,14 @@ func stateWaitMsgLimited(ctx context.Context, smgr *stmgr.StateManager, cstore *
    }, nil
}

func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
    ts, recpt, found, err := a.StateManager.SearchForMessage(ctx, msg)
func (m *StateModule) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
    return stateSearchMsgLimited(ctx, m.StateManager, msg, stmgr.LookbackNoLimit)
}
func (a *StateAPI) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) {
    return stateSearchMsgLimited(ctx, a.StateManager, msg, lookbackLimit)
}
func stateSearchMsgLimited(ctx context.Context, smgr *stmgr.StateManager, msg cid.Cid, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) {
    ts, recpt, found, err := smgr.SearchForMessage(ctx, msg, lookbackLimit)
    if err != nil {
        return nil, err
    }
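For illustration, a small sketch (hypothetical caller, not part of the diff) of the split introduced here: StateSearchMsg keeps its old unbounded behaviour by delegating with stmgr.LookbackNoLimit, while StateSearchMsgLimited lets callers cap how far back the chain walk may go. The function name and the 900-epoch bound below are illustrative only.

// Sketch: bounding a message search to roughly one finality window of epochs.
func findMessage(ctx context.Context, state *StateAPI, c cid.Cid) (*api.MsgLookup, error) {
    const lookback = abi.ChainEpoch(900) // illustrative bound

    ml, err := state.StateSearchMsgLimited(ctx, c, lookback)
    if err != nil {
        return nil, err
    }
    if ml == nil {
        // Not found within the bounded window; an unbounded search remains
        // available via StateSearchMsg, which delegates with stmgr.LookbackNoLimit.
        return nil, xerrors.Errorf("message %s not found in the last %d epochs", c, lookback)
    }
    return ml, nil
}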
@ -3,21 +3,49 @@ package market
import (
    "context"

    "github.com/ipfs/go-cid"
    "go.uber.org/fx"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/lotus/chain/actors"
    marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market"
    "github.com/filecoin-project/lotus/chain/market"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/node/impl/full"
)

type MarketAPI struct {
    fx.In

    full.MpoolAPI
    FMgr *market.FundManager
}

func (a *MarketAPI) MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) {
    params, err := actors.SerializeParams(&addr)
    if err != nil {
        return cid.Undef, err
    }

    smsg, aerr := a.MpoolPushMessage(ctx, &types.Message{
        To: marketactor.Address,
        From: wallet,
        Value: amt,
        Method: marketactor.Methods.AddBalance,
        Params: params,
    }, nil)

    if aerr != nil {
        return cid.Undef, aerr
    }

    return smsg.Cid(), nil
}

func (a *MarketAPI) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) {
    return a.FMgr.GetReserved(addr), nil
}

func (a *MarketAPI) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) {
    return a.FMgr.Reserve(ctx, wallet, addr, amt)
}
@ -328,6 +328,18 @@ func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber
    return sm.Miner.RemoveSector(ctx, id)
}

func (sm *StorageMinerAPI) SectorTerminate(ctx context.Context, id abi.SectorNumber) error {
    return sm.Miner.TerminateSector(ctx, id)
}

func (sm *StorageMinerAPI) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) {
    return sm.Miner.TerminateFlush(ctx)
}

func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) {
    return sm.Miner.TerminatePending(ctx)
}

func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
    return sm.Miner.MarkForUpgrade(id)
}
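For orientation, a sketch (not part of the diff; the interface and function names are stand-ins, assuming the same methods are exposed on the miner API client) of how the three new endpoints fit together: SectorTerminate queues a sector for termination, SectorTerminatePending lists sectors still waiting in the batch, and SectorTerminateFlush forces out any pending batched Terminate message, returning its CID or nil if nothing was queued.

// Sketch only; "StorageMinerClient" stands in for whatever client interface
// exposes the new endpoints.
type StorageMinerClient interface {
    SectorTerminate(ctx context.Context, id abi.SectorNumber) error
    SectorTerminateFlush(ctx context.Context) (*cid.Cid, error)
    SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error)
}

func terminateNow(ctx context.Context, m StorageMinerClient, id abi.SectorNumber) error {
    if err := m.SectorTerminate(ctx, id); err != nil { // queue the sector
        return err
    }
    pending, err := m.SectorTerminatePending(ctx) // inspect the batch
    if err != nil {
        return err
    }
    fmt.Printf("%d sector(s) queued for termination\n", len(pending))

    msgCid, err := m.SectorTerminateFlush(ctx) // force the batched message out
    if err != nil {
        return err
    }
    if msgCid != nil {
        fmt.Println("terminate message sent:", *msgCid)
    }
    return nil
}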
@ -120,7 +120,11 @@ func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.Clien
// uses the client's Client DAG service for transfers
func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) {
    sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter"))
    net := dtnet.NewFromLibp2pHost(h)

    // go-data-transfer protocol retries:
    // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour
    dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5)
    net := dtnet.NewFromLibp2pHost(h, dtRetryParams)

    dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers"))
    transport := dtgstransport.NewTransport(h.ID(), gs)
@ -129,7 +133,9 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap
        return nil, err
    }

    dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc)
    // data-transfer push channel restart configuration
    dtRestartConfig := dtimpl.PushChannelRestartConfig(time.Minute, 10, 1024, 10*time.Minute, 3)
    dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc, dtRestartConfig)
    if err != nil {
        return nil, err
    }
@ -153,7 +159,11 @@ func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore {
}

func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, j journal.Journal) (storagemarket.StorageClient, error) {
    net := smnet.NewFromLibp2pHost(h)
    // go-fil-markets protocol retries:
    // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour
    marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5)
    net := smnet.NewFromLibp2pHost(h, marketsRetryParams)

    c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, storageimpl.DealPollingInterval(time.Second))
    if err != nil {
        return nil, err
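A quick back-of-the-envelope check of the "~= 1 hour" comments above, assuming RetryParameters(min, max, attempts, multiplier) yields an exponential backoff of min*multiplier^n capped at max (that interpretation is an assumption; the exact semantics live in go-data-transfer and go-fil-markets):

package main

import (
    "fmt"
    "time"
)

func main() {
    const (
        minBackoff = time.Second
        maxBackoff = 5 * time.Minute
        attempts   = 15
        factor     = 5
    )

    total, backoff := time.Duration(0), time.Duration(minBackoff)
    for i := 0; i < attempts; i++ {
        if backoff > maxBackoff {
            backoff = maxBackoff
        }
        total += backoff
        backoff *= factor
    }
    // 1s + 5s + 25s + 2m5s + 11*5m = 57m36s, i.e. roughly one hour.
    fmt.Println(total)
}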
@ -7,6 +7,7 @@ import (
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "time"

    "github.com/gbrlsnchs/jwt/v3"
@ -14,6 +15,7 @@ import (
    "github.com/libp2p/go-libp2p-core/peer"
    "github.com/libp2p/go-libp2p-core/peerstore"
    record "github.com/libp2p/go-libp2p-record"
    "github.com/raulk/go-watchdog"
    "go.uber.org/fx"
    "golang.org/x/xerrors"

@ -28,7 +30,6 @@ import (
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/repo"
    "github.com/filecoin-project/lotus/system"
    "github.com/raulk/go-watchdog"
)

const (
@ -69,45 +70,71 @@ func MemoryConstraints() system.MemoryConstraints {

// MemoryWatchdog starts the memory watchdog, applying the computed resource
// constraints.
func MemoryWatchdog(lc fx.Lifecycle, constraints system.MemoryConstraints) {
func MemoryWatchdog(lr repo.LockedRepo, lc fx.Lifecycle, constraints system.MemoryConstraints) {
    if os.Getenv(EnvWatchdogDisabled) == "1" {
        log.Infof("memory watchdog is disabled via %s", EnvWatchdogDisabled)
        return
    }

    cfg := watchdog.MemConfig{
        Resolution: 5 * time.Second,
        Policy: &watchdog.WatermarkPolicy{
            Watermarks: []float64{0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95},
            EmergencyWatermark: 0.95,
        },
        Logger: logWatchdog,
    // configure heap profile capture so that one is captured per episode where
    // utilization climbs over 90% of the limit. A maximum of 10 heapdumps
    // will be captured during life of this process.
    watchdog.HeapProfileDir = filepath.Join(lr.Path(), "heapprof")
    watchdog.HeapProfileMaxCaptures = 10
    watchdog.HeapProfileThreshold = 0.9
    watchdog.Logger = logWatchdog

    policy := watchdog.NewWatermarkPolicy(0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95)

    // Try to initialize a watchdog in the following order of precedence:
    // 1. If a max heap limit has been provided, initialize a heap-driven watchdog.
    // 2. Else, try to initialize a cgroup-driven watchdog.
    // 3. Else, try to initialize a system-driven watchdog.
    // 4. Else, log a warning that the system is flying solo, and return.

    addStopHook := func(stopFn func()) {
        lc.Append(fx.Hook{
            OnStop: func(ctx context.Context) error {
                stopFn()
                return nil
            },
        })
    }

    // if user has set max heap limit, apply it. Otherwise, fall back to total
    // system memory constraint.
    // 1. If user has set max heap limit, apply it.
    if maxHeap := constraints.MaxHeapMem; maxHeap != 0 {
        log.Infof("memory watchdog will apply max heap constraint: %d bytes", maxHeap)
        cfg.Limit = maxHeap
        cfg.Scope = watchdog.ScopeHeap
    } else {
        log.Infof("max heap size not provided; memory watchdog will apply total system memory constraint: %d bytes", constraints.TotalSystemMem)
        cfg.Limit = constraints.TotalSystemMem
        cfg.Scope = watchdog.ScopeSystem
        const minGOGC = 10
        err, stopFn := watchdog.HeapDriven(maxHeap, minGOGC, policy)
        if err == nil {
            log.Infof("initialized heap-driven watchdog; max heap: %d bytes", maxHeap)
            addStopHook(stopFn)
            return
        }
        log.Warnf("failed to initialize heap-driven watchdog; err: %s", err)
        log.Warnf("trying a cgroup-driven watchdog")
    }

    err, stop := watchdog.Memory(cfg)
    if err != nil {
        log.Warnf("failed to instantiate memory watchdog: %s", err)
    // 2. cgroup-driven watchdog.
    err, stopFn := watchdog.CgroupDriven(5*time.Second, policy)
    if err == nil {
        log.Infof("initialized cgroup-driven watchdog")
        addStopHook(stopFn)
        return
    }
    log.Warnf("failed to initialize cgroup-driven watchdog; err: %s", err)
    log.Warnf("trying a system-driven watchdog")

    // 3. system-driven watchdog.
    err, stopFn = watchdog.SystemDriven(0, 5*time.Second, policy) // 0 calculates the limit automatically.
    if err == nil {
        log.Infof("initialized system-driven watchdog")
        addStopHook(stopFn)
        return
    }

    lc.Append(fx.Hook{
        OnStop: func(ctx context.Context) error {
            stop()
            return nil
        },
    })
    // 4. log the failure
    log.Warnf("failed to initialize system-driven watchdog; err: %s", err)
    log.Warnf("system running without a memory watchdog")
}

type JwtPayload struct {
@ -166,7 +193,7 @@ func BuiltinBootstrap() (dtypes.BootstrapPeers, error) {

func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, error) {
    // TODO: retry resolving, don't fail if at least one resolve succeeds
    res := []peer.AddrInfo{}
    var res []peer.AddrInfo
    for _, d := range ds {
        addrs, err := addrutil.ParseAddresses(context.TODO(), d.Config.Relays)
        if err != nil {
@ -164,6 +164,20 @@ func TestWindowedPost(t *testing.T) {
    test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10)
}

func TestTerminate(t *testing.T) {
    if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
        t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
    }

    logging.SetLogLevel("miner", "ERROR")
    logging.SetLogLevel("chainstore", "ERROR")
    logging.SetLogLevel("chain", "ERROR")
    logging.SetLogLevel("sub", "ERROR")
    logging.SetLogLevel("storageminer", "ERROR")

    test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestCCUpgrade(t *testing.T) {
    logging.SetLogLevel("miner", "ERROR")
    logging.SetLogLevel("chainstore", "ERROR")
@ -4,8 +4,6 @@ import (
    "bytes"
    "context"

    "github.com/filecoin-project/go-state-types/network"

    "github.com/ipfs/go-cid"
    cbg "github.com/whyrusleeping/cbor-gen"
    "golang.org/x/xerrors"
@ -14,6 +12,8 @@ import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/go-state-types/dline"
    "github.com/filecoin-project/go-state-types/network"

    market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"

@ -243,6 +243,15 @@ func (s SealingAPIAdapter) StateSectorPartition(ctx context.Context, maddr addre
    return nil, nil // not found
}

func (s SealingAPIAdapter) StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tok sealing.TipSetToken) ([]api.Partition, error) {
    tsk, err := types.TipSetKeyFromBytes(tok)
    if err != nil {
        return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
    }

    return s.delegate.StateMinerPartitions(ctx, maddr, dlIdx, tsk)
}

func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (market.DealProposal, error) {
    tsk, err := types.TipSetKeyFromBytes(tok)
    if err != nil {
@ -266,6 +275,15 @@ func (s SealingAPIAdapter) StateNetworkVersion(ctx context.Context, tok sealing.
    return s.delegate.StateNetworkVersion(ctx, tsk)
}

func (s SealingAPIAdapter) StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (*dline.Info, error) {
    tsk, err := types.TipSetKeyFromBytes(tok)
    if err != nil {
        return nil, err
    }

    return s.delegate.StateMinerProvingDeadline(ctx, maddr, tsk)
}

func (s SealingAPIAdapter) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) {
    msg := types.Message{
        To: to,
Some files were not shown because too many files have changed in this diff.