diff --git a/.circleci/config.yml b/.circleci/config.yml
index d49d40bf9..acd447f69 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -225,12 +225,17 @@ jobs:
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- - run:
- name: go get vectors branch
- command: go get github.com/filecoin-project/test-vectors@<< parameters.vectors-branch >>
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
+ - run:
+ name: install statediff globally
+ command: |
+ ## statediff is optional; we succeed even if compilation fails.
+ mkdir -p /tmp/statediff
+ git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
+ cd /tmp/statediff
+ go install ./cmd/statediff || exit 0
- run:
name: go test
environment:
@@ -249,6 +254,25 @@ jobs:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
+ build-lotus-soup:
+ description: |
+ Compile `lotus-soup` Testground test plan using the current version of Lotus.
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: cd extern/oni && git submodule sync
+ - run: cd extern/oni && git submodule update --init
+ - run: cd extern/filecoin-ffi && make
+ - run:
+ name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
+ command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
+ - run:
+ name: "build lotus-soup testplan"
+ command: pushd extern/oni/lotus-soup && go build -tags=testground .
+
build-macos:
description: build darwin lotus binary
@@ -395,7 +419,8 @@ workflows:
version: 2.1
ci:
jobs:
- - lint-all
+ - lint-all:
+ concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- cbor-gen-check
@@ -422,6 +447,7 @@ workflows:
test-suite-name: conformance-bleeding-edge
packages: "./conformance"
vectors-branch: master
+ - build-lotus-soup
- build-debug
- build-all:
requires:
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..6d717b44d
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,16 @@
+## filecoin-project/lotus CODEOWNERS
+## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
+##
+## These users or groups will be automatically assigned as reviewers every time
+## a PR is submitted that modifies code in the specified locations.
+##
+## The Lotus repo configuration requires that at least ONE codeowner approves
+## the PR before merging.
+
+### Global owners.
+* @magik6k @whyrusleeping @Kubuxu
+
+### Conformance testing.
+conformance/ @raulk
+extern/test-vectors @raulk
+cmd/tvx @raulk
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index a1d152294..fd51881b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,8 +10,9 @@
/lotus-fountain
/lotus-stats
/lotus-bench
-/lotus-wallet
+/lotus-gateway
/lotus-pcr
+/lotus-wallet
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
diff --git a/.gitmodules b/.gitmodules
index ad09aba35..35f5a3d3f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -8,3 +8,9 @@
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git
+[submodule "extern/fil-blst"]
+ path = extern/fil-blst
+ url = https://github.com/filecoin-project/fil-blst.git
+[submodule "extern/oni"]
+ path = extern/oni
+ url = https://github.com/filecoin-project/oni
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ccefeda10..4f6f0b3a0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,449 @@
# Lotus changelog
+# 0.9.0 / 2020-10-07
+
+This consensus-breaking release of Lotus upgrades the actors version to v2.0.0. This requires migrating actor state from v0 to v2. The changes that break consensus are:
+
+- Introducing v2 actors and its migration (https://github.com/filecoin-project/lotus/pull/3936)
+- Runtime's Receiver() should only return ID addresses (https://github.com/filecoin-project/lotus/pull/3589)
+- Update miner eligibility checks for v2 actors (https://github.com/filecoin-project/lotus/pull/4188)
+- Add funds that have left FilReserve to circ supply (https://github.com/filecoin-project/lotus/pull/4160)
+- Set WinningPoStSectorSetLookback to finality post-v2 actors (https://github.com/filecoin-project/lotus/pull/4190)
+- fix: error when actor panics directly (https://github.com/filecoin-project/lotus/pull/3697)
+
+## Changes
+
+#### Dependencies
+
+- Update go-bitfield (https://github.com/filecoin-project/lotus/pull/4171)
+- update the AMT implementation (https://github.com/filecoin-project/lotus/pull/4194)
+- Update to actors v0.2.1 (https://github.com/filecoin-project/lotus/pull/4199)
+
+#### Core Lotus
+
+- Paych: fix voucher amount verification (https://github.com/filecoin-project/lotus/pull/3821)
+- Cap market provider messages (https://github.com/filecoin-project/lotus/pull/4141)
+- Run fork function after cron for null block safety (https://github.com/filecoin-project/lotus/pull/4114)
+- use bitswap sessions when fetching messages, and cancel them (https://github.com/filecoin-project/lotus/pull/4142)
+- relax pubsub IPColocationFactorThreshold to 5 (https://github.com/filecoin-project/lotus/pull/4183)
+- Support addresses with mainnet prefixes (https://github.com/filecoin-project/lotus/pull/4186)
+- fix: make message signer nonce generation transactional (https://github.com/filecoin-project/lotus/pull/4165)
+- build: Env var to keep test address output (https://github.com/filecoin-project/lotus/pull/4213)
+- make vm.EnableGasTracing public (https://github.com/filecoin-project/lotus/pull/4214)
+- introduce separate state-tree versions (https://github.com/filecoin-project/lotus/pull/4197)
+- reject explicit "calls" at the upgrade height (https://github.com/filecoin-project/lotus/pull/4231)
+- return an illegal actor error when we see an unsupported actor version (https://github.com/filecoin-project/lotus/pull/4232)
+- Set head should unmark blocks as valid (https://gist.github.com/travisperson/3c7cddd77a33979a519ccef4e6515f20)
+
+#### Mining
+
+- Increased ExpectedSealDuration and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743)
+- Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133)
+- lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961)
+- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173)
+- feat(miner): add miner deadline diffing logic (https://github.com/filecoin-project/lotus/pull/4178)
+
+#### UX
+
+- Improve the UX for replacing messages (https://github.com/filecoin-project/lotus/pull/4134)
+- Add verified flag to interactive deal creation (https://github.com/filecoin-project/lotus/pull/4145)
+- Add command to (slowly) prune lotus chain datastore (https://github.com/filecoin-project/lotus/pull/3876)
+- Some helpers for verifreg work (https://github.com/filecoin-project/lotus/pull/4124)
+- Always use default 720h for setask duration and hide the duration param option (https://github.com/filecoin-project/lotus/pull/4077)
+- Convert ID addresses to key addresses before checking wallet (https://github.com/filecoin-project/lotus/pull/4122)
+- add a command to view block space utilization (https://github.com/filecoin-project/lotus/pull/4176)
+- allow usage inspection on a chain segment (https://github.com/filecoin-project/lotus/pull/4177)
+- Add mpool stats for base fee (https://github.com/filecoin-project/lotus/pull/4170)
+- Add verified status to api.DealInfo (https://github.com/filecoin-project/lotus/pull/4153)
+- Add a CLI command to set a miner's owner address (https://github.com/filecoin-project/lotus/pull/4189)
+
+#### Tooling and validation
+
+- Lotus-pcr: add recover-miners command (https://github.com/filecoin-project/lotus/pull/3714)
+- MpoolPushUntrusted API for gateway (https://github.com/filecoin-project/lotus/pull/3915)
+- Test lotus-miner info all (https://github.com/filecoin-project/lotus/pull/4166)
+- chain export: Error with unfinished exports (https://github.com/filecoin-project/lotus/pull/4179)
+- add printf in TestWindowPost (https://github.com/filecoin-project/lotus/pull/4043)
+- add trace wdpost (https://github.com/filecoin-project/lotus/pull/4020)
+- Fix noncefix (https://github.com/filecoin-project/lotus/pull/4202)
+- Lotus-pcr: Limit the fee cap of messages we will process, refund gas fees for windowed post and storage deals (https://github.com/filecoin-project/lotus/pull/4198)
+- Fix pond (https://github.com/filecoin-project/lotus/pull/4203)
+- allow manual setting of noncefix fee cap (https://github.com/filecoin-project/lotus/pull/4205)
+- implement command to get execution traces of any message (https://github.com/filecoin-project/lotus/pull/4200)
+- conformance: minor driver refactors (https://github.com/filecoin-project/lotus/pull/4211)
+- lotus-pcr: ignore all other messages (https://github.com/filecoin-project/lotus/pull/4218)
+- lotus-pcr: zero refund (https://github.com/filecoin-project/lotus/pull/4229)
+
+## Contributors
+
+The following contributors had 5 or more commits go into this release.
+We are grateful for every contribution!
+
+| Contributor | Commits | Lines ± |
+|--------------------|---------|---------------|
+| Stebalien | 84 | +3425/-2287 |
+| magik6k | 41 | +2121/-506 |
+| arajasek | 39 | +2467/-424 |
+| Kubuxu | 25 | +2344/-775 |
+| raulk | 21 | +287/-196 |
+| whyrusleeping | 13 | +727/-71 |
+| hsanjuan | 13 | +5886/-7956 |
+| dirkmc | 11 | +2634/-576 |
+| travisperson | 8 | +923/-202 |
+| ribasushi | 6 | +188/-128 |
+| zgfzgf | 5 | +21/-17 |
+
+# 0.8.1 / 2020-09-30
+
+This optional release of Lotus introduces a new version of markets which switches to CBOR-map encodings, and allows datastore migrations. The release also introduces several improvements to the mining process, a few performance optimizations, and a battery of UX additions and enhancements.
+
+## Changes
+
+#### Dependencies
+
+- Markets 0.7.0 with updated data stores (https://github.com/filecoin-project/lotus/pull/4089)
+- Update ffi to code with blst fixes (https://github.com/filecoin-project/lotus/pull/3998)
+
+#### Core Lotus
+
+- Fix GetPower with no miner address (https://github.com/filecoin-project/lotus/pull/4049)
+- Refactor: Move nonce generation out of mpool (https://github.com/filecoin-project/lotus/pull/3970)
+
+#### Performance
+
+- Implement caching syscalls for import-bench (https://github.com/filecoin-project/lotus/pull/3888)
+- Fetch tipset blocks in parallel (https://github.com/filecoin-project/lotus/pull/4074)
+- Optimize Tipset equals() (https://github.com/filecoin-project/lotus/pull/4056)
+- Make state transition in validation async (https://github.com/filecoin-project/lotus/pull/3868)
+
+#### Mining
+
+- Add trace window post (https://github.com/filecoin-project/lotus/pull/4020)
+- Use abstract types for Dont recompute post on revert (https://github.com/filecoin-project/lotus/pull/4022)
+- Fix injectNulls logic in test miner (https://github.com/filecoin-project/lotus/pull/4058)
+- Fix potential panic in FinalizeSector (https://github.com/filecoin-project/lotus/pull/4092)
+- Don't recompute post on revert (https://github.com/filecoin-project/lotus/pull/3924)
+- Fix some failed precommit handling (https://github.com/filecoin-project/lotus/pull/3445)
+- Add --no-swap flag for worker (https://github.com/filecoin-project/lotus/pull/4107)
+- Allow some single-thread tasks to run in parallel with PC2/C2 (https://github.com/filecoin-project/lotus/pull/4116)
+
+#### UX
+
+- Add an envvar to set address network version (https://github.com/filecoin-project/lotus/pull/4028)
+- Add logging to chain export (https://github.com/filecoin-project/lotus/pull/4030)
+- Add JSON output to state compute (https://github.com/filecoin-project/lotus/pull/4038)
+- Wallet list CLI: Print balances/nonces (https://github.com/filecoin-project/lotus/pull/4088)
+- Added an option to show or not show sector info for `lotus-miner info` (https://github.com/filecoin-project/lotus/pull/4003)
+- Add a command to import an ipld object into the chainstore (https://github.com/filecoin-project/lotus/pull/3434)
+- Improve the lotus-shed dealtracker (https://github.com/filecoin-project/lotus/pull/4051)
+- Docs review and re-organization (https://github.com/filecoin-project/lotus/pull/3431)
+- Fix wallet list (https://github.com/filecoin-project/lotus/pull/4104)
+- Add an endpoint to validate whether a string is a well-formed address (https://github.com/filecoin-project/lotus/pull/4106)
+- Add an option to set config path (https://github.com/filecoin-project/lotus/pull/4103)
+- Add printf in TestWindowPost (https://github.com/filecoin-project/lotus/pull/4043)
+- Improve miner sectors list UX (https://github.com/filecoin-project/lotus/pull/4108)
+
+#### Tooling
+
+- Move policy change to seal bench (https://github.com/filecoin-project/lotus/pull/4032)
+- Add back network power to stats (https://github.com/filecoin-project/lotus/pull/4050)
+- Conformance: Record and feed circulating supply (https://github.com/filecoin-project/lotus/pull/4078)
+- Snapshot import progress bar, add HTTP support (https://github.com/filecoin-project/lotus/pull/4070)
+- Add lotus shed util to validate a tipset (https://github.com/filecoin-project/lotus/pull/4065)
+- tvx: a test vector extraction and execution tool (https://github.com/filecoin-project/lotus/pull/4064)
+
+#### Bootstrap
+
+- Add new bootstrappers (https://github.com/filecoin-project/lotus/pull/4007)
+- Add Glif node to bootstrap peers (https://github.com/filecoin-project/lotus/pull/4004)
+- Add one more node located in China (https://github.com/filecoin-project/lotus/pull/4041)
+- Add ipfsmain bootstrapper (https://github.com/filecoin-project/lotus/pull/4067)
+
+# 0.8.0 / 2020-09-26
+
+This consensus-breaking release of Lotus introduces an upgrade to the network. The changes that break consensus are:
+
+- Upgrading to specs-actors v0.9.11, which reduces WindowPoSt faults per [FIP 0002](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0002.md) to reduce cost for honest miners with occasional faults (see https://github.com/filecoin-project/specs-actors/pull/1181)
+- Revisions to some cryptoeconomics and network params
+
+This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions.
+
+## Changes
+
+#### Dependencies
+
+- Update spec actors to 0.9.11 (https://github.com/filecoin-project/lotus/pull/4039)
+- Update markets to 0.6.3 (https://github.com/filecoin-project/lotus/pull/4013)
+
+#### Core Lotus
+
+- Network upgrade (https://github.com/filecoin-project/lotus/pull/4039)
+- Fix AddSupportedProofTypes (https://github.com/filecoin-project/lotus/pull/4033)
+- Return an error when we fail to find a sector when checking sector expiration (https://github.com/filecoin-project/lotus/pull/4026)
+- Batch blockstore copies after block validation (https://github.com/filecoin-project/lotus/pull/3980)
+- Remove a misleading miner actor abstraction (https://github.com/filecoin-project/lotus/pull/3977)
+- Fix out-of-bounds when loading all sector infos (https://github.com/filecoin-project/lotus/pull/3976)
+- Fix break condition in the miner (https://github.com/filecoin-project/lotus/pull/3953)
+
+#### UX
+
+- Correct helptext around miners setting ask (https://github.com/filecoin-project/lotus/pull/4009)
+- Make sync wait nicer (https://github.com/filecoin-project/lotus/pull/3991)
+
+#### Tooling and validation
+
+- Small adjustments following network upgradability changes (https://github.com/filecoin-project/lotus/pull/3996)
+- Add some more big pictures stats to stateroot stat (https://github.com/filecoin-project/lotus/pull/3995)
+- Add some actors policy setters for testing (https://github.com/filecoin-project/lotus/pull/3975)
+
+## Contributors
+
+The following contributors had 5 or more commits go into this release.
+We are grateful for every contribution!
+
+| Contributor | Commits | Lines ± |
+|--------------------|---------|---------------|
+| arajasek | 66 | +3140/-1261 |
+| Stebalien | 64 | +3797/-3434 |
+| magik6k | 48 | +1892/-976 |
+| raulk | 40 | +2412/-1549 |
+| vyzo | 22 | +287/-196 |
+| alanshaw | 15 | +761/-146 |
+| whyrusleeping | 15 | +736/-52 |
+| hannahhoward | 14 | +1237/-837 |
+| anton | 6 | +32/-8 |
+| travisperson | 5 | +502/-6 |
+| Frank | 5 | +78/-39 |
+| Jennifer | 5 | +148/-41 |
+
+# 0.7.2 / 2020-09-23
+
+This optional release of Lotus introduces a major refactor around how a Lotus node interacts with code from the specs-actors repo. We now use interfaces to read the state of actors, which is required to be able to reason about different versions of actors code at the same time.
+
+Additionally, this release introduces various improvements to the sync process, as well as changes that improve the overall user experience.
+
+## Changes
+
+#### Core Lotus
+
+- Network upgrade support (https://github.com/filecoin-project/lotus/pull/3781)
+- Upgrade markets to `v0.6.2` (https://github.com/filecoin-project/lotus/pull/3974)
+- Validate chain sync response indices when fetching messages (https://github.com/filecoin-project/lotus/pull/3939)
+- Add height diff to sync wait (https://github.com/filecoin-project/lotus/pull/3926)
+- Replace Requires with Wants (https://github.com/filecoin-project/lotus/pull/3898)
+- Update state diffing for market actor (https://github.com/filecoin-project/lotus/pull/3889)
+- Parallel fetch for sync (https://github.com/filecoin-project/lotus/pull/3887)
+- Fix SectorState (https://github.com/filecoin-project/lotus/pull/3881)
+
+#### User Experience
+
+- Add basic deal stats api server for spacerace slingshot (https://github.com/filecoin-project/lotus/pull/3963)
+- When doing `sectors update-state`, show a list of existing states if user inputs an invalid one (https://github.com/filecoin-project/lotus/pull/3944)
+- Fix `lotus-miner storage find` error (https://github.com/filecoin-project/lotus/pull/3927)
+- Log shutdown method for lotus daemon and miner (https://github.com/filecoin-project/lotus/pull/3925)
+- Update build and setup instruction link (https://github.com/filecoin-project/lotus/pull/3919)
+- Add an option to hide removed sectors from `sectors list` output (https://github.com/filecoin-project/lotus/pull/3903)
+
+#### Testing and validation
+
+- Add init.State#Remove() for testing (https://github.com/filecoin-project/lotus/pull/3971)
+- lotus-shed: add consensus check command (https://github.com/filecoin-project/lotus/pull/3933)
+- Add keyinfo verify and jwt token command to lotus-shed (https://github.com/filecoin-project/lotus/pull/3914)
+- Fix conformance gen (https://github.com/filecoin-project/lotus/pull/3892)
+
+# 0.7.1 / 2020-09-17
+
+This optional release of Lotus introduces some critical fixes to the window PoSt process. It also upgrades some core dependencies, and introduces many improvements to the mining process, deal-making cycle, and overall User Experience.
+
+## Changes
+
+#### Some notable improvements:
+
+- Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909)
+- Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839)
+- Split window PoST submission into multiple messages (https://github.com/filecoin-project/lotus/pull/3689)
+- Improve journal coverage (https://github.com/filecoin-project/lotus/pull/2455)
+- Allow retrievals while sealing (https://github.com/filecoin-project/lotus/pull/3778)
+- Don't prune locally published messages (https://github.com/filecoin-project/lotus/pull/3772)
+- Add get-ask, set-ask retrieval commands (https://github.com/filecoin-project/lotus/pull/3886)
+- Consistently name winning and window post in logs (https://github.com/filecoin-project/lotus/pull/3873)
+- Add auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3752)
+
+#### Dependencies
+
+- Upgrade markets to `v0.6.1` (https://github.com/filecoin-project/lotus/pull/3906)
+- Upgrade specs-actors to `v0.9.10` (https://github.com/filecoin-project/lotus/pull/3846)
+- Upgrade badger (https://github.com/filecoin-project/lotus/pull/3739)
+
+# 0.7.0 / 2020-09-10
+
+This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
+
+- Upgrading the Drand network used from the test Drand network to the League of Entropy main drand network. This is the same Drand network that will be used in the Filecoin mainnet.
+- Upgrading to specs-actors v0.9.8, which adds a new method to the Multisig actor.
+
+## Changes
+
+#### Core Lotus
+
+- Fix IsAncestorOf (https://github.com/filecoin-project/lotus/pull/3717)
+- Update to specs-actors v0.9.8 (https://github.com/filecoin-project/lotus/pull/3725)
+- Increase chain throughput by 20% (https://github.com/filecoin-project/lotus/pull/3732)
+- Update to go-libp2p-pubsub `master` (https://github.com/filecoin-project/lotus/pull/3735)
+- Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670)
+- Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590)
+
+#### Storage Miner
+
+- Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720)
+
+#### Message pool
+
+- Relax mpool add strictness checks for local pushes (https://github.com/filecoin-project/lotus/pull/3724)
+
+
+#### Maintenance
+
+- Fix devnets (https://github.com/filecoin-project/lotus/pull/3712)
+- Fix(chainwatch): compare prev miner with cur miner (https://github.com/filecoin-project/lotus/pull/3715)
+- CI: fix statediff build; make optional (https://github.com/filecoin-project/lotus/pull/3729)
+- Feat: Chaos abort (https://github.com/filecoin-project/lotus/pull/3733)
+
+## Contributors
+
+The following contributors had commits go into this release.
+We are grateful for every contribution!
+
+| Contributor | Commits | Lines ± |
+|--------------------|---------|---------------|
+| arajasek | 28 | +1144/-239 |
+| Kubuxu | 19 | +452/-261 |
+| whyrusleeping | 13 | +456/-87 |
+| vyzo | 11 | +318/-20 |
+| raulk | 10 | +1289/-350 |
+| magik6k | 6 | +188/-55 |
+| dirkmc | 3 | +31/-8 |
+| alanshaw | 3 | +176/-37 |
+| Stebalien | 2 | +9/-12 |
+| lanzafame | 1 | +1/-1 |
+| frrist | 1 | +1/-1 |
+| mishmosh | 1 | +1/-1 |
+| nonsense | 1 | +1/-0 |
+
+# 0.6.2 / 2020-09-09
+
+This release introduces some critical fixes to message selection and gas estimation logic. It also adds the ability for nodes to mark a certain tipset as checkpointed, as well as various minor improvements and bugfixes.
+
+## Changes
+
+#### Messagepool
+
+- Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708)
+- Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701)
+- Adjust optimal selection to always try to fill blocks (https://github.com/filecoin-project/lotus/pull/3685)
+- Fix very minor bug in repub baseFeeLowerBound (https://github.com/filecoin-project/lotus/pull/3663)
+- Add an auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3676)
+- Fix mpool optimal selection packing failure (https://github.com/filecoin-project/lotus/pull/3698)
+
+#### Core Lotus
+
+- Don't use latency as initial estimate for blocksync (https://github.com/filecoin-project/lotus/pull/3648)
+- Add niceSleep 1 second when drand errors (https://github.com/filecoin-project/lotus/pull/3664)
+- Fix isChainNearSync check in block validator (https://github.com/filecoin-project/lotus/pull/3650)
+- Add peer to peer manager before fetching the tipset (https://github.com/filecoin-project/lotus/pull/3667)
+- Add StageFetchingMessages to sync status (https://github.com/filecoin-project/lotus/pull/3668)
+- Pass tipset through upgrade logic (https://github.com/filecoin-project/lotus/pull/3673)
+- Allow nodes to mark tipsets as checkpointed (https://github.com/filecoin-project/lotus/pull/3680)
+- Remove hard-coded late-fee in window PoSt (https://github.com/filecoin-project/lotus/pull/3702)
+- Gas: Fix median calc (https://github.com/filecoin-project/lotus/pull/3686)
+
+#### Storage
+
+- Storage manager: bail out with an error if unsealed cid is undefined (https://github.com/filecoin-project/lotus/pull/3655)
+- Storage: return true from Sealer.ReadPiece() on success (https://github.com/filecoin-project/lotus/pull/3657)
+
+#### Maintenance
+
+- Resolve lotus, test-vectors, statediff dependency cycle (https://github.com/filecoin-project/lotus/pull/3688)
+- Paych: add docs on how to use paych status (https://github.com/filecoin-project/lotus/pull/3690)
+- Initial CODEOWNERS (https://github.com/filecoin-project/lotus/pull/3691)
+
+# 0.6.1 / 2020-09-08
+
+This optional release introduces a minor improvement to the sync process, ensuring nodes don't fall behind and then resync.
+
+## Changes
+
+- Update `test-vectors` (https://github.com/filecoin-project/lotus/pull/3645)
+- Revert "only subscribe to pubsub topics once we are synced" (https://github.com/filecoin-project/lotus/pull/3643)
+
+# 0.6.0 / 2020-09-07
+
+This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
+
+- Tweaking of some cryptoecon parameters in specs-actors 0.9.7 (https://github.com/filecoin-project/specs-actors/releases/tag/v0.9.7)
+- Rebalancing FIL distribution to make testnet FIL scarce, which prevents base fee spikes and sets better expectations for mainnet
+
+This release also introduces many improvements to Lotus! Among them are a new version of go-fil-markets that supports non-blocking retrieval, various spam reduction measures in the messagepool and p2p logic, and UX improvements to payment channels, dealmaking, and state inspection.
+
+## Changes
+
+#### Core Lotus and dependencies
+
+- Implement faucet funds reallocation logic (https://github.com/filecoin-project/lotus/pull/3632)
+- Network upgrade: Upgrade to correct fork threshold (https://github.com/filecoin-project/lotus/pull/3628)
+- Update to specs 0.9.7 and markets 0.6.0 (https://github.com/filecoin-project/lotus/pull/3627)
+- Network upgrade: Perform base fee tamping (https://github.com/filecoin-project/lotus/pull/3623)
+- Chain events: if cache best() is nil, return chain head (https://github.com/filecoin-project/lotus/pull/3611)
+- Update to specs actors v0.9.6 (https://github.com/filecoin-project/lotus/pull/3603)
+
+#### Messagepool
+
+- Temporarily allow negative chains (https://github.com/filecoin-project/lotus/pull/3625)
+- Improve publish/republish logic (https://github.com/filecoin-project/lotus/pull/3592)
+- Fix selection bug; priority messages were not included if other chains were negative (https://github.com/filecoin-project/lotus/pull/3580)
+- Add defensive check for minimum GasFeeCap for inclusion within the next 20 blocks (https://github.com/filecoin-project/lotus/pull/3579)
+- Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578)
+- Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552)
+
+#### Payment channels
+
+- Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547)
+- Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523)
+- Rename CLI command from "paych get" to "paych add-funds" (https://github.com/filecoin-project/lotus/pull/3520)
+
+#### Peer-to-peer
+
+- Only subscribe to pubsub topics once we are synced (https://github.com/filecoin-project/lotus/pull/3602)
+- Reduce mpool add failure log spam (https://github.com/filecoin-project/lotus/pull/3562)
+- Republish messages even if the chains have negative performance (https://github.com/filecoin-project/lotus/pull/3557)
+- Adjust gossipsub gossip factor (https://github.com/filecoin-project/lotus/pull/3556)
+- Integrate pubsub Random Early Drop (https://github.com/filecoin-project/lotus/pull/3518)
+
+#### Miscellaneous
+
+- Fix panic in OnDealExpiredSlashed (https://github.com/filecoin-project/lotus/pull/3553)
+- Robustify state manager against holes in actor method numbers (https://github.com/filecoin-project/lotus/pull/3538)
+
+#### UX
+
+- VM: Fix an error message (https://github.com/filecoin-project/lotus/pull/3608)
+- Documentation: Batch replacement, update lotus-storage-miner to lotus-miner (https://github.com/filecoin-project/lotus/pull/3571)
+- CLI: Robust actor lookup (https://github.com/filecoin-project/lotus/pull/3535)
+- Add agent flag to net peers (https://github.com/filecoin-project/lotus/pull/3534)
+- Add watch option to storage-deals list (https://github.com/filecoin-project/lotus/pull/3527)
+
+#### Testing & tooling
+
+- Decommission chain-validation (https://github.com/filecoin-project/lotus/pull/3606)
+- Metrics: add expected height metric (https://github.com/filecoin-project/lotus/pull/3586)
+- PCR: Use current tipset during refund (https://github.com/filecoin-project/lotus/pull/3570)
+- Lotus-shed: Add math command (https://github.com/filecoin-project/lotus/pull/3568)
+- PCR: Add tipset aggregation (https://github.com/filecoin-project/lotus/pull/3565)
+- Fix broken paych tests (https://github.com/filecoin-project/lotus/pull/3551)
+- Make chain export ~1000x times faster (https://github.com/filecoin-project/lotus/pull/3533)
+- Chainwatch: Stop SyncIncomingBlocks from leaking into chainwatch processing; No panics during processing (https://github.com/filecoin-project/lotus/pull/3526)
+- Conformance: various changes (https://github.com/filecoin-project/lotus/pull/3521)
+
# 0.5.10 / 2020-09-03
This patch includes a crucial fix to the message pool selection logic, strongly disfavouring messages that might cause a miner penalty.
diff --git a/Makefile b/Makefile
index 4f6ece417..56ab361ec 100644
--- a/Makefile
+++ b/Makefile
@@ -92,6 +92,12 @@ lotus-shed: $(BUILD_DEPS)
.PHONY: lotus-shed
BINS+=lotus-shed
+lotus-gateway: $(BUILD_DEPS)
+ rm -f lotus-gateway
+ go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
+.PHONY: lotus-gateway
+BINS+=lotus-gateway
+
build: lotus lotus-miner lotus-worker
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
diff --git a/README.md b/README.md
index 6c1e23efa..fa432bf7d 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -18,7 +18,7 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
## Building & Documentation
-For instructions on how to build lotus from source, please visit [https://lotu.sh](https://lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
+For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/).
## Reporting a Vulnerability
diff --git a/api/api_full.go b/api/api_full.go
index f913483b3..601b14660 100644
--- a/api/api_full.go
+++ b/api/api_full.go
@@ -2,24 +2,29 @@ package api
import (
"context"
+ "fmt"
"time"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -71,6 +76,9 @@ type FullNode interface {
// blockstore and returns raw bytes.
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+ // ChainDeleteObj deletes the node referenced by the given CID
+ ChainDeleteObj(context.Context, cid.Cid) error
+
// ChainHasObj checks if a given CID exists in the chain blockstore.
ChainHasObj(context.Context, cid.Cid) (bool, error)
@@ -112,7 +120,8 @@ type FullNode interface {
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
- ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error)
+ // If oldmsgskip is set, messages from before the requested roots are also not included.
+ ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
@@ -153,14 +162,23 @@ type FullNode interface {
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
+ // SyncCheckpoint marks a tipset as checkpointed, meaning that the chain won't ever fork away from it.
+ SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
+
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution.
SyncMarkBad(ctx context.Context, bcid cid.Cid) error
+ // SyncUnmarkBad unmarks a block as bad, making it possible for it to be validated and synced again.
+ SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
+
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
+ // SyncValidateTipset indicates whether the provided tipset is valid or not
+ SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error)
+
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
@@ -174,6 +192,9 @@ type FullNode interface {
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
+ // MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
+ MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error)
+
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
@@ -218,7 +239,7 @@ type FullNode interface {
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
- WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) bool
+ WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error)
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error)
// WalletSetDefault marks the given address as the default one.
@@ -229,6 +250,8 @@ type FullNode interface {
WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error
+ // WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+ WalletValidateAddress(context.Context, string) (address.Address, error)
// Other
@@ -260,7 +283,7 @@ type FullNode interface {
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
- ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
+ ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error)
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
// ClientGenCar generates a CAR file for the specified file.
@@ -270,6 +293,9 @@ type FullNode interface {
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
+ // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+ // which are stuck due to insufficient funds
+ ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
// ClientUnimport removes references to the specified file from filestore
//ClientUnimport(path string)
@@ -298,28 +324,26 @@ type FullNode interface {
// StateNetworkName returns the name of the network the node is synced to
StateNetworkName(context.Context) (dtypes.NetworkName, error)
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
- // If the filterOut boolean is set to true, any sectors in the filter are excluded.
- // If false, only those sectors in the filter are included.
- StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error)
+ StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
- StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error)
+ StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
// StateMinerPower returns the power of the indicated miner
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
// StateMinerInfo returns info about the indicated miner
- StateMinerInfo(context.Context, address.Address, types.TipSetKey) (MinerInfo, error)
+ StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
// StateMinerDeadlines returns all the proving deadlines for the given miner
- StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error)
- // StateMinerPartitions loads miner partitions for the specified miner/deadline
- StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
+ StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
+ // StateMinerPartitions returns all partitions in the specified deadline
+ StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error)
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
- StateMinerFaults(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
+ StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
- StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
+ StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
@@ -333,11 +357,13 @@ type FullNode interface {
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
// StateSectorExpiration returns epoch at which given sector will expire
- StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*SectorExpiration, error)
+ StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error)
// StateSectorPartition finds deadline/partition with the specified sector
- StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*SectorLocation, error)
+ StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
+ // StateMsgGasCost searches for a message in the chain, and returns details of the message's gas costs, including the penalty and miner tip
+ StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error)
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
// message arrives on chain, and gets to the indicated confidence depth.
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
@@ -367,16 +393,24 @@ type FullNode interface {
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
+ // StateVerifierStatus returns the data cap for the given address.
+ // Returns nil if there is no entry in the data cap table for the
+ // address.
+ StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
- StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*verifreg.DataCap, error)
+ StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
+ // StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
+ StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error)
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
// StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset
StateCirculatingSupply(context.Context, types.TipSetKey) (CirculatingSupply, error)
+ // StateNetworkVersion returns the network version at the given tipset
+ StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
@@ -384,6 +418,12 @@ type FullNode interface {
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+ // MsigGetVestingSchedule returns the vesting details of a given multisig.
+ MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error)
+ // MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+ // It takes the following params: <multisig address>, <start epoch>, <end epoch>
+ MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
//<initial balance>, <sender address of the create msg>, <gas price>
@@ -400,17 +440,29 @@ type FullNode interface {
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+ // MsigAddPropose proposes adding a signer in the multisig
+ // It takes the following params: <multisig address>, <sender address of the propose msg>,
+ // <new signer>, <whether the number of required signers should be increased>
+ MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
+ // MsigAddApprove approves a previously proposed AddSigner message
+ // It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
+ // <proposer address>, <new signer>, <whether the number of required signers should be increased>
+ MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
+ // MsigAddCancel cancels a previously proposed AddSigner message
+ // It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
+ // <new signer>, <whether the number of required signers should be increased>
+ MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
- // <old signer> <new signer>
+ // <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
- // <proposer address>, <old signer> <new signer>
+ // <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
- // <old signer> <new signer>
+ // <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
@@ -421,7 +473,8 @@ type FullNode interface {
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
- PaychAvailableFunds(from, to address.Address) (*ChannelAvailableFunds, error)
+ PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
+ PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
PaychList(context.Context) ([]address.Address, error)
PaychStatus(context.Context, address.Address) (*PaychStatus, error)
PaychSettle(context.Context, address.Address) (cid.Cid, error)
@@ -434,6 +487,12 @@ type FullNode interface {
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
+
+ // CreateBackup creates a node backup under the specified file name. The
+ // method requires that the lotus daemon is running with the
+ // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+ // the path specified when calling CreateBackup is within the base path
+ CreateBackup(ctx context.Context, fpath string) error
}
type FileRef struct {
@@ -442,21 +501,12 @@ type FileRef struct {
}
type MinerSectors struct {
- Sectors uint64
- Active uint64
-}
-
-type SectorExpiration struct {
- OnTime abi.ChainEpoch
-
- // non-zero if sector is faulty, epoch at which it will be permanently
- // removed if it doesn't recover
- Early abi.ChainEpoch
-}
-
-type SectorLocation struct {
- Deadline uint64
- Partition uint64
+ // Live sectors that should be proven.
+ Live uint64
+ // Sectors actively contributing to power.
+ Active uint64
+ // Sectors with failed proofs.
+ Faulty uint64
}
type ImportRes struct {
@@ -489,6 +539,7 @@ type DealInfo struct {
DealID abi.DealID
CreationTime time.Time
+ Verified bool
}
type MsgLookup struct {
@@ -499,6 +550,17 @@ type MsgLookup struct {
Height abi.ChainEpoch
}
+type MsgGasCost struct {
+ Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
+ GasUsed abi.TokenAmount
+ BaseFeeBurn abi.TokenAmount
+ OverEstimationBurn abi.TokenAmount
+ MinerPenalty abi.TokenAmount
+ MinerTip abi.TokenAmount
+ Refund abi.TokenAmount
+ TotalCost abi.TokenAmount
+}
+
type BlockMessages struct {
BlsMessages []*types.Message
SecpkMessages []*types.SignedMessage
@@ -511,11 +573,6 @@ type Message struct {
Message *types.Message
}
-type ChainSectorInfo struct {
- Info miner.SectorOnChainInfo
- ID abi.SectorNumber
-}
-
type ActorState struct {
Balance types.BigInt
State interface{}
@@ -540,7 +597,12 @@ type ChannelInfo struct {
}
type ChannelAvailableFunds struct {
+ // Channel is the address of the channel
Channel *address.Address
+ // From is the from address of the channel (channel creator)
+ From address.Address
+ // To is the to address of the channel
+ To address.Address
// ConfirmedAmt is the amount of funds that have been confirmed on-chain
// for the channel
ConfirmedAmt types.BigInt
@@ -582,8 +644,9 @@ type VoucherCreateResult struct {
}
type MinerPower struct {
- MinerPower power.Claim
- TotalPower power.Claim
+ MinerPower power.Claim
+ TotalPower power.Claim
+ HasMinPower bool
}
type QueryOffer struct {
@@ -686,6 +749,8 @@ type ActiveSync struct {
type SyncState struct {
ActiveSyncs []ActiveSync
+
+ VMApplied uint64
}
type SyncStateStage int
@@ -697,8 +762,28 @@ const (
StageMessages
StageSyncComplete
StageSyncErrored
+ StageFetchingMessages
)
+func (v SyncStateStage) String() string {
+ switch v {
+ case StageHeaders:
+ return "header sync"
+ case StagePersistHeaders:
+ return "persisting headers"
+ case StageMessages:
+ return "message sync"
+ case StageSyncComplete:
+ return "complete"
+ case StageSyncErrored:
+ return "error"
+ case StageFetchingMessages:
+ return "fetching messages"
+ default:
+ return fmt.Sprintf("<unknown: %d>", v)
+ }
+}
+
type MpoolChange int
const (
@@ -730,14 +815,14 @@ type CirculatingSupply struct {
}
type MiningBaseInfo struct {
- MinerPower types.BigInt
- NetworkPower types.BigInt
- Sectors []abi.SectorInfo
- WorkerKey address.Address
- SectorSize abi.SectorSize
- PrevBeaconEntry types.BeaconEntry
- BeaconEntries []types.BeaconEntry
- HasMinPower bool
+ MinerPower types.BigInt
+ NetworkPower types.BigInt
+ Sectors []builtin.SectorInfo
+ WorkerKey address.Address
+ SectorSize abi.SectorSize
+ PrevBeaconEntry types.BeaconEntry
+ BeaconEntries []types.BeaconEntry
+ EligibleForMining bool
}
type BlockTemplate struct {
@@ -749,7 +834,7 @@ type BlockTemplate struct {
Messages []*types.SignedMessage
Epoch abi.ChainEpoch
Timestamp uint64
- WinningPoStProof []abi.PoStProof
+ WinningPoStProof []builtin.PoStProof
}
type DataSize struct {
@@ -773,7 +858,31 @@ const (
MsigCancel
)
+type Deadline struct {
+ PostSubmissions bitfield.BitField
+}
+
+type Partition struct {
+ AllSectors bitfield.BitField
+ FaultySectors bitfield.BitField
+ RecoveringSectors bitfield.BitField
+ LiveSectors bitfield.BitField
+ ActiveSectors bitfield.BitField
+}
+
type Fault struct {
Miner address.Address
Epoch abi.ChainEpoch
}
+
+var EmptyVesting = MsigVesting{
+ InitialBalance: types.EmptyInt,
+ StartEpoch: -1,
+ UnlockDuration: -1,
+}
+
+type MsigVesting struct {
+ InitialBalance abi.TokenAmount
+ StartEpoch abi.ChainEpoch
+ UnlockDuration abi.ChainEpoch
+}
diff --git a/api/api_storage.go b/api/api_storage.go
index 48f6e9e45..529224f6e 100644
--- a/api/api_storage.go
+++ b/api/api_storage.go
@@ -11,11 +11,11 @@ import (
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
// StorageMiner is a low-level interface to the Filecoin network storage miner node
@@ -71,7 +71,7 @@ type StorageMiner interface {
stores.SectorIndex
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
- MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
+ MarketListDeals(ctx context.Context) ([]MarketDeal, error)
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
@@ -83,7 +83,7 @@ type StorageMiner interface {
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
- DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
+ DealsList(ctx context.Context) ([]MarketDeal, error)
DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
@@ -101,6 +101,12 @@ type StorageMiner interface {
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error)
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)
+
+ // CreateBackup creates a node backup under the specified file name. The
+ // method requires that the lotus-miner is running with the
+ // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+ // the path specified when calling CreateBackup is within the base path
+ CreateBackup(ctx context.Context, fpath string) error
}
type SealRes struct {
diff --git a/api/api_test.go b/api/api_test.go
index 1b438258a..34c47f432 100644
--- a/api/api_test.go
+++ b/api/api_test.go
@@ -1,12 +1,16 @@
package api
import (
+ "encoding/json"
"os"
"os/exec"
"path/filepath"
+ "reflect"
"runtime"
"strings"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func goCmd() string {
@@ -32,3 +36,68 @@ func TestDoesntDependOnFFI(t *testing.T) {
}
}
}
+
+func TestReturnTypes(t *testing.T) {
+ errType := reflect.TypeOf(new(error)).Elem()
+ bareIface := reflect.TypeOf(new(interface{})).Elem()
+ jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem()
+
+ tst := func(api interface{}) func(t *testing.T) {
+ return func(t *testing.T) {
+ ra := reflect.TypeOf(api).Elem()
+ for i := 0; i < ra.NumMethod(); i++ {
+ m := ra.Method(i)
+ switch m.Type.NumOut() {
+ case 1: // if 1 return value, it must be an error
+ require.Equal(t, errType, m.Type.Out(0), m.Name)
+
+ case 2: // if 2 return values, first can't be an interface/function, second must be an error
+ seen := map[reflect.Type]struct{}{}
+ todo := []reflect.Type{m.Type.Out(0)}
+ for len(todo) > 0 {
+ typ := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ if _, ok := seen[typ]; ok {
+ continue
+ }
+ seen[typ] = struct{}{}
+
+ if typ.Kind() == reflect.Interface && typ != bareIface && !typ.Implements(jmarsh) {
+ t.Error("methods can't return interfaces", m.Name)
+ }
+
+ switch typ.Kind() {
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Array:
+ fallthrough
+ case reflect.Slice:
+ fallthrough
+ case reflect.Chan:
+ todo = append(todo, typ.Elem())
+ case reflect.Map:
+ todo = append(todo, typ.Elem())
+ todo = append(todo, typ.Key())
+ case reflect.Struct:
+ for i := 0; i < typ.NumField(); i++ {
+ todo = append(todo, typ.Field(i).Type)
+ }
+ }
+ }
+
+ require.NotEqual(t, reflect.Func.String(), m.Type.Out(0).Kind().String(), m.Name)
+ require.Equal(t, errType, m.Type.Out(1), m.Name)
+
+ default:
+ t.Error("methods can only have 1 or 2 return values", m.Name)
+ }
+ }
+ }
+ }
+
+ t.Run("common", tst(new(Common)))
+ t.Run("full", tst(new(FullNode)))
+ t.Run("miner", tst(new(StorageMiner)))
+ t.Run("worker", tst(new(WorkerAPI)))
+}
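For reference, a hypothetical sketch of the method shapes TestReturnTypes accepts and rejects: every API method must return either a lone error, or a value (no bare functions, and no interfaces other than interface{} or json.Marshaler anywhere inside it) followed by an error. All identifiers below are made up for illustration.

package example // hypothetical package, for illustration only

import "context"

// Shapes the test would accept.
type acceptedAPI interface {
	Shutdown(ctx context.Context) error                // 1 return value: must be an error
	NetPeerCount(ctx context.Context) (int, error)     // 2 return values: concrete value + error
	Heights(ctx context.Context) (<-chan int64, error) // channels/slices of plain values are fine
}

// Shapes the test would reject.
type rejectedAPI interface {
	Watcher(ctx context.Context) (func() error, error)            // functions can't be returned
	Node(ctx context.Context) (interface{ Close() error }, error) // nor non-marshalable interfaces
}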
diff --git a/api/api_worker.go b/api/api_worker.go
index 00c4df8bc..ac1446fdd 100644
--- a/api/api_worker.go
+++ b/api/api_worker.go
@@ -6,10 +6,10 @@ import (
"github.com/ipfs/go-cid"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
- "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/build"
diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go
index 5eb757c7b..01de13e61 100644
--- a/api/apistruct/struct.go
+++ b/api/apistruct/struct.go
@@ -5,6 +5,8 @@ import (
"io"
"time"
+ stnetwork "github.com/filecoin-project/go-state-types/network"
+
"github.com/ipfs/go-cid"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
@@ -12,26 +14,27 @@ import (
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -84,6 +87,7 @@ type FullNodeStruct struct {
ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"`
ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"`
+ ChainDeleteObj func(context.Context, cid.Cid) error `perm:"admin"`
ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"`
ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"`
ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"`
@@ -92,7 +96,7 @@ type FullNodeStruct struct {
ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
- ChainExport func(context.Context, abi.ChainEpoch, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
+ ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@@ -104,8 +108,11 @@ type FullNodeStruct struct {
SyncState func(context.Context) (*api.SyncState, error) `perm:"read"`
SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"`
SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
+ SyncCheckpoint func(ctx context.Context, key types.TipSetKey) error `perm:"admin"`
SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
+ SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
+ SyncValidateTipset func(ctx context.Context, tsk types.TipSetKey) (bool, error) `perm:"read"`
MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"`
MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"`
@@ -115,7 +122,9 @@ type FullNodeStruct struct {
MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
MpoolClear func(context.Context, bool) error `perm:"write"`
- MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+ MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+ MpoolPushUntrusted func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+
MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"`
MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
@@ -123,60 +132,63 @@ type FullNodeStruct struct {
MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
- WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"`
- WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
- WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
- WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
- WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
- WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
- WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) bool `perm:"read"`
- WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
- WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
- WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
- WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
- WalletDelete func(context.Context, address.Address) error `perm:"write"`
+ WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"`
+ WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
+ WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
+ WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
+ WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
+ WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
+ WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"`
+ WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
+ WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
+ WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
+ WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
+ WalletDelete func(context.Context, address.Address) error `perm:"write"`
+ WalletValidateAddress func(context.Context, string) (address.Address, error) `perm:"read"`
- ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
- ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
- ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"`
- ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
- ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
- ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
- ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
- ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
- ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
- ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"`
- ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
- ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
- ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
- ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"`
- ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
- ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"`
- ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
- ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
+ ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
+ ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
+ ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"`
+ ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
+ ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
+ ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
+ ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
+ ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
+ ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
+ ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"`
+ ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
+ ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+ ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
+ ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"`
+ ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
+ ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"`
+ ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
+ ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
+ ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"`
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
- StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
- StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
- StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) `perm:"read"`
+ StateMinerSectors func(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+ StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+ StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) `perm:"read"`
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
- StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
- StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"`
- StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"`
- StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
+ StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) `perm:"read"`
+ StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) `perm:"read"`
+ StateMinerPartitions func(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) `perm:"read"`
+ StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
- StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
+ StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
- StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*api.SectorExpiration, error) `perm:"read"`
- StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*api.SectorLocation, error) `perm:"read"`
+ StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
+ StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
+ StateMsgGasCost func(context.Context, cid.Cid, types.TipSetKey) (*api.MsgGasCost, error) `perm:"read"`
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
@@ -192,37 +204,48 @@ type FullNodeStruct struct {
StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
- StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*verifreg.DataCap, error) `perm:"read"`
+ StateVerifierStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+ StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+ StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"`
StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
+ StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"`
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
+ MsigGetVestingSchedule func(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
+ MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
+ MsigAddPropose func(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
+ MsigAddApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
+ MsigAddCancel func(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) `perm:"sign"`
MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
- PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
- PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
- PaychAvailableFunds func(address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
- PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
- PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
- PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
- PaychCollect func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
- PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"`
- PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
- PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"`
- PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"`
- PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"`
- PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
- PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
- PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
- PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"`
+ PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
+ PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
+ PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+ PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+ PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
+ PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
+ PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
+ PaychCollect func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
+ PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"`
+ PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
+ PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"`
+ PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"`
+ PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"`
+ PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
+ PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
+ PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
+ PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"`
+
+ CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"`
}
}
@@ -240,7 +263,7 @@ type StorageMinerStruct struct {
MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"`
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
- MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
+ MarketListDeals func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"`
MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
MarketGetDealUpdates func(ctx context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"`
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
@@ -285,7 +308,7 @@ type StorageMinerStruct struct {
StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"`
DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
- DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
+ DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"`
DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"`
DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"`
DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"`
@@ -303,6 +326,8 @@ type StorageMinerStruct struct {
PiecesListCidInfos func(ctx context.Context) ([]cid.Cid, error) `perm:"read"`
PiecesGetPieceInfo func(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"`
PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"`
+
+ CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"`
}
}
@@ -483,7 +508,7 @@ func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api
return c.Internal.ClientRetrieveWithEvents(ctx, order, ref)
}
-func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
+func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
return c.Internal.ClientQueryAsk(ctx, p, miner)
}
func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
@@ -506,6 +531,10 @@ func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan
return c.Internal.ClientDataTransferUpdates(ctx)
}
+func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error {
+ return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel)
+}
+
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
}
@@ -546,6 +575,10 @@ func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessag
return c.Internal.MpoolPush(ctx, smsg)
}
+func (c *FullNodeStruct) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
+ return c.Internal.MpoolPushUntrusted(ctx, smsg)
+}
+
func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
return c.Internal.MpoolPushMessage(ctx, msg, spec)
}
@@ -602,7 +635,7 @@ func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Addres
return c.Internal.WalletSignMessage(ctx, k, msg)
}
-func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) bool {
+func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) {
return c.Internal.WalletVerify(ctx, k, msg, sig)
}
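Caller-side sketch (not from the patch) of adapting to the new (bool, error) signature of WalletVerify, so an RPC failure is no longer conflated with a failed verification; the package and helper names are illustrative.

package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/crypto"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
)

// checkSig distinguishes an RPC failure from a signature that simply does not
// verify, which the old bool-only signature could not do.
func checkSig(ctx context.Context, node api.FullNode, k address.Address, msg []byte, sig *crypto.Signature) error {
	ok, err := node.WalletVerify(ctx, k, msg, sig)
	if err != nil {
		return xerrors.Errorf("WalletVerify RPC failed: %w", err)
	}
	if !ok {
		return xerrors.New("signature does not verify")
	}
	return nil
}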
@@ -626,6 +659,10 @@ func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address)
return c.Internal.WalletDelete(ctx, addr)
}
+func (c *FullNodeStruct) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) {
+ return c.Internal.WalletValidateAddress(ctx, str)
+}
+
func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
return c.Internal.MpoolGetNonce(ctx, addr)
}
@@ -658,6 +695,10 @@ func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte,
return c.Internal.ChainReadObj(ctx, obj)
}
+func (c *FullNodeStruct) ChainDeleteObj(ctx context.Context, obj cid.Cid) error {
+ return c.Internal.ChainDeleteObj(ctx, obj)
+}
+
func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) {
return c.Internal.ChainHasObj(ctx, o)
}
@@ -690,8 +731,8 @@ func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey,
return c.Internal.ChainGetPath(ctx, from, to)
}
-func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error) {
- return c.Internal.ChainExport(ctx, nroots, tsk)
+func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, iom bool, tsk types.TipSetKey) (<-chan []byte, error) {
+ return c.Internal.ChainExport(ctx, nroots, iom, tsk)
}
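Sketch (not from the patch) of driving the updated ChainExport signature. The new boolean (named iom in the wrapper above) is assumed to control whether older messages are included in the export, so the value passed below is a placeholder; the package and helper names are illustrative.

package example // hypothetical package, for illustration only

import (
	"context"
	"os"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// exportSnapshot streams a chain export from the current head to a file,
// keeping 2000 epochs of state roots. The second argument is the newly added
// boolean parameter.
func exportSnapshot(ctx context.Context, node api.FullNode, out string) error {
	f, err := os.Create(out)
	if err != nil {
		return err
	}
	defer f.Close() //nolint:errcheck

	stream, err := node.ChainExport(ctx, abi.ChainEpoch(2000), false, types.EmptyTSK)
	if err != nil {
		return err
	}
	for chunk := range stream {
		if _, err := f.Write(chunk); err != nil {
			return err
		}
	}
	return nil
}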
func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
@@ -710,27 +751,39 @@ func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.
return c.Internal.SyncIncomingBlocks(ctx)
}
+func (c *FullNodeStruct) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error {
+ return c.Internal.SyncCheckpoint(ctx, tsk)
+}
+
func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
return c.Internal.SyncMarkBad(ctx, bcid)
}
+func (c *FullNodeStruct) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error {
+ return c.Internal.SyncUnmarkBad(ctx, bcid)
+}
+
func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
return c.Internal.SyncCheckBad(ctx, bcid)
}
+func (c *FullNodeStruct) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) {
+ return c.Internal.SyncValidateTipset(ctx, tsk)
+}
+
func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
return c.Internal.StateNetworkName(ctx)
}
-func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, filter *abi.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
- return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk)
+func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return c.Internal.StateMinerSectors(ctx, addr, sectorNos, tsk)
}
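Sketch (not from the patch) of the new StateMinerSectors shape, which replaces the filter/filterOut pair with an optional bitfield of sector numbers and returns miner.SectorOnChainInfo directly; the sector numbers, package and helper names are illustrative, and passing nil for the bitfield is expected to mean "all sectors".

package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
)

// sectorsByNumber fetches on-chain info for an explicit set of sector numbers.
func sectorsByNumber(ctx context.Context, node api.FullNode, maddr address.Address) ([]*miner.SectorOnChainInfo, error) {
	want := bitfield.NewFromSet([]uint64{1, 2, 3})
	return node.StateMinerSectors(ctx, maddr, &want, types.EmptyTSK)
}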
-func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
+func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
return c.Internal.StateMinerActiveSectors(ctx, addr, tsk)
}
-func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
+func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) {
return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
}
@@ -738,19 +791,19 @@ func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address,
return c.Internal.StateMinerPower(ctx, a, tsk)
}
-func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) {
+func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
return c.Internal.StateMinerInfo(ctx, actor, tsk)
}
-func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]*miner.Deadline, error) {
- return c.Internal.StateMinerDeadlines(ctx, m, tsk)
+func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, actor address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
+ return c.Internal.StateMinerDeadlines(ctx, actor, tsk)
}
-func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]*miner.Partition, error) {
+func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
}
-func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
+func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
return c.Internal.StateMinerFaults(ctx, actor, tsk)
}
@@ -758,7 +811,7 @@ func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.Cha
return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
}
-func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
+func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
}
@@ -782,11 +835,11 @@ func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.A
return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
}
-func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*api.SectorExpiration, error) {
+func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) {
return c.Internal.StateSectorExpiration(ctx, maddr, n, tsk)
}
-func (c *FullNodeStruct) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error) {
+func (c *FullNodeStruct) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) {
return c.Internal.StateSectorPartition(ctx, maddr, sectorNumber, tok)
}
@@ -806,6 +859,10 @@ func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Addres
return c.Internal.StateReadState(ctx, addr, tsk)
}
+func (c *FullNodeStruct) StateMsgGasCost(ctx context.Context, msgc cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) {
+ return c.Internal.StateMsgGasCost(ctx, msgc, tsk)
+}
+
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
return c.Internal.StateWaitMsg(ctx, msgc, confidence)
}
@@ -862,10 +919,18 @@ func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch
return c.Internal.StateCompute(ctx, height, msgs, tsk)
}
-func (c *FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*verifreg.DataCap, error) {
+func (c *FullNodeStruct) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
+ return c.Internal.StateVerifierStatus(ctx, addr, tsk)
+}
+
+func (c *FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
return c.Internal.StateVerifiedClientStatus(ctx, addr, tsk)
}
+func (c *FullNodeStruct) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) {
+ return c.Internal.StateVerifiedRegistryRootKey(ctx, tsk)
+}
+
func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
}
@@ -874,10 +939,22 @@ func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.T
return c.Internal.StateCirculatingSupply(ctx, tsk)
}
+func (c *FullNodeStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) {
+ return c.Internal.StateNetworkVersion(ctx, tsk)
+}
+
func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.MsigGetAvailableBalance(ctx, a, tsk)
}
+func (c *FullNodeStruct) MsigGetVestingSchedule(ctx context.Context, a address.Address, tsk types.TipSetKey) (api.MsigVesting, error) {
+ return c.Internal.MsigGetVestingSchedule(ctx, a, tsk)
+}
+
+func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, sTsk types.TipSetKey, eTsk types.TipSetKey) (types.BigInt, error) {
+ return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk)
+}
+
func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp)
}
@@ -894,6 +971,18 @@ func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, t
return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
}
+func (c *FullNodeStruct) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+ return c.Internal.MsigAddPropose(ctx, msig, src, newAdd, inc)
+}
+
+func (c *FullNodeStruct) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+ return c.Internal.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
+}
+
+func (c *FullNodeStruct) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
+ return c.Internal.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
+}
+
func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
}
@@ -918,8 +1007,12 @@ func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid
return c.Internal.PaychGetWaitReady(ctx, sentinel)
}
-func (c *FullNodeStruct) PaychAvailableFunds(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) {
- return c.Internal.PaychAvailableFunds(from, to)
+func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
+ return c.Internal.PaychAvailableFunds(ctx, ch)
+}
+
+func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
+ return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to)
}
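Sketch (not from the patch) contrasting the two available-funds queries above: PaychAvailableFunds is keyed by an existing channel address, while PaychAvailableFundsByFromTo takes the from/to pair; the package and helper names are illustrative.

package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
)

// fundsByChannel queries by an already-known channel address.
func fundsByChannel(ctx context.Context, node api.FullNode, ch address.Address) (*api.ChannelAvailableFunds, error) {
	return node.PaychAvailableFunds(ctx, ch)
}

// fundsByPair queries by the from/to pair, e.g. before the channel address is
// known locally.
func fundsByPair(ctx context.Context, node api.FullNode, from, to address.Address) (*api.ChannelAvailableFunds, error) {
	return node.PaychAvailableFundsByFromTo(ctx, from, to)
}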
func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
@@ -970,6 +1063,10 @@ func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Addr
return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof)
}
+func (c *FullNodeStruct) CreateBackup(ctx context.Context, fpath string) error {
+ return c.Internal.CreateBackup(ctx, fpath)
+}
+
// StorageMinerStruct
func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) {
@@ -1102,7 +1199,7 @@ func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid c
return c.Internal.MarketImportDealData(ctx, propcid, path)
}
-func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) {
+func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]api.MarketDeal, error) {
return c.Internal.MarketListDeals(ctx)
}
@@ -1146,7 +1243,7 @@ func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid ci
return c.Internal.DealsImportData(ctx, dealPropCid, file)
}
-func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) {
+func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]api.MarketDeal, error) {
return c.Internal.DealsList(ctx)
}
@@ -1210,6 +1307,10 @@ func (c *StorageMinerStruct) PiecesGetCIDInfo(ctx context.Context, payloadCid ci
return c.Internal.PiecesGetCIDInfo(ctx, payloadCid)
}
+func (c *StorageMinerStruct) CreateBackup(ctx context.Context, fpath string) error {
+ return c.Internal.CreateBackup(ctx, fpath)
+}
+
// WorkerStruct
func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) {
diff --git a/api/cbor_gen.go b/api/cbor_gen.go
index 8889e6021..7ab575b28 100644
--- a/api/cbor_gen.go
+++ b/api/cbor_gen.go
@@ -6,7 +6,7 @@ import (
"fmt"
"io"
- abi "github.com/filecoin-project/specs-actors/actors/abi"
+ abi "github.com/filecoin-project/go-state-types/abi"
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go
index c3b4962d5..ced536cc3 100644
--- a/api/docgen/docgen.go
+++ b/api/docgen/docgen.go
@@ -28,9 +28,9 @@ import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct"
@@ -105,7 +105,7 @@ func init() {
addExample(network.Connected)
addExample(dtypes.NetworkName("lotus"))
addExample(api.SyncStateStage(1))
- addExample(build.APIVersion)
+ addExample(build.FullAPIVersion)
addExample(api.PCHInbound)
addExample(time.Minute)
addExample(datatransfer.TransferID(3))
@@ -114,6 +114,7 @@ func init() {
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(network.ReachabilityPublic)
+ addExample(build.NewestNetworkVersion)
addExample(&types.ExecutionTrace{
Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
diff --git a/api/test/blockminer.go b/api/test/blockminer.go
index c6433efea..6b28a5794 100644
--- a/api/test/blockminer.go
+++ b/api/test/blockminer.go
@@ -7,8 +7,8 @@ import (
"testing"
"time"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/miner"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type BlockMiner struct {
diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go
index 3666aa3db..97fb665ed 100644
--- a/api/test/ccupgrade.go
+++ b/api/test/ccupgrade.go
@@ -10,17 +10,38 @@ import (
"github.com/stretchr/testify/require"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
+ for _, height := range []abi.ChainEpoch{
+ 1, // before
+ 162, // while sealing
+ 520, // after upgrade deal
+ 5000, // after
+ } {
+ height := height // make linters happy by copying
+ t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
+ testCCUpgrade(t, b, blocktime, height)
+ })
+ }
+}
+
+func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
ctx := context.Background()
- n, sn := b(t, 1, OneMiner)
+ n, sn := b(t, 1, OneMiner, node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
+ Network: build.ActorUpgradeNetworkVersion,
+ Height: upgradeHeight,
+ Migration: stmgr.UpgradeActorsV2,
+ }}))
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
@@ -85,6 +106,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
{
exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
require.NoError(t, err)
+ require.NotNil(t, exp)
require.Greater(t, 50000, int(exp.OnTime))
}
{
@@ -93,6 +115,22 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
require.Less(t, 50000, int(exp.OnTime))
}
+ dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+ require.NoError(t, err)
+
+ // Sector should expire.
+ for {
+ // Wait for the sector to expire.
+ status, err := miner.SectorsStatus(ctx, CC, true)
+ require.NoError(t, err)
+ if status.OnTime == 0 && status.Early == 0 {
+ break
+ }
+ t.Log("waiting for sector to expire")
+ // wait one deadline per loop.
+ time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
+ }
+
fmt.Println("shutting down mining")
atomic.AddInt64(&mine, -1)
<-done
diff --git a/api/test/deals.go b/api/test/deals.go
index 1dcc1c8d7..aa5bfa716 100644
--- a/api/test/deals.go
+++ b/api/test/deals.go
@@ -20,11 +20,11 @@ import (
"github.com/ipld/go-car"
"github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/miner"
- "github.com/filecoin-project/specs-actors/actors/abi"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
@@ -402,9 +402,12 @@ func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI,
IsCAR: carExport,
}
updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
+ if err != nil {
+ t.Fatal(err)
+ }
for update := range updates {
if update.Err != "" {
- t.Fatalf("%v", err)
+ t.Fatalf("retrieval failed: %s", update.Err)
}
}
diff --git a/api/test/mining.go b/api/test/mining.go
index f912ff305..e19774a76 100644
--- a/api/test/mining.go
+++ b/api/test/mining.go
@@ -12,7 +12,7 @@ import (
logging "github.com/ipfs/go-log/v2"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/build"
diff --git a/api/test/paych.go b/api/test/paych.go
index b0ccc0a5c..e95773b6a 100644
--- a/api/test/paych.go
+++ b/api/test/paych.go
@@ -8,17 +8,21 @@ import (
"testing"
"time"
- "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
+ cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
@@ -67,7 +71,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal(err)
}
- channelAmt := int64(100000)
+ channelAmt := int64(7000)
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
if err != nil {
t.Fatal(err)
@@ -133,17 +137,26 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal("Unable to settle payment channel")
}
+ creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(paymentCreator)))
+
// wait for the receiver to submit their vouchers
ev := events.NewEvents(ctx, paymentCreator)
preds := state.NewStatePredicates(paymentCreator)
finished := make(chan struct{})
err = ev.StateChanged(func(ts *types.TipSet) (done bool, more bool, err error) {
- act, err := paymentCreator.StateReadState(ctx, channel, ts.Key())
+ act, err := paymentCreator.StateGetActor(ctx, channel, ts.Key())
if err != nil {
return false, false, err
}
- state := act.State.(paych.State)
- if state.ToSend.GreaterThanEqual(abi.NewTokenAmount(6000)) {
+ state, err := paych.Load(creatorStore, act)
+ if err != nil {
+ return false, false, err
+ }
+ toSend, err := state.ToSend()
+ if err != nil {
+ return false, false, err
+ }
+ if toSend.GreaterThanEqual(abi.NewTokenAmount(6000)) {
return true, false, nil
}
return false, true, nil
@@ -156,7 +169,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
return true, nil
}, func(ctx context.Context, ts *types.TipSet) error {
return nil
- }, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
+ }, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
})
if err != nil {
@@ -169,8 +182,53 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal("Timed out waiting for receiver to submit vouchers")
}
+ // Create a new voucher now that some vouchers have already been submitted
+ vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if vouchRes.Voucher == nil {
+ t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
+ }
+ vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !vdelta.Equals(abi.NewTokenAmount(1000)) {
+ t.Fatal("voucher didn't have the right amount")
+ }
+
+ // Create a new voucher whose value would exceed the channel balance
+ excessAmt := abi.NewTokenAmount(1000)
+ vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if vouchRes.Voucher != nil {
+ t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
+ }
+ if !vouchRes.Shortfall.Equals(excessAmt) {
+ t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
+ }
+
+ // Add a voucher whose value would exceed the channel balance
+ vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
+ vb, err := vouch.SigningBytes()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
+ if err != nil {
+ t.Fatal(err)
+ }
+ vouch.Signature = sig
+ _, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
+ if err == nil {
+ t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
+ }
+
// wait for the settlement period to pass before collecting
- waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych.SettleDelay)
+ waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych0.SettleDelay)
creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
if err != nil {
@@ -226,7 +284,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
// Add a real block
m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{
- To: builtin.BurntFundsActorAddr,
+ To: builtin0.BurntFundsActorAddr,
From: receiverAddr,
Value: types.NewInt(0),
}, nil)
diff --git a/api/test/test.go b/api/test/test.go
index 98a9a2e48..853267eff 100644
--- a/api/test/test.go
+++ b/api/test/test.go
@@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/miner"
+ "github.com/filecoin-project/lotus/node"
)
type TestNode struct {
@@ -44,7 +45,7 @@ type StorageMiner struct {
//
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
-type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner) ([]TestNode, []TestStorageNode)
+type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner, opts ...node.Option) ([]TestNode, []TestStorageNode)
type testSuite struct {
makeNodes APIBuilder
}
@@ -65,6 +66,8 @@ func TestApis(t *testing.T, b APIBuilder) {
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
func (ts *testSuite) testVersion(t *testing.T) {
+ build.RunningNodeType = build.NodeFull
+
ctx := context.Background()
apis, _ := ts.makeNodes(t, 1, OneMiner)
api := apis[0]
diff --git a/api/test/util.go b/api/test/util.go
index 57a6fcae3..8695e2e2e 100644
--- a/api/test/util.go
+++ b/api/test/util.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
diff --git a/api/test/window_post.go b/api/test/window_post.go
index c5c8ec071..eadcdbb05 100644
--- a/api/test/window_post.go
+++ b/api/test/window_post.go
@@ -12,13 +12,14 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- "github.com/filecoin-project/specs-actors/actors/abi"
- miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
@@ -115,8 +116,29 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
}
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
- ctx := context.Background()
- n, sn := b(t, 1, OneMiner)
+ for _, height := range []abi.ChainEpoch{
+ 1, // before
+ 162, // while sealing
+ 5000, // while proving
+ } {
+ height := height // copy to satisfy lints
+ t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
+ testWindowPostUpgrade(t, b, blocktime, nSectors, height)
+ })
+ }
+}
+
+func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int,
+ upgradeHeight abi.ChainEpoch) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ n, sn := b(t, 1, OneMiner, node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
+ Network: build.ActorUpgradeNetworkVersion,
+ Height: upgradeHeight,
+ Migration: stmgr.UpgradeActorsV2,
+ }}))
+
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
@@ -130,17 +152,24 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
}
build.Clock.Sleep(time.Second)
- mine := true
done := make(chan struct{})
go func() {
defer close(done)
- for mine {
+ for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, MineNext); err != nil {
+ if ctx.Err() != nil {
+ // context was canceled, ignore the error.
+ return
+ }
t.Error(err)
}
}
}()
+ defer func() {
+ cancel()
+ <-done
+ }()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
@@ -154,18 +183,16 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > di.PeriodStart+(miner2.WPoStProvingPeriod)+2 {
+ if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
-
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -186,13 +213,14 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
require.Greater(t, len(parts), 0)
- n, err := parts[0].Sectors.Count()
+ secs := parts[0].AllSectors
+ n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
// Drop the partition
- err = parts[0].Sectors.ForEach(func(sid uint64) error {
- return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(abi.SectorID{
+ err = secs.ForEach(func(sid uint64) error {
+ return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
}, true)
@@ -208,15 +236,16 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
require.Greater(t, len(parts), 0)
- n, err := parts[0].Sectors.Count()
+ secs := parts[0].AllSectors
+ n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
// Drop the sector
- sn, err := parts[0].Sectors.First()
+ sn, err := secs.First()
require.NoError(t, err)
- all, err := parts[0].Sectors.All(2)
+ all, err := secs.All(2)
require.NoError(t, err)
fmt.Println("the sectors", all)
@@ -233,18 +262,17 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > di.PeriodStart+(miner2.WPoStProvingPeriod)+2 {
+ if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -264,17 +292,17 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > di.PeriodStart+(miner2.WPoStProvingPeriod)+2 {
+ if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -291,18 +319,19 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
pledgeSectors(t, ctx, miner, 1, nSectors, nil)
{
- // wait a bit more
-
- head, err := client.ChainHead(ctx)
+ // Wait until proven.
+ di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
- waitUntil := head.Height() + 10
+ waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
+ fmt.Printf("End for head.Height > %d\n", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
@@ -315,7 +344,4 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
-
- mine = false
- <-done
}
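
The test above exercises the actors v2 state migration at several epochs by injecting an stmgr.UpgradeSchedule through node.Override. A minimal sketch of that pattern, factored into a reusable option; the helper name upgradeAt is hypothetical, everything else comes from the imports in this file:

```go
// Sketch: building the upgrade-schedule override that testWindowPostUpgrade
// passes to the API builder. upgradeAt is a hypothetical helper.
package test

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/node"
)

// upgradeAt schedules the actors v2 migration at the given epoch.
func upgradeAt(height abi.ChainEpoch) node.Option {
	return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
		Network:   build.ActorUpgradeNetworkVersion,
		Height:    height,
		Migration: stmgr.UpgradeActorsV2,
	}})
}
```
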
diff --git a/api/types.go b/api/types.go
index 37cc4a7fa..a69aa28d9 100644
--- a/api/types.go
+++ b/api/types.go
@@ -4,11 +4,9 @@ import (
"encoding/json"
"fmt"
- "github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
@@ -49,48 +47,6 @@ type PubsubScore struct {
Score *pubsub.PeerScoreSnapshot
}
-type MinerInfo struct {
- Owner address.Address // Must be an ID-address.
- Worker address.Address // Must be an ID-address.
- NewWorker address.Address // Must be an ID-address.
- ControlAddresses []address.Address // Must be an ID-addresses.
- WorkerChangeEpoch abi.ChainEpoch
- PeerId *peer.ID
- Multiaddrs []abi.Multiaddrs
- SealProofType abi.RegisteredSealProof
- SectorSize abi.SectorSize
- WindowPoStPartitionSectors uint64
-}
-
-func NewApiMinerInfo(info *miner.MinerInfo) MinerInfo {
- var pid *peer.ID
- if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
- pid = &peerID
- }
-
- mi := MinerInfo{
- Owner: info.Owner,
- Worker: info.Worker,
- ControlAddresses: info.ControlAddresses,
-
- NewWorker: address.Undef,
- WorkerChangeEpoch: -1,
-
- PeerId: pid,
- Multiaddrs: info.Multiaddrs,
- SealProofType: info.SealProofType,
- SectorSize: info.SectorSize,
- WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
- }
-
- if info.PendingWorkerKey != nil {
- mi.NewWorker = info.PendingWorkerKey.NewWorker
- mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
- }
-
- return mi
-}
-
type MessageSendSpec struct {
MaxFee abi.TokenAmount
}
diff --git a/api/utils.go b/api/utils.go
index 13d5c92cb..a9d02c31b 100644
--- a/api/utils.go
+++ b/api/utils.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
)
type SignFunc = func(context.Context, []byte) (*crypto.Signature, error)
diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi
index 465f3b5e9..f3f8b8a55 100644
--- a/build/bootstrap/bootstrappers.pi
+++ b/build/bootstrap/bootstrappers.pi
@@ -4,3 +4,9 @@
/dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34
/dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T
/dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W
+/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C
+/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
+/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
+/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
+/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
+/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
diff --git a/build/drand.go b/build/drand.go
index ef3f2c498..73299249a 100644
--- a/build/drand.go
+++ b/build/drand.go
@@ -1,15 +1,26 @@
package build
-import "github.com/filecoin-project/lotus/node/modules/dtypes"
+import (
+ "sort"
-var DrandNetwork = DrandIncentinet
-
-func DrandConfig() dtypes.DrandConfig {
- return DrandConfigs[DrandNetwork]
-}
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+)
type DrandEnum int
+func DrandConfigSchedule() dtypes.DrandSchedule {
+ out := dtypes.DrandSchedule{}
+ for start, config := range DrandSchedule {
+ out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]})
+ }
+
+ sort.Slice(out, func(i, j int) bool {
+ return out[i].Start < out[j].Start
+ })
+
+ return out
+}
+
const (
DrandMainnet DrandEnum = iota + 1
DrandTestnet
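
DrandConfigSchedule flattens the per-network DrandSchedule map into a dtypes.DrandSchedule sorted by start epoch. A sketch of how a consumer could resolve the config active at a given epoch; it assumes dtypes.DrandPoint carries a Start epoch comparable to abi.ChainEpoch and a Config, as in the construction above, and the helper name drandConfigAt is hypothetical:

```go
// Sketch: picking the drand config active at a given epoch from the sorted
// schedule returned by DrandConfigSchedule. drandConfigAt is hypothetical.
package build

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

func drandConfigAt(sched dtypes.DrandSchedule, epoch abi.ChainEpoch) (dtypes.DrandConfig, bool) {
	var (
		cfg   dtypes.DrandConfig
		found bool
	)
	// Points are sorted by Start, so the last point at or before the epoch wins.
	for _, p := range sched {
		if p.Start > epoch {
			break
		}
		cfg, found = p.Config, true
	}
	return cfg, found
}
```
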
diff --git a/build/params_2k.go b/build/params_2k.go
index 12005f005..c6538dc08 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -3,19 +3,37 @@
package build
import (
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ "math"
+ "os"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/chain/actors/policy"
)
+const UpgradeBreezeHeight = -1
+const BreezeGasTampingDuration = 0
+
+const UpgradeSmokeHeight = -1
+const UpgradeIgnitionHeight = -2
+const UpgradeRefuelHeight = -3
+
+var UpgradeActorsV2Height = abi.ChainEpoch(10)
+var UpgradeLiftoffHeight = abi.ChainEpoch(-4)
+
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+}
+
func init() {
- power.ConsensusMinerMinPower = big.NewInt(2048)
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
+
+ if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
+ UpgradeActorsV2Height = math.MaxInt64
+ UpgradeLiftoffHeight = 11
}
- verifreg.MinVerifiedDealSize = big.NewInt(256)
BuildType |= Build2k
}
diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go
index cdb8e70d3..40ccca50b 100644
--- a/build/params_shared_funcs.go
+++ b/build/params_shared_funcs.go
@@ -3,17 +3,19 @@ package build
import (
"sort"
+ "github.com/filecoin-project/go-address"
+
"github.com/libp2p/go-libp2p-core/protocol"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/go-state-types/abi"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func DefaultSectorSize() abi.SectorSize {
- szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes))
- for spt := range miner.SupportedProofTypes {
+ szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes))
+ for spt := range miner0.SupportedProofTypes {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
@@ -36,3 +38,15 @@ func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + st
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
return protocol.ID("/fil/kad/" + string(netName))
}
+
+func UseNewestNetwork() bool {
+ // TODO: Put these in a container we can iterate over
+ if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 && UpgradeActorsV2Height <= 0 {
+ return true
+ }
+ return false
+}
+
+func SetAddressNetwork(n address.Network) {
+ address.CurrentNetwork = n
+}
diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go
index 2fce61ee7..722590575 100644
--- a/build/params_shared_vals.go
+++ b/build/params_shared_vals.go
@@ -4,10 +4,15 @@ package build
import (
"math/big"
+ "os"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
)
// /////
@@ -20,6 +25,8 @@ const UnixfsLinksPerLevel = 1024
// Consensus / Network
const AllowableClockDriftSecs = uint64(1)
+const NewestNetworkVersion = network.Version4
+const ActorUpgradeNetworkVersion = network.Version4
// Epochs
const ForkLengthThreshold = Finality
@@ -28,7 +35,7 @@ const ForkLengthThreshold = Finality
var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
// Epochs
-const Finality = miner.ChainFinality
+const Finality = policy.ChainFinality
const MessageConfidence = uint64(5)
// constants for Weight calculation
@@ -40,13 +47,8 @@ const WRatioDen = uint64(2)
// Proofs
// Epochs
-const SealRandomnessLookback = Finality
-
-// Epochs
-const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 // TODO: Get from spec specs-actors
-
-// Maximum lookback that randomness can be sourced from for a seal proof submission
-const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from specs-actors
+// TODO: unused
+const SealRandomnessLookback = policy.SealRandomnessLookback
// /////
// Mining
@@ -54,23 +56,37 @@ const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from spe
// Epochs
const TicketRandomnessLookback = abi.ChainEpoch(1)
-const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
+// /////
+// Address
+
+const AddressMainnetEnvVar = "_mainnet_"
// /////
// Devnet settings
+var Devnet = true
+
const FilBase = uint64(2_000_000_000)
const FilAllocStorageMining = uint64(1_100_000_000)
const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
+const FilReserved = uint64(300_000_000)
var InitialRewardBalance *big.Int
+var InitialFilReserved *big.Int
// TODO: Move other important consts here
func init() {
InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining))
InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision)))
+
+ InitialFilReserved = big.NewInt(int64(FilReserved))
+ InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision)))
+
+ if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar {
+ SetAddressNetwork(address.Mainnet)
+ }
}
// Sync
diff --git a/build/params_testground.go b/build/params_testground.go
index bdd56fbb1..6109cbc04 100644
--- a/build/params_testground.go
+++ b/build/params_testground.go
@@ -10,9 +10,11 @@ package build
import (
"math/big"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+
+ "github.com/filecoin-project/lotus/chain/actors/policy"
)
var (
@@ -31,7 +33,7 @@ var (
AllowableClockDriftSecs = uint64(1)
- Finality = miner.ChainFinality
+ Finality = policy.ChainFinality
ForkLengthThreshold = Finality
SlashablePowerDelay = 20
@@ -46,15 +48,13 @@ var (
BlsSignatureCacheSize = 40000
VerifSigCacheSize = 32000
- SealRandomnessLookback = Finality
- SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
- MaxSealLookback = SealRandomnessLookbackLimit + 2000
+ SealRandomnessLookback = policy.SealRandomnessLookback
- TicketRandomnessLookback = abi.ChainEpoch(1)
- WinningPoStSectorSetLookback = abi.ChainEpoch(10)
+ TicketRandomnessLookback = abi.ChainEpoch(1)
FilBase uint64 = 2_000_000_000
FilAllocStorageMining uint64 = 1_400_000_000
+ FilReserved uint64 = 300_000_000
FilecoinPrecision uint64 = 1_000_000_000_000_000_000
@@ -63,10 +63,35 @@ var (
v = v.Mul(v, big.NewInt(int64(FilecoinPrecision)))
return v
}()
+
+ InitialFilReserved = func() *big.Int {
+ v := big.NewInt(int64(FilReserved))
+ v = v.Mul(v, big.NewInt(int64(FilecoinPrecision)))
+ return v
+ }()
+
// Actor consts
 // TODO: Pull from actors when it's no longer private
MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay)
PackingEfficiencyNum int64 = 4
PackingEfficiencyDenom int64 = 5
+
+ UpgradeBreezeHeight abi.ChainEpoch = -1
+ BreezeGasTampingDuration abi.ChainEpoch = 0
+
+ UpgradeSmokeHeight abi.ChainEpoch = -1
+ UpgradeIgnitionHeight abi.ChainEpoch = -2
+ UpgradeRefuelHeight abi.ChainEpoch = -3
+ UpgradeActorsV2Height abi.ChainEpoch = 10
+ UpgradeLiftoffHeight abi.ChainEpoch = -4
+
+ DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+ }
+
+ NewestNetworkVersion = network.Version4
+ ActorUpgradeNetworkVersion = network.Version4
+
+ Devnet = true
)
diff --git a/build/params_testnet.go b/build/params_testnet.go
index f422b3861..fe70deaef 100644
--- a/build/params_testnet.go
+++ b/build/params_testnet.go
@@ -5,21 +5,54 @@
package build
import (
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ "math"
+ "os"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
)
-func init() {
- power.ConsensusMinerMinPower = big.NewInt(10 << 40)
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg32GiBV1: {},
- abi.RegisteredSealProof_StackedDrg64GiBV1: {},
- }
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandIncentinet,
+ UpgradeSmokeHeight: DrandMainnet,
}
-const BlockDelaySecs = uint64(builtin.EpochDurationSeconds)
+const UpgradeBreezeHeight = 41280
+const BreezeGasTampingDuration = 120
+
+const UpgradeSmokeHeight = 51000
+
+const UpgradeIgnitionHeight = 94000
+const UpgradeRefuelHeight = 130800
+
+var UpgradeActorsV2Height = abi.ChainEpoch(138720)
+
+// This signals our tentative epoch for mainnet launch. It can be moved later, but not earlier.
+// Miners, clients, developers, and custodians all need time to prepare.
+// We still have upgrades and state changes to do, but those can happen after the timing is signaled here.
+const UpgradeLiftoffHeight = 148888
+
+func init() {
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
+ policy.SetSupportedProofTypes(
+ abi.RegisteredSealProof_StackedDrg32GiBV1,
+ abi.RegisteredSealProof_StackedDrg64GiBV1,
+ )
+
+ if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
+ SetAddressNetwork(address.Mainnet)
+ }
+
+ if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
+ UpgradeActorsV2Height = math.MaxInt64
+ }
+
+ Devnet = false
+}
+
+const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
diff --git a/build/version.go b/build/version.go
index a3c5d1552..1602551e4 100644
--- a/build/version.go
+++ b/build/version.go
@@ -1,6 +1,10 @@
package build
-import "fmt"
+import (
+ "fmt"
+
+ "golang.org/x/xerrors"
+)
var CurrentCommit string
var BuildType int
@@ -25,7 +29,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
-const BuildVersion = "0.5.10"
+const BuildVersion = "0.9.0"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
@@ -52,8 +56,37 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
return ve&minorMask == v2&minorMask
}
-// APIVersion is a semver version of the rpc api exposed
-var APIVersion Version = newVer(0, 14, 0)
+type NodeType int
+
+const (
+ NodeUnknown NodeType = iota
+
+ NodeFull
+ NodeMiner
+ NodeWorker
+)
+
+var RunningNodeType NodeType
+
+func VersionForType(nodeType NodeType) (Version, error) {
+ switch nodeType {
+ case NodeFull:
+ return FullAPIVersion, nil
+ case NodeMiner:
+ return MinerAPIVersion, nil
+ case NodeWorker:
+ return WorkerAPIVersion, nil
+ default:
+ return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
+ }
+}
+
+// semver versions of the rpc api exposed
+var (
+ FullAPIVersion = newVer(0, 16, 0)
+ MinerAPIVersion = newVer(0, 15, 0)
+ WorkerAPIVersion = newVer(0, 15, 0)
+)
//nolint:varcheck,deadcode
const (
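
With the API version now split per node type, a caller can look up the version expected for the node it is talking to and compare the major/minor components. A minimal sketch using VersionForType and Version.EqMajorMinor from this package; the helper name checkAPIVersion is hypothetical:

```go
// Sketch: validating a remote API version against the one expected for the
// running node type. checkAPIVersion is a hypothetical helper.
package build

import "golang.org/x/xerrors"

func checkAPIVersion(nodeType NodeType, remote Version) error {
	expected, err := VersionForType(nodeType)
	if err != nil {
		return err
	}
	if !remote.EqMajorMinor(expected) {
		return xerrors.Errorf("unexpected API version %d (expected %d)", remote, expected)
	}
	return nil
}
```
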
diff --git a/chain/actors/adt/adt.go b/chain/actors/adt/adt.go
new file mode 100644
index 000000000..6a454ac26
--- /dev/null
+++ b/chain/actors/adt/adt.go
@@ -0,0 +1,76 @@
+package adt
+
+import (
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+type Map interface {
+ Root() (cid.Cid, error)
+
+ Put(k abi.Keyer, v cbor.Marshaler) error
+ Get(k abi.Keyer, v cbor.Unmarshaler) (bool, error)
+ Delete(k abi.Keyer) error
+
+ ForEach(v cbor.Unmarshaler, fn func(key string) error) error
+}
+
+func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) {
+ switch version {
+ case actors.Version0:
+ return adt0.AsMap(store, root)
+ case actors.Version2:
+ return adt2.AsMap(store, root)
+ }
+ return nil, xerrors.Errorf("unknown network version: %d", version)
+}
+
+func NewMap(store Store, version actors.Version) (Map, error) {
+ switch version {
+ case actors.Version0:
+ return adt0.MakeEmptyMap(store), nil
+ case actors.Version2:
+ return adt2.MakeEmptyMap(store), nil
+ }
+ return nil, xerrors.Errorf("unknown network version: %d", version)
+}
+
+type Array interface {
+ Root() (cid.Cid, error)
+
+ Set(idx uint64, v cbor.Marshaler) error
+ Get(idx uint64, v cbor.Unmarshaler) (bool, error)
+ Delete(idx uint64) error
+ Length() uint64
+
+ ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error
+}
+
+func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) {
+ switch actors.VersionForNetwork(version) {
+ case actors.Version0:
+ return adt0.AsArray(store, root)
+ case actors.Version2:
+ return adt2.AsArray(store, root)
+ }
+ return nil, xerrors.Errorf("unknown network version: %d", version)
+}
+
+func NewArray(store Store, version actors.Version) (Array, error) {
+ switch version {
+ case actors.Version0:
+ return adt0.MakeEmptyArray(store), nil
+ case actors.Version2:
+ return adt2.MakeEmptyArray(store), nil
+ }
+ return nil, xerrors.Errorf("unknown network version: %d", version)
+}
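
The Map and Array interfaces above hide whether the underlying HAMT/AMT comes from specs-actors v0 or v2, so state-inspection code can be written once. A sketch of version-agnostic iteration; the helper name countKeys is hypothetical, and the snippet relies only on AsMap and Map.ForEach as defined above:

```go
// Sketch: iterating a HAMT through the version-agnostic Map interface.
// countKeys is a hypothetical helper.
package adt

import (
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/actors"
)

func countKeys(store Store, root cid.Cid, version actors.Version) (int, error) {
	m, err := AsMap(store, root, version)
	if err != nil {
		return 0, err
	}
	var (
		n   int
		val cbg.Deferred // values are decoded lazily; only the keys matter here
	)
	err = m.ForEach(&val, func(key string) error {
		n++
		return nil
	})
	return n, err
}
```
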
diff --git a/chain/events/state/diff_adt.go b/chain/actors/adt/diff_adt.go
similarity index 87%
rename from chain/events/state/diff_adt.go
rename to chain/actors/adt/diff_adt.go
index 39d7e8556..160e12e19 100644
--- a/chain/events/state/diff_adt.go
+++ b/chain/actors/adt/diff_adt.go
@@ -1,9 +1,9 @@
-package state
+package adt
import (
"bytes"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
typegen "github.com/whyrusleeping/cbor-gen"
)
@@ -26,7 +26,8 @@ type AdtArrayDiff interface {
 // - All values that exist in curArr and not in preArr are passed to AdtArrayDiff.Add()
// - All values that exist in preArr and in curArr are passed to AdtArrayDiff.Modify()
// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified.
-func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
+func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error {
+ notNew := make(map[int64]struct{}, curArr.Length())
prevVal := new(typegen.Deferred)
if err := preArr.ForEach(prevVal, func(i int64) error {
curVal := new(typegen.Deferred)
@@ -47,14 +48,17 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
return err
}
}
-
- return curArr.Delete(uint64(i))
+ notNew[i] = struct{}{}
+ return nil
}); err != nil {
return err
}
curVal := new(typegen.Deferred)
return curArr.ForEach(curVal, func(i int64) error {
+ if _, ok := notNew[i]; ok {
+ return nil
+ }
return out.Add(uint64(i), curVal)
})
}
@@ -69,13 +73,14 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
// Modify should be called when a value is modified in the map
// Remove should be called when a value is removed from the map
type AdtMapDiff interface {
- AsKey(key string) (adt.Keyer, error)
+ AsKey(key string) (abi.Keyer, error)
Add(key string, val *typegen.Deferred) error
Modify(key string, from, to *typegen.Deferred) error
Remove(key string, val *typegen.Deferred) error
}
-func DiffAdtMap(preMap, curMap *adt.Map, out AdtMapDiff) error {
+func DiffAdtMap(preMap, curMap Map, out AdtMapDiff) error {
+ notNew := make(map[string]struct{})
prevVal := new(typegen.Deferred)
if err := preMap.ForEach(prevVal, func(key string) error {
curVal := new(typegen.Deferred)
@@ -101,14 +106,17 @@ func DiffAdtMap(preMap, curMap *adt.Map, out AdtMapDiff) error {
return err
}
}
-
- return curMap.Delete(k)
+ notNew[key] = struct{}{}
+ return nil
}); err != nil {
return err
}
curVal := new(typegen.Deferred)
return curMap.ForEach(curVal, func(key string) error {
+ if _, ok := notNew[key]; ok {
+ return nil
+ }
return out.Add(key, curVal)
})
}
diff --git a/chain/events/state/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go
similarity index 85%
rename from chain/events/state/diff_adt_test.go
rename to chain/actors/adt/diff_adt_test.go
index 56a03bf33..1c0726003 100644
--- a/chain/events/state/diff_adt_test.go
+++ b/chain/actors/adt/diff_adt_test.go
@@ -1,4 +1,4 @@
-package state
+package adt
import (
"bytes"
@@ -11,8 +11,9 @@ import (
cbornode "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
)
@@ -21,8 +22,8 @@ func TestDiffAdtArray(t *testing.T) {
ctxstoreA := newContextStore()
ctxstoreB := newContextStore()
- arrA := adt.MakeEmptyArray(ctxstoreA)
- arrB := adt.MakeEmptyArray(ctxstoreB)
+ arrA := adt0.MakeEmptyArray(ctxstoreA)
+ arrB := adt0.MakeEmptyArray(ctxstoreB)
require.NoError(t, arrA.Set(0, runtime.CBORBytes([]byte{0}))) // delete
@@ -75,24 +76,24 @@ func TestDiffAdtMap(t *testing.T) {
ctxstoreA := newContextStore()
ctxstoreB := newContextStore()
- mapA := adt.MakeEmptyMap(ctxstoreA)
- mapB := adt.MakeEmptyMap(ctxstoreB)
+ mapA := adt0.MakeEmptyMap(ctxstoreA)
+ mapB := adt0.MakeEmptyMap(ctxstoreB)
- require.NoError(t, mapA.Put(adt.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
+ require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
- require.NoError(t, mapA.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
- require.NoError(t, mapB.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{1})))
+ require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
+ require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1})))
- require.NoError(t, mapA.Put(adt.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
+ require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
- require.NoError(t, mapA.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
- require.NoError(t, mapB.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0})))
+ require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
+ require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0})))
- require.NoError(t, mapA.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
- require.NoError(t, mapB.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{6})))
+ require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
+ require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6})))
- require.NoError(t, mapB.Put(adt.UIntKey(5), runtime.CBORBytes{8})) // add
- require.NoError(t, mapB.Put(adt.UIntKey(6), runtime.CBORBytes{9})) // add
+ require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add
+ require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add
changes := new(TestDiffMap)
@@ -134,12 +135,12 @@ type TestDiffMap struct {
var _ AdtMapDiff = &TestDiffMap{}
-func (t *TestDiffMap) AsKey(key string) (adt.Keyer, error) {
- k, err := adt.ParseUIntKey(key)
+func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) {
+ k, err := abi.ParseUIntKey(key)
if err != nil {
return nil, err
}
- return adt.UIntKey(k), nil
+ return abi.UIntKey(k), nil
}
func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
@@ -148,7 +149,7 @@ func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
if err != nil {
return err
}
- k, err := adt.ParseUIntKey(key)
+ k, err := abi.ParseUIntKey(key)
if err != nil {
return err
}
@@ -172,7 +173,7 @@ func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error {
return err
}
- k, err := adt.ParseUIntKey(key)
+ k, err := abi.ParseUIntKey(key)
if err != nil {
return err
}
@@ -198,7 +199,7 @@ func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error {
if err != nil {
return err
}
- k, err := adt.ParseUIntKey(key)
+ k, err := abi.ParseUIntKey(key)
if err != nil {
return err
}
@@ -291,12 +292,9 @@ func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error {
return nil
}
-func newContextStore() *contextStore {
+func newContextStore() Store {
ctx := context.Background()
bs := bstore.NewTemporarySync()
store := cbornode.NewCborStore(bs)
- return &contextStore{
- ctx: ctx,
- cst: store,
- }
+ return WrapStore(ctx, store)
}
diff --git a/chain/actors/adt/store.go b/chain/actors/adt/store.go
new file mode 100644
index 000000000..8dd9841a1
--- /dev/null
+++ b/chain/actors/adt/store.go
@@ -0,0 +1,17 @@
+package adt
+
+import (
+ "context"
+
+ adt "github.com/filecoin-project/specs-actors/actors/util/adt"
+ cbor "github.com/ipfs/go-ipld-cbor"
+)
+
+type Store interface {
+ Context() context.Context
+ cbor.IpldStore
+}
+
+func WrapStore(ctx context.Context, store cbor.IpldStore) Store {
+ return adt.WrapStore(ctx, store)
+}
diff --git a/chain/actors/aerrors/error.go b/chain/actors/aerrors/error.go
index e687982c8..12f802c8f 100644
--- a/chain/actors/aerrors/error.go
+++ b/chain/actors/aerrors/error.go
@@ -3,7 +3,7 @@ package aerrors
import (
"fmt"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/exitcode"
"golang.org/x/xerrors"
)
diff --git a/chain/actors/aerrors/error_test.go b/chain/actors/aerrors/error_test.go
index 4d87ac396..3bfd3d042 100644
--- a/chain/actors/aerrors/error_test.go
+++ b/chain/actors/aerrors/error_test.go
@@ -3,8 +3,8 @@ package aerrors_test
import (
"testing"
+ "github.com/filecoin-project/go-state-types/exitcode"
. "github.com/filecoin-project/lotus/chain/actors/aerrors"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/stretchr/testify/assert"
"golang.org/x/xerrors"
diff --git a/chain/actors/aerrors/wrap.go b/chain/actors/aerrors/wrap.go
index 338659966..0552829f9 100644
--- a/chain/actors/aerrors/wrap.go
+++ b/chain/actors/aerrors/wrap.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/exitcode"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
)
diff --git a/chain/actors/builtin/README.md b/chain/actors/builtin/README.md
new file mode 100644
index 000000000..21b3fd38f
--- /dev/null
+++ b/chain/actors/builtin/README.md
@@ -0,0 +1,29 @@
+# Actors
+
+This package contains shims for abstracting over different actor versions.
+
+## Design
+
+Shims in this package follow a few common design principles.
+
+### Structure Agnostic
+
+Shim interfaces defined in this package should (ideally) not change even if the
+structure of the underlying data changes. For example:
+
+* All shims store an internal "store" object. That way, state can be moved into
+ a separate object without needing to add a store to the function signature.
+* All functions must return an error, even if it is unused for now.
+
+### Minimal
+
+These interfaces should be expanded only as necessary, to keep the maintenance burden low.
+
+### Queries, not field accessors
+
+When possible, functions should query the state instead of simply acting as
+field accessors. These queries are more likely to remain stable across
+specs-actors upgrades than direct access to specific state fields.
+
+Note: there is a trade-off here. Avoid implementing _complicated_ query logic
+inside these shims, as it will need to be replicated in every shim.
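
To make the "queries, not field accessors" principle concrete, here is a minimal illustrative interface. Every name in it is hypothetical; it only sketches the shape the README describes:

```go
// Illustrative only: a shim exposes the questions callers actually have,
// rather than version-specific state fields, and every method returns an
// error even when the current implementation cannot fail.
package shimexample

import "github.com/filecoin-project/go-state-types/abi"

type State interface {
	// A query: each actors version decides internally how to answer it,
	// so the interface survives changes to the underlying state layout.
	HasPreCommittedSector(num abi.SectorNumber) (bool, error)
}
```
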
diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go
new file mode 100644
index 000000000..7b1b2a792
--- /dev/null
+++ b/chain/actors/builtin/account/account.go
@@ -0,0 +1,41 @@
+package account
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+ case builtin0.AccountActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.AccountActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ PubkeyAddress() (address.Address, error)
+}
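
A sketch of how callers are expected to consume this shim: load the state through Load, which dispatches on the actor code CID, and stick to query methods such as PubkeyAddress. The helper name pubkeyFor is hypothetical:

```go
// Sketch: resolving an account actor's public-key address without caring
// whether it is a v0 or v2 actor. pubkeyFor is a hypothetical helper.
package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/account"
	"github.com/filecoin-project/lotus/chain/types"
)

func pubkeyFor(store adt.Store, act *types.Actor) (address.Address, error) {
	st, err := account.Load(store, act) // picks the state shim by act.Code
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}
```
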
diff --git a/chain/actors/builtin/account/v0.go b/chain/actors/builtin/account/v0.go
new file mode 100644
index 000000000..67c555c5d
--- /dev/null
+++ b/chain/actors/builtin/account/v0.go
@@ -0,0 +1,30 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account0 "github.com/filecoin-project/specs-actors/actors/builtin/account"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ account0.State
+ store adt.Store
+}
+
+func (s *state0) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
diff --git a/chain/actors/builtin/account/v2.go b/chain/actors/builtin/account/v2.go
new file mode 100644
index 000000000..2664631bc
--- /dev/null
+++ b/chain/actors/builtin/account/v2.go
@@ -0,0 +1,30 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/account"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ account2.State
+ store adt.Store
+}
+
+func (s *state2) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go
new file mode 100644
index 000000000..cb24a2c33
--- /dev/null
+++ b/chain/actors/builtin/builtin.go
@@ -0,0 +1,100 @@
+package builtin
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
+)
+
+var SystemActorAddr = builtin0.SystemActorAddr
+var BurntFundsActorAddr = builtin0.BurntFundsActorAddr
+var ReserveAddress = makeAddress("t090")
+var RootVerifierAddress = makeAddress("t080")
+
+// TODO: Why does actors have 2 different versions of this?
+type SectorInfo = proof0.SectorInfo
+type PoStProof = proof0.PoStProof
+type FilterEstimate = smoothing0.FilterEstimate
+
+func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
+ return (FilterEstimate)(v0)
+}
+
+// Doesn't change between actors v0 and v2
+func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
+ return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
+}
+
+func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate {
+ return (FilterEstimate)(v2)
+}
+
+type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
+
+var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
+
+func RegisterActorState(code cid.Cid, loader ActorStateLoader) {
+ ActorStateLoaders[code] = loader
+}
+
+func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) {
+ loader, found := ActorStateLoaders[act.Code]
+ if !found {
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+ }
+ return loader(store, act.Head)
+}
+
+func ActorNameByCode(c cid.Cid) string {
+ switch {
+ case builtin0.IsBuiltinActor(c):
+ return builtin0.ActorNameByCode(c)
+ case builtin2.IsBuiltinActor(c):
+ return builtin2.ActorNameByCode(c)
+ default:
+ return ""
+ }
+}
+
+func IsBuiltinActor(c cid.Cid) bool {
+ return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c)
+}
+
+func IsAccountActor(c cid.Cid) bool {
+ return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID
+}
+
+func IsStorageMinerActor(c cid.Cid) bool {
+ return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID
+}
+
+func IsMultisigActor(c cid.Cid) bool {
+ return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID
+
+}
+
+func IsPaymentChannelActor(c cid.Cid) bool {
+ return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID
+}
+
+func makeAddress(addr string) address.Address {
+ ret, err := address.NewFromString(addr)
+ if err != nil {
+ panic(err)
+ }
+
+ return ret
+}
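
The loader registry above gives tooling a single entry point for decoding any registered builtin actor's state, regardless of version. A sketch of that inspection-style usage; the helper name describeActor is hypothetical:

```go
// Sketch: naming an actor from its code CID and loading its state through
// whichever loader the per-actor packages registered in their init().
// describeActor is a hypothetical helper.
package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/types"
)

func describeActor(store adt.Store, act *types.Actor) error {
	name := builtin.ActorNameByCode(act.Code)
	if name == "" {
		return fmt.Errorf("not a builtin actor: %s", act.Code)
	}
	st, err := builtin.Load(store, act)
	if err != nil {
		return err
	}
	fmt.Printf("%s state loaded as %T\n", name, st)
	return nil
}
```
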
diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go
new file mode 100644
index 000000000..5777bb890
--- /dev/null
+++ b/chain/actors/builtin/init/init.go
@@ -0,0 +1,57 @@
+package init
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+var Address = builtin0.InitActorAddr
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+ case builtin0.InitActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.InitActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ ResolveAddress(address address.Address) (address.Address, bool, error)
+ MapAddressToNewID(address address.Address) (address.Address, error)
+ NetworkName() (dtypes.NetworkName, error)
+
+ ForEachActor(func(id abi.ActorID, address address.Address) error) error
+
+ // Remove exists to support tooling that manipulates state for testing.
+ // It should not be used in production code, as init actor entries are
+ // immutable.
+ Remove(addrs ...address.Address) error
+
+ // Sets the network's name. This should only be used on upgrade/fork.
+ SetNetworkName(name string) error
+}
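
A sketch of resolving an arbitrary address to its ID form through the version-agnostic State interface above. The helper name resolveToID is hypothetical; the package is imported under the alias init_ because a package named init cannot be referenced directly in Go:

```go
// Sketch: resolving an address to its ID form via the init actor shim.
// resolveToID is a hypothetical helper.
package example

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

func resolveToID(store adt.Store, initActor *types.Actor, addr address.Address) (address.Address, error) {
	st, err := init_.Load(store, initActor)
	if err != nil {
		return address.Undef, err
	}
	id, found, err := st.ResolveAddress(addr)
	if err != nil {
		return address.Undef, err
	}
	if !found {
		return address.Undef, xerrors.Errorf("address %s not found in init actor state", addr)
	}
	return id, nil
}
```
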
diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go
new file mode 100644
index 000000000..ceb87f970
--- /dev/null
+++ b/chain/actors/builtin/init/v0.go
@@ -0,0 +1,81 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ init0.State
+ store adt.Store
+}
+
+func (s *state0) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state0) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state0) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt0.AsMap(s.store, s.State.AddressMap)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state0) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state0) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state0) Remove(addrs ...address.Address) (err error) {
+ m, err := adt0.AsMap(s.store, s.State.AddressMap)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
diff --git a/chain/actors/builtin/init/v2.go b/chain/actors/builtin/init/v2.go
new file mode 100644
index 000000000..5aa0ddc18
--- /dev/null
+++ b/chain/actors/builtin/init/v2.go
@@ -0,0 +1,81 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ init2.State
+ store adt.Store
+}
+
+func (s *state2) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state2) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state2) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt2.AsMap(s.store, s.State.AddressMap)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state2) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state2) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state2) Remove(addrs ...address.Address) (err error) {
+ m, err := adt2.AsMap(s.store, s.State.AddressMap)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
diff --git a/chain/actors/builtin/market/diff.go b/chain/actors/builtin/market/diff.go
new file mode 100644
index 000000000..d0b4a2fd3
--- /dev/null
+++ b/chain/actors/builtin/market/diff.go
@@ -0,0 +1,91 @@
+package market
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ cbg "github.com/whyrusleeping/cbor-gen"
+)
+
+func DiffDealProposals(pre, cur DealProposals) (*DealProposalChanges, error) {
+ results := new(DealProposalChanges)
+ if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketProposalsDiffer{results, pre, cur}); err != nil {
+ return nil, fmt.Errorf("diffing deal states: %w", err)
+ }
+ return results, nil
+}
+
+type marketProposalsDiffer struct {
+ Results *DealProposalChanges
+ pre, cur DealProposals
+}
+
+func (d *marketProposalsDiffer) Add(key uint64, val *cbg.Deferred) error {
+ dp, err := d.cur.decode(val)
+ if err != nil {
+ return err
+ }
+ d.Results.Added = append(d.Results.Added, ProposalIDState{abi.DealID(key), *dp})
+ return nil
+}
+
+func (d *marketProposalsDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
+ // short circuit, DealProposals are static
+ return nil
+}
+
+func (d *marketProposalsDiffer) Remove(key uint64, val *cbg.Deferred) error {
+ dp, err := d.pre.decode(val)
+ if err != nil {
+ return err
+ }
+ d.Results.Removed = append(d.Results.Removed, ProposalIDState{abi.DealID(key), *dp})
+ return nil
+}
+
+func DiffDealStates(pre, cur DealStates) (*DealStateChanges, error) {
+ results := new(DealStateChanges)
+ if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketStatesDiffer{results, pre, cur}); err != nil {
+ return nil, fmt.Errorf("diffing deal states: %w", err)
+ }
+ return results, nil
+}
+
+type marketStatesDiffer struct {
+ Results *DealStateChanges
+ pre, cur DealStates
+}
+
+func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error {
+ ds, err := d.cur.decode(val)
+ if err != nil {
+ return err
+ }
+ d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds})
+ return nil
+}
+
+func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
+ dsFrom, err := d.pre.decode(from)
+ if err != nil {
+ return err
+ }
+ dsTo, err := d.cur.decode(to)
+ if err != nil {
+ return err
+ }
+ if *dsFrom != *dsTo {
+ d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo})
+ }
+ return nil
+}
+
+func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error {
+ ds, err := d.pre.decode(val)
+ if err != nil {
+ return err
+ }
+ d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds})
+ return nil
+}
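
DiffDealProposals and DiffDealStates turn two snapshots of market state into structured change sets via the shared adt diff machinery. A sketch of collecting the IDs of deals that appear between two snapshots; market.State and the change-set types are defined in market.go just below, and the helper name addedDealIDs is hypothetical:

```go
// Sketch: listing deals present in cur but not in pre, using DiffDealStates.
// addedDealIDs is a hypothetical helper.
package example

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

func addedDealIDs(pre, cur market.State) ([]abi.DealID, error) {
	preStates, err := pre.States()
	if err != nil {
		return nil, err
	}
	curStates, err := cur.States()
	if err != nil {
		return nil, err
	}
	changes, err := market.DiffDealStates(preStates, curStates)
	if err != nil {
		return nil, err
	}
	ids := make([]abi.DealID, 0, len(changes.Added))
	for _, added := range changes.Added {
		ids = append(ids, added.ID)
	}
	return ids, nil
}
```
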
diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go
new file mode 100644
index 000000000..fd08a0119
--- /dev/null
+++ b/chain/actors/builtin/market/market.go
@@ -0,0 +1,138 @@
+package market
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+var Address = builtin0.StorageMarketActorAddr
+
+func Load(store adt.Store, act *types.Actor) (st State, err error) {
+ switch act.Code {
+ case builtin0.StorageMarketActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.StorageMarketActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+ BalancesChanged(State) (bool, error)
+ EscrowTable() (BalanceTable, error)
+ LockedTable() (BalanceTable, error)
+ TotalLocked() (abi.TokenAmount, error)
+ StatesChanged(State) (bool, error)
+ States() (DealStates, error)
+ ProposalsChanged(State) (bool, error)
+ Proposals() (DealProposals, error)
+ VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+ ) (weight, verifiedWeight abi.DealWeight, err error)
+}
+
+type BalanceTable interface {
+ ForEach(cb func(address.Address, abi.TokenAmount) error) error
+ Get(key address.Address) (abi.TokenAmount, error)
+}
+
+type DealStates interface {
+ ForEach(cb func(id abi.DealID, ds DealState) error) error
+ Get(id abi.DealID) (*DealState, bool, error)
+
+ array() adt.Array
+ decode(*cbg.Deferred) (*DealState, error)
+}
+
+type DealProposals interface {
+ ForEach(cb func(id abi.DealID, dp DealProposal) error) error
+ Get(id abi.DealID) (*DealProposal, bool, error)
+
+ array() adt.Array
+ decode(*cbg.Deferred) (*DealProposal, error)
+}
+
+type PublishStorageDealsParams = market0.PublishStorageDealsParams
+type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
+type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
+
+type ClientDealProposal = market0.ClientDealProposal
+
+type DealState struct {
+ SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
+ LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
+ SlashEpoch abi.ChainEpoch // -1 if deal never slashed
+}
+
+type DealProposal struct {
+ PieceCID cid.Cid
+ PieceSize abi.PaddedPieceSize
+ VerifiedDeal bool
+ Client address.Address
+ Provider address.Address
+ Label string
+ StartEpoch abi.ChainEpoch
+ EndEpoch abi.ChainEpoch
+ StoragePricePerEpoch abi.TokenAmount
+ ProviderCollateral abi.TokenAmount
+ ClientCollateral abi.TokenAmount
+}
+
+type DealStateChanges struct {
+ Added []DealIDState
+ Modified []DealStateChange
+ Removed []DealIDState
+}
+
+type DealIDState struct {
+ ID abi.DealID
+ Deal DealState
+}
+
+// DealStateChange is a change in deal state from -> to
+type DealStateChange struct {
+ ID abi.DealID
+ From *DealState
+ To *DealState
+}
+
+type DealProposalChanges struct {
+ Added []ProposalIDState
+ Removed []ProposalIDState
+}
+
+type ProposalIDState struct {
+ ID abi.DealID
+ Proposal DealProposal
+}
+
+func EmptyDealState() *DealState {
+ return &DealState{
+ SectorStartEpoch: -1,
+ SlashEpoch: -1,
+ LastUpdatedEpoch: -1,
+ }
+}
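
The State interface lets callers walk deal proposals without knowing which actors version produced the state. A sketch that lists the deal IDs proposed by a given provider; the helper name dealsForProvider is hypothetical:

```go
// Sketch: collecting deal IDs whose proposal names the given provider.
// dealsForProvider is a hypothetical helper.
package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

func dealsForProvider(st market.State, provider address.Address) ([]abi.DealID, error) {
	props, err := st.Proposals()
	if err != nil {
		return nil, err
	}
	var ids []abi.DealID
	err = props.ForEach(func(id abi.DealID, dp market.DealProposal) error {
		if dp.Provider == provider {
			ids = append(ids, id)
		}
		return nil
	})
	return ids, err
}
```
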
diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go
new file mode 100644
index 000000000..20d38b5f1
--- /dev/null
+++ b/chain/actors/builtin/market/v0.go
@@ -0,0 +1,204 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ market0.State
+ store adt.Store
+}
+
+func (s *state0) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state0) BalancesChanged(otherState State) (bool, error) {
+ otherState0, ok := otherState.(*state0)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState0.State.EscrowTable) || !s.State.LockedTable.Equals(otherState0.State.LockedTable), nil
+}
+
+func (s *state0) StatesChanged(otherState State) (bool, error) {
+ otherState0, ok := otherState.(*state0)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the deal states have changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState0.State.States), nil
+}
+
+func (s *state0) States() (DealStates, error) {
+ stateArray, err := adt0.AsArray(s.store, s.State.States)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates0{stateArray}, nil
+}
+
+func (s *state0) ProposalsChanged(otherState State) (bool, error) {
+ otherState0, ok := otherState.(*state0)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the proposals have changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState0.State.Proposals), nil
+}
+
+func (s *state0) Proposals() (DealProposals, error) {
+ proposalArray, err := adt0.AsArray(s.store, s.State.Proposals)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals0{proposalArray}, nil
+}
+
+func (s *state0) EscrowTable() (BalanceTable, error) {
+ bt, err := adt0.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable0{bt}, nil
+}
+
+func (s *state0) LockedTable() (BalanceTable, error) {
+ bt, err := adt0.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable0{bt}, nil
+}
+
+func (s *state0) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ return market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+}
+
+type balanceTable0 struct {
+ *adt0.BalanceTable
+}
+
+func (bt *balanceTable0) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt0.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates0 struct {
+ adt.Array
+}
+
+func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal0 market0.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal0)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV0DealState(deal0)
+ return &deal, true, nil
+}
+
+func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds0 market0.DealState
+ return s.Array.ForEach(&ds0, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV0DealState(ds0))
+ })
+}
+
+func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds0 market0.DealState
+ if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV0DealState(ds0)
+ return &ds, nil
+}
+
+func (s *dealStates0) array() adt.Array {
+ return s.Array
+}
+
+func fromV0DealState(v0 market0.DealState) DealState {
+ return (DealState)(v0)
+}
+
+type dealProposals0 struct {
+ adt.Array
+}
+
+func (s *dealProposals0) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal0 market0.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal0)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV0DealProposal(proposal0)
+ return &proposal, true, nil
+}
+
+func (s *dealProposals0) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp0 market0.DealProposal
+ return s.Array.ForEach(&dp0, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV0DealProposal(dp0))
+ })
+}
+
+func (s *dealProposals0) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp0 market0.DealProposal
+ if err := dp0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV0DealProposal(dp0)
+ return &dp, nil
+}
+
+func (s *dealProposals0) array() adt.Array {
+ return s.Array
+}
+
+func fromV0DealProposal(v0 market0.DealProposal) DealProposal {
+ return (DealProposal)(v0)
+}
diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go
new file mode 100644
index 000000000..a5e5c7b45
--- /dev/null
+++ b/chain/actors/builtin/market/v2.go
@@ -0,0 +1,205 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ market2.State
+ store adt.Store
+}
+
+func (s *state2) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state2) BalancesChanged(otherState State) (bool, error) {
+ otherState2, ok := otherState.(*state2)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState2.State.EscrowTable) || !s.State.LockedTable.Equals(otherState2.State.LockedTable), nil
+}
+
+func (s *state2) StatesChanged(otherState State) (bool, error) {
+ otherState2, ok := otherState.(*state2)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the deal states have changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState2.State.States), nil
+}
+
+func (s *state2) States() (DealStates, error) {
+ stateArray, err := adt2.AsArray(s.store, s.State.States)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates2{stateArray}, nil
+}
+
+func (s *state2) ProposalsChanged(otherState State) (bool, error) {
+ otherState2, ok := otherState.(*state2)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the proposals have changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState2.State.Proposals), nil
+}
+
+func (s *state2) Proposals() (DealProposals, error) {
+ proposalArray, err := adt2.AsArray(s.store, s.State.Proposals)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals2{proposalArray}, nil
+}
+
+func (s *state2) EscrowTable() (BalanceTable, error) {
+ bt, err := adt2.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable2{bt}, nil
+}
+
+func (s *state2) LockedTable() (BalanceTable, error) {
+ bt, err := adt2.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable2{bt}, nil
+}
+
+func (s *state2) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw, _, err := market2.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+type balanceTable2 struct {
+ *adt2.BalanceTable
+}
+
+func (bt *balanceTable2) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt2.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates2 struct {
+ adt.Array
+}
+
+func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal2 market2.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal2)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV2DealState(deal2)
+ return &deal, true, nil
+}
+
+func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds2 market2.DealState
+ return s.Array.ForEach(&ds2, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV2DealState(ds2))
+ })
+}
+
+func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds2 market2.DealState
+ if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV2DealState(ds2)
+ return &ds, nil
+}
+
+func (s *dealStates2) array() adt.Array {
+ return s.Array
+}
+
+func fromV2DealState(v2 market2.DealState) DealState {
+ return (DealState)(v2)
+}
+
+type dealProposals2 struct {
+ adt.Array
+}
+
+func (s *dealProposals2) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal2 market2.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal2)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV2DealProposal(proposal2)
+ return &proposal, true, nil
+}
+
+func (s *dealProposals2) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp2 market2.DealProposal
+ return s.Array.ForEach(&dp2, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV2DealProposal(dp2))
+ })
+}
+
+func (s *dealProposals2) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp2 market2.DealProposal
+ if err := dp2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV2DealProposal(dp2)
+ return &dp, nil
+}
+
+func (s *dealProposals2) array() adt.Array {
+ return s.Array
+}
+
+func fromV2DealProposal(v2 market2.DealProposal) DealProposal {
+ return (DealProposal)(v2)
+}
diff --git a/chain/actors/builtin/miner/diff.go b/chain/actors/builtin/miner/diff.go
new file mode 100644
index 000000000..dde4db890
--- /dev/null
+++ b/chain/actors/builtin/miner/diff.go
@@ -0,0 +1,127 @@
+package miner
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ cbg "github.com/whyrusleeping/cbor-gen"
+)
+
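+// DiffPreCommits returns the sector pre-commits that were added or removed
+// between the pre and cur miner states.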
+func DiffPreCommits(pre, cur State) (*PreCommitChanges, error) {
+ results := new(PreCommitChanges)
+
+ prep, err := pre.precommits()
+ if err != nil {
+ return nil, err
+ }
+
+ curp, err := cur.precommits()
+ if err != nil {
+ return nil, err
+ }
+
+ err = adt.DiffAdtMap(prep, curp, &preCommitDiffer{results, pre, cur})
+ if err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+type preCommitDiffer struct {
+ Results *PreCommitChanges
+ pre, after State
+}
+
+func (m *preCommitDiffer) AsKey(key string) (abi.Keyer, error) {
+ sector, err := abi.ParseUIntKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return abi.UIntKey(sector), nil
+}
+
+func (m *preCommitDiffer) Add(key string, val *cbg.Deferred) error {
+ sp, err := m.after.decodeSectorPreCommitOnChainInfo(val)
+ if err != nil {
+ return err
+ }
+ m.Results.Added = append(m.Results.Added, sp)
+ return nil
+}
+
+func (m *preCommitDiffer) Modify(key string, from, to *cbg.Deferred) error {
+ return nil
+}
+
+func (m *preCommitDiffer) Remove(key string, val *cbg.Deferred) error {
+ sp, err := m.pre.decodeSectorPreCommitOnChainInfo(val)
+ if err != nil {
+ return err
+ }
+ m.Results.Removed = append(m.Results.Removed, sp)
+ return nil
+}
+
+func DiffSectors(pre, cur State) (*SectorChanges, error) {
+ results := new(SectorChanges)
+
+ pres, err := pre.sectors()
+ if err != nil {
+ return nil, err
+ }
+
+ curs, err := cur.sectors()
+ if err != nil {
+ return nil, err
+ }
+
+ err = adt.DiffAdtArray(pres, curs, &sectorDiffer{results, pre, cur})
+ if err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+type sectorDiffer struct {
+ Results *SectorChanges
+ pre, after State
+}
+
+func (m *sectorDiffer) Add(key uint64, val *cbg.Deferred) error {
+ si, err := m.after.decodeSectorOnChainInfo(val)
+ if err != nil {
+ return err
+ }
+ m.Results.Added = append(m.Results.Added, si)
+ return nil
+}
+
+func (m *sectorDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
+ siFrom, err := m.pre.decodeSectorOnChainInfo(from)
+ if err != nil {
+ return err
+ }
+
+ siTo, err := m.after.decodeSectorOnChainInfo(to)
+ if err != nil {
+ return err
+ }
+
+ if siFrom.Expiration != siTo.Expiration {
+ m.Results.Extended = append(m.Results.Extended, SectorExtensions{
+ From: siFrom,
+ To: siTo,
+ })
+ }
+ return nil
+}
+
+func (m *sectorDiffer) Remove(key uint64, val *cbg.Deferred) error {
+ si, err := m.pre.decodeSectorOnChainInfo(val)
+ if err != nil {
+ return err
+ }
+ m.Results.Removed = append(m.Results.Removed, si)
+ return nil
+}
diff --git a/chain/actors/builtin/miner/diff_deadlines.go b/chain/actors/builtin/miner/diff_deadlines.go
new file mode 100644
index 000000000..e1e839960
--- /dev/null
+++ b/chain/actors/builtin/miner/diff_deadlines.go
@@ -0,0 +1,180 @@
+package miner
+
+import (
+ "errors"
+
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/exitcode"
+)
+
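+// DeadlinesDiff maps deadline indexes to the partition-level changes observed
+// in each deadline.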
+type DeadlinesDiff map[uint64]*DeadlineDiff
+
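+// DiffDeadlines diffs every deadline between two miner states. It returns nil
+// when no deadline has changed.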
+func DiffDeadlines(pre, cur State) (*DeadlinesDiff, error) {
+ changed, err := pre.DeadlinesChanged(cur)
+ if err != nil {
+ return nil, err
+ }
+ if !changed {
+ return nil, nil
+ }
+
+ numDl, err := pre.NumDeadlines()
+ if err != nil {
+ return nil, err
+ }
+ dlDiff := make(DeadlinesDiff, numDl)
+ if err := pre.ForEachDeadline(func(idx uint64, preDl Deadline) error {
+ curDl, err := cur.LoadDeadline(idx)
+ if err != nil {
+ return err
+ }
+
+ diff, err := DiffDeadline(preDl, curDl)
+ if err != nil {
+ return err
+ }
+
+ dlDiff[idx] = diff
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return &dlDiff, nil
+}
+
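+// DeadlineDiff maps partition indexes to the sector-level changes observed in
+// each partition of a deadline.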
+type DeadlineDiff map[uint64]*PartitionDiff
+
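+// DiffDeadline diffs the partitions of a single deadline. It returns nil when
+// no partition has changed.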
+func DiffDeadline(pre, cur Deadline) (*DeadlineDiff, error) {
+ changed, err := pre.PartitionsChanged(cur)
+ if err != nil {
+ return nil, err
+ }
+ if !changed {
+ return nil, nil
+ }
+
+ partDiff := make(DeadlineDiff)
+ if err := pre.ForEachPartition(func(idx uint64, prePart Partition) error {
+ // try loading current partition at this index
+ curPart, err := cur.LoadPartition(idx)
+ if err != nil {
+ if errors.Is(err, exitcode.ErrNotFound) {
+ // TODO correctness?
+ return nil // the partition was removed.
+ }
+ return err
+ }
+
+ // compare it with the previous partition
+ diff, err := DiffPartition(prePart, curPart)
+ if err != nil {
+ return err
+ }
+
+ partDiff[idx] = diff
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ // All previous partitions have been walked. Any partition present in cur
+ // but not in pre is new... can a new partition already contain faulty sectors?
+ // TODO is this correct?
+ if err := cur.ForEachPartition(func(idx uint64, curPart Partition) error {
+ if _, found := partDiff[idx]; found {
+ return nil
+ }
+ faults, err := curPart.FaultySectors()
+ if err != nil {
+ return err
+ }
+ recovering, err := curPart.RecoveringSectors()
+ if err != nil {
+ return err
+ }
+ partDiff[idx] = &PartitionDiff{
+ Removed: bitfield.New(),
+ Recovered: bitfield.New(),
+ Faulted: faults,
+ Recovering: recovering,
+ }
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ return &partDiff, nil
+}
+
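+// PartitionDiff describes the sector-level changes within a single partition:
+// sectors removed from the live set, sectors recovered from faults, sectors
+// newly faulted, and sectors newly marked as recovering.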
+type PartitionDiff struct {
+ Removed bitfield.BitField
+ Recovered bitfield.BitField
+ Faulted bitfield.BitField
+ Recovering bitfield.BitField
+}
+
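+// DiffPartition compares the live, faulty, recovering and active sector
+// bitfields of two partitions.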
+func DiffPartition(pre, cur Partition) (*PartitionDiff, error) {
+ prevLiveSectors, err := pre.LiveSectors()
+ if err != nil {
+ return nil, err
+ }
+ curLiveSectors, err := cur.LiveSectors()
+ if err != nil {
+ return nil, err
+ }
+
+ removed, err := bitfield.SubtractBitField(prevLiveSectors, curLiveSectors)
+ if err != nil {
+ return nil, err
+ }
+
+ prevRecoveries, err := pre.RecoveringSectors()
+ if err != nil {
+ return nil, err
+ }
+
+ curRecoveries, err := cur.RecoveringSectors()
+ if err != nil {
+ return nil, err
+ }
+
+ recovering, err := bitfield.SubtractBitField(curRecoveries, prevRecoveries)
+ if err != nil {
+ return nil, err
+ }
+
+ prevFaults, err := pre.FaultySectors()
+ if err != nil {
+ return nil, err
+ }
+
+ curFaults, err := cur.FaultySectors()
+ if err != nil {
+ return nil, err
+ }
+
+ faulted, err := bitfield.SubtractBitField(curFaults, prevFaults)
+ if err != nil {
+ return nil, err
+ }
+
+ // all current good sectors
+ curActiveSectors, err := cur.ActiveSectors()
+ if err != nil {
+ return nil, err
+ }
+
+ // sectors that were previously faulty and are now active are considered recovered.
+ recovered, err := bitfield.IntersectBitField(prevFaults, curActiveSectors)
+ if err != nil {
+ return nil, err
+ }
+
+ return &PartitionDiff{
+ Removed: removed,
+ Recovered: recovered,
+ Faulted: faulted,
+ Recovering: recovering,
+ }, nil
+}
diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go
new file mode 100644
index 000000000..8649d4351
--- /dev/null
+++ b/chain/actors/builtin/miner/miner.go
@@ -0,0 +1,184 @@
+package miner
+
+import (
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/filecoin-project/go-state-types/dline"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+// Unchanged between v0 and v2 actors
+var WPoStProvingPeriod = miner0.WPoStProvingPeriod
+var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
+var WPoStChallengeWindow = miner0.WPoStChallengeWindow
+var WPoStChallengeLookback = miner0.WPoStChallengeLookback
+var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
+
+const MinSectorExpiration = miner0.MinSectorExpiration
+
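+// Load returns a version-independent State wrapper for the given miner actor,
+// dispatching on its code CID.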
+func Load(store adt.Store, act *types.Actor) (st State, err error) {
+ switch act.Code {
+ case builtin0.StorageMinerActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.StorageMinerActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
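+// State is a version-independent view over the on-chain miner actor state.
+//
+// Illustrative use (a sketch; `adtStore` is assumed to be an adt.Store and
+// `act` the miner's *types.Actor, both obtained elsewhere):
+//
+//	st, err := miner.Load(adtStore, act)
+//	if err != nil {
+//		return err
+//	}
+//	info, err := st.Info()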
+type State interface {
+ cbor.Marshaler
+
+ // Total available balance to spend.
+ AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error)
+ // Funds that will vest by the given epoch.
+ VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error)
+ // Funds locked for various reasons.
+ LockedFunds() (LockedFunds, error)
+ FeeDebt() (abi.TokenAmount, error)
+
+ GetSector(abi.SectorNumber) (*SectorOnChainInfo, error)
+ FindSector(abi.SectorNumber) (*SectorLocation, error)
+ GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
+ GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
+ LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
+ NumLiveSectors() (uint64, error)
+ IsAllocated(abi.SectorNumber) (bool, error)
+
+ LoadDeadline(idx uint64) (Deadline, error)
+ ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
+ NumDeadlines() (uint64, error)
+ DeadlinesChanged(State) (bool, error)
+
+ Info() (MinerInfo, error)
+
+ DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error)
+
+ // Diff helpers. Used by Diff* functions internally.
+ sectors() (adt.Array, error)
+ decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
+ precommits() (adt.Map, error)
+ decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
+}
+
+type Deadline interface {
+ LoadPartition(idx uint64) (Partition, error)
+ ForEachPartition(cb func(idx uint64, part Partition) error) error
+ PostSubmissions() (bitfield.BitField, error)
+
+ PartitionsChanged(Deadline) (bool, error)
+}
+
+type Partition interface {
+ AllSectors() (bitfield.BitField, error)
+ FaultySectors() (bitfield.BitField, error)
+ RecoveringSectors() (bitfield.BitField, error)
+ LiveSectors() (bitfield.BitField, error)
+ ActiveSectors() (bitfield.BitField, error)
+}
+
+type SectorOnChainInfo struct {
+ SectorNumber abi.SectorNumber
+ SealProof abi.RegisteredSealProof
+ SealedCID cid.Cid
+ DealIDs []abi.DealID
+ Activation abi.ChainEpoch
+ Expiration abi.ChainEpoch
+ DealWeight abi.DealWeight
+ VerifiedDealWeight abi.DealWeight
+ InitialPledge abi.TokenAmount
+ ExpectedDayReward abi.TokenAmount
+ ExpectedStoragePledge abi.TokenAmount
+}
+
+type SectorPreCommitInfo = miner0.SectorPreCommitInfo
+
+type SectorPreCommitOnChainInfo struct {
+ Info SectorPreCommitInfo
+ PreCommitDeposit abi.TokenAmount
+ PreCommitEpoch abi.ChainEpoch
+ DealWeight abi.DealWeight
+ VerifiedDealWeight abi.DealWeight
+}
+
+type PoStPartition = miner0.PoStPartition
+type RecoveryDeclaration = miner0.RecoveryDeclaration
+type FaultDeclaration = miner0.FaultDeclaration
+
+// Params
+type DeclareFaultsParams = miner0.DeclareFaultsParams
+type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
+type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
+type ProveCommitSectorParams = miner0.ProveCommitSectorParams
+
+type MinerInfo struct {
+ Owner address.Address // Must be an ID-address.
+ Worker address.Address // Must be an ID-address.
+ NewWorker address.Address // Must be an ID-address.
+ ControlAddresses []address.Address // Must be ID-addresses.
+ WorkerChangeEpoch abi.ChainEpoch
+ PeerId *peer.ID
+ Multiaddrs []abi.Multiaddrs
+ SealProofType abi.RegisteredSealProof
+ SectorSize abi.SectorSize
+ WindowPoStPartitionSectors uint64
+ ConsensusFaultElapsed abi.ChainEpoch
+}
+
+type SectorExpiration struct {
+ OnTime abi.ChainEpoch
+
+ // Non-zero if the sector is faulty: the epoch at which it will be
+ // permanently removed if it does not recover.
+ Early abi.ChainEpoch
+}
+
+type SectorLocation struct {
+ Deadline uint64
+ Partition uint64
+}
+
+type SectorChanges struct {
+ Added []SectorOnChainInfo
+ Extended []SectorExtensions
+ Removed []SectorOnChainInfo
+}
+
+type SectorExtensions struct {
+ From SectorOnChainInfo
+ To SectorOnChainInfo
+}
+
+type PreCommitChanges struct {
+ Added []SectorPreCommitOnChainInfo
+ Removed []SectorPreCommitOnChainInfo
+}
+
+type LockedFunds struct {
+ VestingFunds abi.TokenAmount
+ InitialPledgeRequirement abi.TokenAmount
+ PreCommitDeposits abi.TokenAmount
+}
diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go
new file mode 100644
index 000000000..f9c6b3da3
--- /dev/null
+++ b/chain/actors/builtin/miner/utils.go
@@ -0,0 +1,28 @@
+package miner
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-bitfield"
+)
+
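+// AllPartSectors merges, across every deadline and partition of the given
+// miner state, the sector bitfields selected by sget.
+//
+// Illustrative use (a sketch; `mas` is assumed to be a loaded miner State):
+//
+//	faults, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)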
+func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
+ var parts []bitfield.BitField
+
+ err := mas.ForEachDeadline(func(dlidx uint64, dl Deadline) error {
+ return dl.ForEachPartition(func(partidx uint64, part Partition) error {
+ s, err := sget(part)
+ if err != nil {
+ return xerrors.Errorf("getting sector list (dl: %d, part %d): %w", dlidx, partidx, err)
+ }
+
+ parts = append(parts, s)
+ return nil
+ })
+ })
+ if err != nil {
+ return bitfield.BitField{}, err
+ }
+
+ return bitfield.MultiMerge(parts...)
+}
diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go
new file mode 100644
index 000000000..7e71c7611
--- /dev/null
+++ b/chain/actors/builtin/miner/v0.go
@@ -0,0 +1,390 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ miner0.State
+ store adt.Store
+}
+
+type deadline0 struct {
+ miner0.Deadline
+ store adt.Store
+}
+
+type partition0 struct {
+ miner0.Partition
+ store adt.Store
+}
+
+func (s *state0) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) {
+ return s.GetAvailableBalance(bal), nil
+}
+
+func (s *state0) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state0) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledgeRequirement,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state0) FeeDebt() (abi.TokenAmount, error) {
+ return big.Zero(), nil
+}
+
+func (s *state0) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledgeRequirement, nil
+}
+
+func (s *state0) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state0) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV0SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state0) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state0) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue).
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner0.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner0.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant)
+ if err != nil {
+ return err
+ }
+ var exp miner0.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV0SectorPreCommitOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner0.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info0 miner0.SectorOnChainInfo
+ if err := sectors.ForEach(&info0, func(_ int64) error {
+ info := fromV0SectorOnChainInfo(info0)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos0, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos0))
+ for i, info0 := range infos0 {
+ info := fromV0SectorOnChainInfo(*info0)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline0{*dl, s.store}, nil
+}
+
+func (s *state0) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner0.Deadline) error {
+ return cb(i, &deadline0{*dl, s.store})
+ })
+}
+
+func (s *state0) NumDeadlines() (uint64, error) {
+ return miner0.WPoStPeriodDeadlines, nil
+}
+
+func (s *state0) DeadlinesChanged(other State) (bool, error) {
+ other0, ok := other.(*state0)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other0.Deadlines), nil
+}
+
+func (s *state0) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ SealProofType: info.SealProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: -1,
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.DeadlineInfo(epoch), nil
+}
+
+func (s *state0) sectors() (adt.Array, error) {
+ return adt0.AsArray(s.store, s.Sectors)
+}
+
+func (s *state0) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner0.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV0SectorOnChainInfo(si), nil
+}
+
+func (s *state0) precommits() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.PreCommittedSectors)
+}
+
+func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner0.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV0SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (d *deadline0) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition0{*p, d.store}, nil
+}
+
+func (d *deadline0) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner0.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition0{part, d.store})
+ })
+}
+
+func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) {
+ other0, ok := other.(*deadline0)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
+}
+
+func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
+ return d.Deadline.PostSubmissions, nil
+}
+
+func (p *partition0) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition0) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
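+
+// LiveSectors and ActiveSectors complete the Partition interface. This is a
+// minimal sketch; it assumes the specs-actors v0 Partition helpers of the same
+// names return (bitfield.BitField, error).
+func (p *partition0) LiveSectors() (bitfield.BitField, error) {
+ return p.Partition.LiveSectors()
+}
+
+func (p *partition0) ActiveSectors() (bitfield.BitField, error) {
+ return p.Partition.ActiveSectors()
+}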
+
+func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
+ return (SectorOnChainInfo)(v0)
+}
+
+func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+ return (SectorPreCommitOnChainInfo)(v0)
+}
diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go
new file mode 100644
index 000000000..eed82257f
--- /dev/null
+++ b/chain/actors/builtin/miner/v2.go
@@ -0,0 +1,407 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ miner2.State
+ store adt.Store
+}
+
+type deadline2 struct {
+ miner2.Deadline
+ store adt.Store
+}
+
+type partition2 struct {
+ miner2.Partition
+ store adt.Store
+}
+
+func (s *state2) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) {
+ return s.GetAvailableBalance(bal)
+}
+
+func (s *state2) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state2) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state2) FeeDebt() (abi.TokenAmount, error) {
+ return s.State.FeeDebt, nil
+}
+
+func (s *state2) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge, nil
+}
+
+func (s *state2) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state2) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV2SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state2) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state2) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue).
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner2.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner2.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant)
+ if err != nil {
+ return err
+ }
+ var exp miner2.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV2SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner2.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info2 miner2.SectorOnChainInfo
+ if err := sectors.ForEach(&info2, func(_ int64) error {
+ info := fromV2SectorOnChainInfo(info2)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos2, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos2))
+ for i, info2 := range infos2 {
+ info := fromV2SectorOnChainInfo(*info2)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline2{*dl, s.store}, nil
+}
+
+func (s *state2) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner2.Deadline) error {
+ return cb(i, &deadline2{*dl, s.store})
+ })
+}
+
+func (s *state2) NumDeadlines() (uint64, error) {
+ return miner2.WPoStPeriodDeadlines, nil
+}
+
+func (s *state2) DeadlinesChanged(other State) (bool, error) {
+ other2, ok := other.(*state2)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other2.Deadlines), nil
+}
+
+func (s *state2) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ SealProofType: info.SealProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: info.ConsensusFaultElapsed,
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state2) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.DeadlineInfo(epoch), nil
+}
+
+func (s *state2) sectors() (adt.Array, error) {
+ return adt2.AsArray(s.store, s.Sectors)
+}
+
+func (s *state2) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner2.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV2SectorOnChainInfo(si), nil
+}
+
+func (s *state2) precommits() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.PreCommittedSectors)
+}
+
+func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner2.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV2SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (d *deadline2) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition2{*p, d.store}, nil
+}
+
+func (d *deadline2) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner2.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition2{part, d.store})
+ })
+}
+
+func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) {
+ other2, ok := other.(*deadline2)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
+}
+
+func (d *deadline2) PostSubmissions() (bitfield.BitField, error) {
+ return d.Deadline.PostSubmissions, nil
+}
+
+func (p *partition2) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition2) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition2) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
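+
+// LiveSectors and ActiveSectors complete the Partition interface. This is a
+// minimal sketch; it assumes the specs-actors v2 Partition helpers of the same
+// names return (bitfield.BitField, error).
+func (p *partition2) LiveSectors() (bitfield.BitField, error) {
+ return p.Partition.LiveSectors()
+}
+
+func (p *partition2) ActiveSectors() (bitfield.BitField, error) {
+ return p.Partition.ActiveSectors()
+}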
+
+func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
+ return SectorOnChainInfo{
+ SectorNumber: v2.SectorNumber,
+ SealProof: v2.SealProof,
+ SealedCID: v2.SealedCID,
+ DealIDs: v2.DealIDs,
+ Activation: v2.Activation,
+ Expiration: v2.Expiration,
+ DealWeight: v2.DealWeight,
+ VerifiedDealWeight: v2.VerifiedDealWeight,
+ InitialPledge: v2.InitialPledge,
+ ExpectedDayReward: v2.ExpectedDayReward,
+ ExpectedStoragePledge: v2.ExpectedStoragePledge,
+ }
+}
+
+func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+ return SectorPreCommitOnChainInfo{
+ Info: (SectorPreCommitInfo)(v2.Info),
+ PreCommitDeposit: v2.PreCommitDeposit,
+ PreCommitEpoch: v2.PreCommitEpoch,
+ DealWeight: v2.DealWeight,
+ VerifiedDealWeight: v2.VerifiedDealWeight,
+ }
+}
diff --git a/chain/actors/builtin/multisig/message.go b/chain/actors/builtin/multisig/message.go
new file mode 100644
index 000000000..b19287432
--- /dev/null
+++ b/chain/actors/builtin/multisig/message.go
@@ -0,0 +1,70 @@
+package multisig
+
+import (
+ "fmt"
+
+ "github.com/minio/blake2b-simd"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
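+// Message returns a MessageBuilder that constructs multisig actor messages
+// for the given actors version, sent from the `from` address.
+//
+// Illustrative use (a sketch; `msigAddr` and `target` are hypothetical
+// addresses supplied by the caller):
+//
+//	mb := multisig.Message(actors.Version2, from)
+//	msg, err := mb.Propose(msigAddr, target, abi.NewTokenAmount(0), 0, nil) // method 0 = Send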
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+ case actors.Version0:
+ return message0{from}
+ case actors.Version2:
+ return message2{message0{from}}
+ default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ // Create a new multisig with the specified parameters.
+ Create(signers []address.Address, threshold uint64,
+ vestingStart, vestingDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount) (*types.Message, error)
+
+ // Propose a transaction to the given multisig.
+ Propose(msig, target address.Address, amt abi.TokenAmount,
+ method abi.MethodNum, params []byte) (*types.Message, error)
+
+ // Approve a multisig transaction. The "hash" is optional.
+ Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+
+ // Cancel a multisig transaction. The "hash" is optional.
+ Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+}
+
+// this type is the same between v0 and v2
+type ProposalHashData = multisig2.ProposalHashData
+
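+// txnParams serializes the parameters for an Approve/Cancel message against
+// transaction `id`, attaching a blake2b proposal hash when `data` is provided.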
+func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
+ params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)}
+ if data != nil {
+ if data.Requester.Protocol() != address.ID {
+ return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
+ }
+ if data.Value.Sign() == -1 {
+ return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value)
+ }
+ if data.To == address.Undef {
+ return nil, xerrors.Errorf("proposed destination address must be set")
+ }
+ pser, err := data.Serialize()
+ if err != nil {
+ return nil, err
+ }
+ hash := blake2b.Sum256(pser)
+ params.ProposalHash = hash[:]
+ }
+
+ return actors.SerializeParams(&params)
+}
diff --git a/chain/actors/builtin/multisig/message0.go b/chain/actors/builtin/multisig/message0.go
new file mode 100644
index 000000000..dc43a9d5d
--- /dev/null
+++ b/chain/actors/builtin/multisig/message0.go
@@ -0,0 +1,142 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
+ multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message0 struct{ from address.Address }
+
+func (m message0) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ if unlockStart != 0 {
+ return nil, xerrors.Errorf("actors v0 does not support a non-zero vesting start time")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig0.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init0.ExecParams{
+ CodeCID: builtin0.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin0.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
+
+func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount,
+ method abi.MethodNum, params []byte) (*types.Message, error) {
+
+ if msig == address.Undef {
+ return nil, xerrors.Errorf("must provide a multisig address for proposal")
+ }
+
+ if to == address.Undef {
+ return nil, xerrors.Errorf("must provide a target address for proposal")
+ }
+
+ if amt.Sign() == -1 {
+ return nil, xerrors.Errorf("must provide a non-negative amount for proposed send")
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{
+ To: to,
+ Value: amt,
+ Method: method,
+ Params: params,
+ })
+ if actErr != nil {
+ return nil, xerrors.Errorf("failed to serialize parameters: %w", actErr)
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin0.MethodsMultisig.Propose,
+ Params: enc,
+ }, nil
+}
+
+func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) {
+ enc, err := txnParams(txID, hashData)
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: types.NewInt(0),
+ Method: builtin0.MethodsMultisig.Approve,
+ Params: enc,
+ }, nil
+}
+
+func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) {
+ enc, err := txnParams(txID, hashData)
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: types.NewInt(0),
+ Method: builtin0.MethodsMultisig.Cancel,
+ Params: enc,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/message2.go b/chain/actors/builtin/multisig/message2.go
new file mode 100644
index 000000000..da2700d06
--- /dev/null
+++ b/chain/actors/builtin/multisig/message2.go
@@ -0,0 +1,71 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+ multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message2 struct{ message0 }
+
+func (m message2) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig2.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ StartEpoch: unlockStart,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init2.ExecParams{
+ CodeCID: builtin2.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin2.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/state.go b/chain/actors/builtin/multisig/state.go
new file mode 100644
index 000000000..89a7eedad
--- /dev/null
+++ b/chain/actors/builtin/multisig/state.go
@@ -0,0 +1,52 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
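+// Load returns a version-independent wrapper around the multisig actor state
+// referenced by act, dispatching on its code CID.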
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+ case builtin0.MultisigActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.MultisigActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error)
+ StartEpoch() (abi.ChainEpoch, error)
+ UnlockDuration() (abi.ChainEpoch, error)
+ InitialBalance() (abi.TokenAmount, error)
+ Threshold() (uint64, error)
+ Signers() ([]address.Address, error)
+
+ ForEachPendingTxn(func(id int64, txn Transaction) error) error
+}
+
+type Transaction = msig0.Transaction
diff --git a/chain/actors/builtin/multisig/state0.go b/chain/actors/builtin/multisig/state0.go
new file mode 100644
index 000000000..c934343e7
--- /dev/null
+++ b/chain/actors/builtin/multisig/state0.go
@@ -0,0 +1,70 @@
+package multisig
+
+import (
+ "encoding/binary"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ msig0.State
+ store adt.Store
+}
+
+func (s *state0) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state0) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state0) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state0) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state0) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state0) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt0.AsMap(s.store, s.State.PendingTxns)
+ if err != nil {
+ return err
+ }
+ var out msig0.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out))
+ })
+}
diff --git a/chain/actors/builtin/multisig/state2.go b/chain/actors/builtin/multisig/state2.go
new file mode 100644
index 000000000..a78b07d55
--- /dev/null
+++ b/chain/actors/builtin/multisig/state2.go
@@ -0,0 +1,70 @@
+package multisig
+
+import (
+ "encoding/binary"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ msig2.State
+ store adt.Store
+}
+
+func (s *state2) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state2) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state2) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state2) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state2) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state2) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt2.AsMap(s.store, s.State.PendingTxns)
+ if err != nil {
+ return err
+ }
+ var out msig2.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out))
+ })
+}
diff --git a/chain/actors/builtin/paych/message.go b/chain/actors/builtin/paych/message.go
new file mode 100644
index 000000000..23b360394
--- /dev/null
+++ b/chain/actors/builtin/paych/message.go
@@ -0,0 +1,28 @@
+package paych
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+ case actors.Version0:
+ return message0{from}
+ case actors.Version2:
+ return message2{from}
+ default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error)
+ Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error)
+ Settle(paych address.Address) (*types.Message, error)
+ Collect(paych address.Address) (*types.Message, error)
+}
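+
+// Illustrative usage sketch; `from`, `to` and the amount are assumed to be
+// supplied by the caller:
+//
+//	msg, err := Message(actors.Version0, from).Create(to, abi.NewTokenAmount(1))
+//	if err != nil {
+//		// handle error
+//	}
+//	// msg is addressed to the init actor and carries the paych constructor
+//	// params; it can then be signed and pushed like any other message.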
diff --git a/chain/actors/builtin/paych/message0.go b/chain/actors/builtin/paych/message0.go
new file mode 100644
index 000000000..bfeb2731e
--- /dev/null
+++ b/chain/actors/builtin/paych/message0.go
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
+ paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message0 struct{ from address.Address }
+
+func (m message0) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych0.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init0.ExecParams{
+ CodeCID: builtin0.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin0.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message0) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych0.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin0.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message0) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin0.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message0) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin0.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/message2.go b/chain/actors/builtin/paych/message2.go
new file mode 100644
index 000000000..2cf3ef22e
--- /dev/null
+++ b/chain/actors/builtin/paych/message2.go
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+ paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message2 struct{ from address.Address }
+
+func (m message2) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych2.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init2.ExecParams{
+ CodeCID: builtin2.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin2.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message2) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych2.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin2.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message2) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin2.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message2) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin2.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/mock/mock.go b/chain/actors/builtin/paych/mock/mock.go
new file mode 100644
index 000000000..3b82511ff
--- /dev/null
+++ b/chain/actors/builtin/paych/mock/mock.go
@@ -0,0 +1,88 @@
+package mock
+
+import (
+ "io"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+)
+
+type mockState struct {
+ from address.Address
+ to address.Address
+ settlingAt abi.ChainEpoch
+ toSend abi.TokenAmount
+ lanes map[uint64]paych.LaneState
+}
+
+type mockLaneState struct {
+ redeemed big.Int
+ nonce uint64
+}
+
+// NewMockPayChState constructs a payment channel state with the given fixed
+// values that satisfies the paych.State interface.
+func NewMockPayChState(from address.Address,
+ to address.Address,
+ settlingAt abi.ChainEpoch,
+ lanes map[uint64]paych.LaneState,
+) paych.State {
+ return &mockState{from: from, to: to, settlingAt: settlingAt, toSend: big.NewInt(0), lanes: lanes}
+}
+
+// NewMockLaneState constructs a payment channel lane state with the given fixed
+// values that satisfies the paych.LaneState interface. Useful for populating
+// lanes when calling NewMockPayChState.
+func NewMockLaneState(redeemed big.Int, nonce uint64) paych.LaneState {
+ return &mockLaneState{redeemed, nonce}
+}
+
+func (ms *mockState) MarshalCBOR(io.Writer) error {
+ panic("not implemented")
+}
+
+// Channel owner, who has funded the actor
+func (ms *mockState) From() (address.Address, error) {
+ return ms.from, nil
+}
+
+// Recipient of payouts from channel
+func (ms *mockState) To() (address.Address, error) {
+ return ms.to, nil
+}
+
+// Height at which the channel can be `Collected`
+func (ms *mockState) SettlingAt() (abi.ChainEpoch, error) {
+ return ms.settlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (ms *mockState) ToSend() (abi.TokenAmount, error) {
+ return ms.toSend, nil
+}
+
+// Get total number of lanes
+func (ms *mockState) LaneCount() (uint64, error) {
+ return uint64(len(ms.lanes)), nil
+}
+
+// Iterate lane states
+func (ms *mockState) ForEachLaneState(cb func(idx uint64, dl paych.LaneState) error) error {
+ var lastErr error
+ for lane, state := range ms.lanes {
+ if err := cb(lane, state); err != nil {
+ lastErr = err
+ }
+ }
+ return lastErr
+}
+
+func (mls *mockLaneState) Redeemed() (big.Int, error) {
+ return mls.redeemed, nil
+}
+
+func (mls *mockLaneState) Nonce() (uint64, error) {
+ return mls.nonce, nil
+}
diff --git a/chain/actors/builtin/paych/state.go b/chain/actors/builtin/paych/state.go
new file mode 100644
index 000000000..20c7a74b7
--- /dev/null
+++ b/chain/actors/builtin/paych/state.go
@@ -0,0 +1,88 @@
+package paych
+
+import (
+ "encoding/base64"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ big "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+// Load returns an abstract copy of payment channel state, regardless of actor version
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+ case builtin0.PaymentChannelActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.PaymentChannelActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+// State is an abstract version of payment channel state that works across
+// versions
+type State interface {
+ cbor.Marshaler
+ // Channel owner, who has funded the actor
+ From() (address.Address, error)
+ // Recipient of payouts from channel
+ To() (address.Address, error)
+
+ // Height at which the channel can be `Collected`
+ SettlingAt() (abi.ChainEpoch, error)
+
+ // Amount successfully redeemed through the payment channel, paid out on `Collect()`
+ ToSend() (abi.TokenAmount, error)
+
+ // Get total number of lanes
+ LaneCount() (uint64, error)
+
+ // Iterate lane states
+ ForEachLaneState(cb func(idx uint64, dl LaneState) error) error
+}
+
+// LaneState is an abstract copy of the state of a single lane
+type LaneState interface {
+ Redeemed() (big.Int, error)
+ Nonce() (uint64, error)
+}
+
+type SignedVoucher = paych0.SignedVoucher
+type ModVerifyParams = paych0.ModVerifyParams
+
+// DecodeSignedVoucher decodes an unpadded base64url-encoded, CBOR-marshaled signed voucher.
+func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
+ data, err := base64.RawURLEncoding.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+
+ var sv SignedVoucher
+ if err := ipldcbor.DecodeInto(data, &sv); err != nil {
+ return nil, err
+ }
+
+ return &sv, nil
+}
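+
+// Illustrative usage sketch; `voucherStr` is assumed to be an unpadded
+// base64url string containing a CBOR-marshaled voucher:
+//
+//	sv, err := DecodeSignedVoucher(voucherStr)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = sv.Amount // fields are the specs-actors v0 SignedVoucher fields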
diff --git a/chain/actors/builtin/paych/state0.go b/chain/actors/builtin/paych/state0.go
new file mode 100644
index 000000000..8e0e3434e
--- /dev/null
+++ b/chain/actors/builtin/paych/state0.go
@@ -0,0 +1,104 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ paych0.State
+ store adt.Store
+ lsAmt *adt0.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state0) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state0) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state0) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state0) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state0) getOrLoadLsAmt() (*adt0.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt0.AsArray(s.store, s.State.LaneStates)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state0) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+// Iterate lane states
+func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+	// Note: lane IDs (the AMT indices) are chosen by the client, so the lane
+	// state AMT can be sparse and its indices can be very large.
+ var ls paych0.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState0{ls})
+ })
+}
+
+type laneState0 struct {
+ paych0.LaneState
+}
+
+func (ls *laneState0) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState0) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/paych/state2.go b/chain/actors/builtin/paych/state2.go
new file mode 100644
index 000000000..fbf4b9fde
--- /dev/null
+++ b/chain/actors/builtin/paych/state2.go
@@ -0,0 +1,104 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ paych2.State
+ store adt.Store
+ lsAmt *adt2.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state2) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state2) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state2) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state2) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state2) getOrLoadLsAmt() (*adt2.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt2.AsArray(s.store, s.State.LaneStates)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state2) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+// Iterate lane states
+func (s *state2) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+	// Note: lane IDs (the AMT indices) are chosen by the client, so the lane
+	// state AMT can be sparse and its indices can be very large.
+ var ls paych2.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState2{ls})
+ })
+}
+
+type laneState2 struct {
+ paych2.LaneState
+}
+
+func (ls *laneState2) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState2) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go
new file mode 100644
index 000000000..e683cfd96
--- /dev/null
+++ b/chain/actors/builtin/power/power.go
@@ -0,0 +1,62 @@
+package power
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+var Address = builtin0.StoragePowerActorAddr
+
+func Load(store adt.Store, act *types.Actor) (st State, err error) {
+ switch act.Code {
+ case builtin0.StoragePowerActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.StoragePowerActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ TotalLocked() (abi.TokenAmount, error)
+ TotalPower() (Claim, error)
+ TotalCommitted() (Claim, error)
+ TotalPowerSmoothed() (builtin.FilterEstimate, error)
+
+	// MinerCounts returns the number of participating miners and the total
+	// miner count. Participating miners are those with power above the
+	// consensus minimum threshold.
+ MinerCounts() (participating, total uint64, err error)
+ MinerPower(address.Address) (Claim, bool, error)
+ MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error)
+ ListAllMiners() ([]address.Address, error)
+}
+
+type Claim struct {
+ // Sum of raw byte power for a miner's sectors.
+ RawBytePower abi.StoragePower
+
+ // Sum of quality adjusted power for a miner's sectors.
+ QualityAdjPower abi.StoragePower
+}
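+
+// Illustrative usage sketch; `store`, `act` (the power actor's *types.Actor)
+// and `maddr` are assumed to be supplied by the caller:
+//
+//	st, err := Load(store, act)
+//	if err != nil {
+//		// handle error
+//	}
+//	total, _ := st.TotalPower()             // network-wide Claim
+//	claim, found, _ := st.MinerPower(maddr) // per-miner Claim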
diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go
new file mode 100644
index 000000000..e2a9cf382
--- /dev/null
+++ b/chain/actors/builtin/power/v0.go
@@ -0,0 +1,98 @@
+package power
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ power0.State
+ store adt.Store
+}
+
+func (s *state0) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state0) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state0) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := adt0.AsMap(s.store, s.Claims)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power0.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state0) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state0) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV0FilterEstimate(*s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state0) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state0) ListAllMiners() ([]address.Address, error) {
+ claims, err := adt0.AsMap(s.store, s.Claims)
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
diff --git a/chain/actors/builtin/power/v2.go b/chain/actors/builtin/power/v2.go
new file mode 100644
index 000000000..6346a09b6
--- /dev/null
+++ b/chain/actors/builtin/power/v2.go
@@ -0,0 +1,98 @@
+package power
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ power2.State
+ store adt.Store
+}
+
+func (s *state2) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state2) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state2) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state2) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := adt2.AsMap(s.store, s.Claims)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power2.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state2) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state2) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV2FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state2) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state2) ListAllMiners() ([]address.Address, error) {
+ claims, err := adt2.AsMap(s.store, s.Claims)
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go
new file mode 100644
index 000000000..065f242e2
--- /dev/null
+++ b/chain/actors/builtin/reward/reward.go
@@ -0,0 +1,58 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/cbor"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+var Address = builtin0.RewardActorAddr
+
+func Load(store adt.Store, act *types.Actor) (st State, err error) {
+ switch act.Code {
+ case builtin0.RewardActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.RewardActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ ThisEpochBaselinePower() (abi.StoragePower, error)
+ ThisEpochReward() (abi.StoragePower, error)
+ ThisEpochRewardSmoothed() (builtin.FilterEstimate, error)
+
+ EffectiveBaselinePower() (abi.StoragePower, error)
+ EffectiveNetworkTime() (abi.ChainEpoch, error)
+
+ TotalStoragePowerReward() (abi.TokenAmount, error)
+
+ CumsumBaseline() (abi.StoragePower, error)
+ CumsumRealized() (abi.StoragePower, error)
+
+ InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error)
+ PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error)
+}
+
+type AwardBlockRewardParams = reward0.AwardBlockRewardParams
diff --git a/chain/actors/builtin/reward/v0.go b/chain/actors/builtin/reward/v0.go
new file mode 100644
index 000000000..0efd0b482
--- /dev/null
+++ b/chain/actors/builtin/reward/v0.go
@@ -0,0 +1,83 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
+ smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ reward0.State
+ store adt.Store
+}
+
+func (s *state0) ThisEpochReward() (abi.StoragePower, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil
+}
+
+func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state0) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalMined, nil
+}
+
+func (s *state0) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state0) CumsumBaseline() (abi.StoragePower, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state0) CumsumRealized() (abi.StoragePower, error) {
+	return s.State.CumsumRealized, nil
+}
+
+func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner0.InitialPledgeForPower(
+ sectorWeight,
+ s.State.ThisEpochBaselinePower,
+ networkTotalPledge,
+ s.State.ThisEpochRewardSmoothed,
+ &smoothing0.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply), nil
+}
+
+func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner0.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ &smoothing0.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
diff --git a/chain/actors/builtin/reward/v2.go b/chain/actors/builtin/reward/v2.go
new file mode 100644
index 000000000..ec0709c39
--- /dev/null
+++ b/chain/actors/builtin/reward/v2.go
@@ -0,0 +1,86 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ reward2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/reward"
+ smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ reward2.State
+ store adt.Store
+}
+
+func (s *state2) ThisEpochReward() (abi.StoragePower, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state2) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+}
+
+func (s *state2) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state2) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalStoragePowerReward, nil
+}
+
+func (s *state2) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state2) CumsumBaseline() (abi.StoragePower, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state2) CumsumRealized() (abi.StoragePower, error) {
+	return s.State.CumsumRealized, nil
+}
+
+func (s *state2) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner2.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing2.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+
+func (s *state2) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner2.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ smoothing2.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go
new file mode 100644
index 000000000..4136c0c30
--- /dev/null
+++ b/chain/actors/builtin/verifreg/util.go
@@ -0,0 +1,46 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+)
+
+func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) {
+ if addr.Protocol() != address.ID {
+ return false, big.Zero(), xerrors.Errorf("can only look up ID addresses")
+ }
+
+ vh, err := adt.AsMap(store, root, ver)
+ if err != nil {
+ return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err)
+ }
+
+ var dcap abi.StoragePower
+ if found, err := vh.Get(abi.AddrKey(addr), &dcap); err != nil {
+ return false, big.Zero(), xerrors.Errorf("looking up addr: %w", err)
+ } else if !found {
+ return false, big.Zero(), nil
+ }
+
+ return true, dcap, nil
+}
+
+func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ vh, err := adt.AsMap(store, root, ver)
+ if err != nil {
+ return xerrors.Errorf("loading verified clients: %w", err)
+ }
+ var dcap abi.StoragePower
+ return vh.ForEach(&dcap, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, dcap)
+ })
+}
diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go
new file mode 100644
index 000000000..64def4706
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v0.go
@@ -0,0 +1,48 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state0 struct {
+ verifreg0.State
+ store adt.Store
+}
+
+func (s *state0) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr)
+}
+
+func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr)
+}
+
+func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb)
+}
+
+func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb)
+}
diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go
new file mode 100644
index 000000000..5ee3bad05
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v2.go
@@ -0,0 +1,48 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+type state2 struct {
+ verifreg2.State
+ store adt.Store
+}
+
+func (s *state2) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr)
+}
+
+func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr)
+}
+
+func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb)
+}
+
+func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb)
+}
diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go
new file mode 100644
index 000000000..204cdae95
--- /dev/null
+++ b/chain/actors/builtin/verifreg/verifreg.go
@@ -0,0 +1,47 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/cbor"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+ builtin.RegisterActorState(builtin0.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+ builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+}
+
+var Address = builtin0.VerifiedRegistryActorAddr
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+ case builtin0.VerifiedRegistryActorCodeID:
+ return load0(store, act.Head)
+ case builtin2.VerifiedRegistryActorCodeID:
+ return load2(store, act.Head)
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ RootKey() (address.Address, error)
+ VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
+ VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
+ ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
+ ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
+}
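+
+// Illustrative usage sketch; `store`, `act` (the verified registry actor) and
+// `clientAddr` are assumed to be supplied by the caller. The data cap lookups
+// require ID addresses (see getDataCap in util.go):
+//
+//	st, err := Load(store, act)
+//	if err != nil {
+//		// handle error
+//	}
+//	found, dcap, err := st.VerifiedClientDataCap(clientAddr)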
diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go
new file mode 100644
index 000000000..ba09e4424
--- /dev/null
+++ b/chain/actors/policy/policy.go
@@ -0,0 +1,116 @@
+package policy
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
+)
+
+const (
+ ChainFinality = miner0.ChainFinality
+ SealRandomnessLookback = ChainFinality
+)
+
+// SetSupportedProofTypes sets supported proof types, across all actor versions.
+// This should only be used for testing.
+func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
+ newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types))
+ for _, t := range types {
+ newTypes[t] = struct{}{}
+ }
+ // Set for all miner versions.
+ miner0.SupportedProofTypes = newTypes
+ miner2.SupportedProofTypes = newTypes
+}
+
+// AddSupportedProofTypes adds to the set of supported proof types, across all actor versions.
+// This should only be used for testing.
+func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
+ for _, t := range types {
+ // Set for all miner versions.
+ miner0.SupportedProofTypes[t] = struct{}{}
+ miner2.SupportedProofTypes[t] = struct{}{}
+ }
+}
+
+// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all
+// actors versions. Use for testing.
+func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
+ // Set for all miner versions.
+ miner0.PreCommitChallengeDelay = delay
+ miner2.PreCommitChallengeDelay = delay
+}
+
+// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
+func GetPreCommitChallengeDelay() abi.ChainEpoch {
+ return miner0.PreCommitChallengeDelay
+}
+
+// SetConsensusMinerMinPower sets the minimum power an individual miner must
+// meet for leader election, across all actor versions. This should only be
+// used for testing.
+func SetConsensusMinerMinPower(p abi.StoragePower) {
+ power0.ConsensusMinerMinPower = p
+ for _, policy := range builtin2.SealProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+}
+
+// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
+// only be used for testing.
+func SetMinVerifiedDealSize(size abi.StoragePower) {
+ verifreg0.MinVerifiedDealSize = size
+ verifreg2.MinVerifiedDealSize = size
+}
+
+func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
+ switch ver {
+ case actors.Version0:
+ return miner0.MaxSealDuration[t]
+ case actors.Version2:
+ return miner2.MaxProveCommitDuration[t]
+ default:
+ panic("unsupported actors version")
+ }
+}
+
+func DealProviderCollateralBounds(
+ size abi.PaddedPieceSize, verified bool,
+ rawBytePower, qaPower, baselinePower abi.StoragePower,
+ circulatingFil abi.TokenAmount, nwVer network.Version,
+) (min, max abi.TokenAmount) {
+ switch actors.VersionForNetwork(nwVer) {
+ case actors.Version0:
+ return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
+ case actors.Version2:
+ return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+ default:
+ panic("unsupported network version")
+ }
+}
+
+// Sets the challenge window and scales the proving period to match (such that
+// there are always 48 challenge windows in a proving period).
+func SetWPoStChallengeWindow(period abi.ChainEpoch) {
+ miner0.WPoStChallengeWindow = period
+ miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines)
+
+ miner2.WPoStChallengeWindow = period
+ miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines)
+}
+
+func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
+ if nwVer <= network.Version3 {
+ return 10
+ }
+
+ return ChainFinality
+}
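+
+// Illustrative sketch of how the setters above might be combined from a
+// test's setup code (the values are arbitrary examples):
+//
+//	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+//	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+//	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
+//	policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))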
diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go
new file mode 100644
index 000000000..62e7f8964
--- /dev/null
+++ b/chain/actors/policy/policy_test.go
@@ -0,0 +1,50 @@
+package policy
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
+)
+
+func TestSupportedProofTypes(t *testing.T) {
+ var oldTypes []abi.RegisteredSealProof
+ for t := range miner0.SupportedProofTypes {
+ oldTypes = append(oldTypes, t)
+ }
+ t.Cleanup(func() {
+ SetSupportedProofTypes(oldTypes...)
+ })
+
+ SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ require.EqualValues(t,
+ miner0.SupportedProofTypes,
+ map[abi.RegisteredSealProof]struct{}{
+ abi.RegisteredSealProof_StackedDrg2KiBV1: {},
+ },
+ )
+ AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1)
+ require.EqualValues(t,
+ miner0.SupportedProofTypes,
+ map[abi.RegisteredSealProof]struct{}{
+ abi.RegisteredSealProof_StackedDrg2KiBV1: {},
+ abi.RegisteredSealProof_StackedDrg8MiBV1: {},
+ },
+ )
+}
+
+// Tests assumptions about policies being the same between actor versions.
+func TestAssumptions(t *testing.T) {
+ require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes)
+ require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
+ require.Equal(t, miner0.ChainFinality, miner2.ChainFinality)
+ require.Equal(t, miner0.WPoStChallengeWindow, miner2.WPoStChallengeWindow)
+ require.Equal(t, miner0.WPoStProvingPeriod, miner2.WPoStProvingPeriod)
+ require.Equal(t, miner0.WPoStPeriodDeadlines, miner2.WPoStPeriodDeadlines)
+ require.True(t, verifreg0.MinVerifiedDealSize.Equals(verifreg2.MinVerifiedDealSize))
+}
diff --git a/chain/actors/version.go b/chain/actors/version.go
new file mode 100644
index 000000000..17af8b08b
--- /dev/null
+++ b/chain/actors/version.go
@@ -0,0 +1,26 @@
+package actors
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/network"
+)
+
+type Version int
+
+const (
+ Version0 Version = 0
+ Version2 Version = 2
+)
+
+// VersionForNetwork converts a network version into the corresponding actors version.
+func VersionForNetwork(version network.Version) Version {
+ switch version {
+ case network.Version0, network.Version1, network.Version2, network.Version3:
+ return Version0
+ case network.Version4:
+ return Version2
+ default:
+ panic(fmt.Sprintf("unsupported network version %d", version))
+ }
+}
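+
+// Illustrative sketch of the mapping above:
+//
+//	v := VersionForNetwork(network.Version4) // v == Version2
+//	v = VersionForNetwork(network.Version1)  // v == Version0
+//	// Unknown network versions panic, so callers should only pass versions
+//	// they know are supported.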
diff --git a/chain/badtscache.go b/chain/badtscache.go
index 103237307..3c5bf05ef 100644
--- a/chain/badtscache.go
+++ b/chain/badtscache.go
@@ -56,6 +56,10 @@ func (bts *BadBlockCache) Add(c cid.Cid, bbr BadBlockReason) {
bts.badBlocks.Add(c, bbr)
}
+func (bts *BadBlockCache) Remove(c cid.Cid) {
+ bts.badBlocks.Remove(c)
+}
+
func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) {
rval, ok := bts.badBlocks.Get(c)
if !ok {
diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go
index 23b062bea..9543bec54 100644
--- a/chain/beacon/beacon.go
+++ b/chain/beacon/beacon.go
@@ -3,7 +3,7 @@ package beacon
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log"
"golang.org/x/xerrors"
@@ -18,6 +18,23 @@ type Response struct {
Err error
}
+type Schedule []BeaconPoint
+
+func (bs Schedule) BeaconForEpoch(e abi.ChainEpoch) RandomBeacon {
+ for i := len(bs) - 1; i >= 0; i-- {
+ bp := bs[i]
+ if e >= bp.Start {
+ return bp.Beacon
+ }
+ }
+ return bs[0].Beacon
+}
+
+type BeaconPoint struct {
+ Start abi.ChainEpoch
+ Beacon RandomBeacon
+}
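+
+// Illustrative sketch; `oldBeacon` and `newBeacon` are assumed RandomBeacon
+// implementations, with a fork to the new beacon at epoch 100:
+//
+//	sched := Schedule{
+//		{Start: 0, Beacon: oldBeacon},
+//		{Start: 100, Beacon: newBeacon},
+//	}
+//	_ = sched.BeaconForEpoch(42)  // oldBeacon
+//	_ = sched.BeaconForEpoch(100) // newBeacon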
+
// RandomBeacon represents a system that provides randomness to Lotus.
// Other components interrogate the RandomBeacon to acquire randomness that's
// valid for a specific chain epoch. Also to verify beacon entries that have
@@ -25,11 +42,30 @@ type Response struct {
type RandomBeacon interface {
Entry(context.Context, uint64) <-chan Response
VerifyEntry(types.BeaconEntry, types.BeaconEntry) error
- MaxBeaconRoundForEpoch(abi.ChainEpoch, types.BeaconEntry) uint64
+ MaxBeaconRoundForEpoch(abi.ChainEpoch) uint64
}
-func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.BeaconEntry) error {
- maxRound := b.MaxBeaconRoundForEpoch(h.Height, prevEntry)
+func ValidateBlockValues(bSchedule Schedule, h *types.BlockHeader, parentEpoch abi.ChainEpoch,
+ prevEntry types.BeaconEntry) error {
+ {
+ parentBeacon := bSchedule.BeaconForEpoch(parentEpoch)
+ currBeacon := bSchedule.BeaconForEpoch(h.Height)
+ if parentBeacon != currBeacon {
+ if len(h.BeaconEntries) != 2 {
+ return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries))
+ }
+ err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0])
+ if err != nil {
+ return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w",
+ h.BeaconEntries[1], h.BeaconEntries[0], err)
+ }
+ return nil
+ }
+ }
+
+ // TODO: fork logic
+ b := bSchedule.BeaconForEpoch(h.Height)
+ maxRound := b.MaxBeaconRoundForEpoch(h.Height)
if maxRound == prevEntry.Round {
if len(h.BeaconEntries) != 0 {
return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries))
@@ -56,10 +92,35 @@ func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.B
return nil
}
-func BeaconEntriesForBlock(ctx context.Context, beacon RandomBeacon, round abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) {
+func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) {
+ {
+ parentBeacon := bSchedule.BeaconForEpoch(parentEpoch)
+ currBeacon := bSchedule.BeaconForEpoch(epoch)
+ if parentBeacon != currBeacon {
+ // Fork logic
+ round := currBeacon.MaxBeaconRoundForEpoch(epoch)
+ out := make([]types.BeaconEntry, 2)
+ rch := currBeacon.Entry(ctx, round-1)
+ res := <-rch
+ if res.Err != nil {
+ return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err)
+ }
+ out[0] = res.Entry
+ rch = currBeacon.Entry(ctx, round)
+ res = <-rch
+ if res.Err != nil {
+ return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err)
+ }
+ out[1] = res.Entry
+ return out, nil
+ }
+ }
+
+ beacon := bSchedule.BeaconForEpoch(epoch)
+
start := build.Clock.Now()
- maxRound := beacon.MaxBeaconRoundForEpoch(round, prev)
+ maxRound := beacon.MaxBeaconRoundForEpoch(epoch)
if maxRound == prev.Round {
return nil, nil
}
@@ -82,7 +143,7 @@ func BeaconEntriesForBlock(ctx context.Context, beacon RandomBeacon, round abi.C
out = append(out, resp.Entry)
cur = resp.Entry.Round - 1
case <-ctx.Done():
- return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for round %d: %w", round, ctx.Err())
+ return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for epoch %d: %w", epoch, ctx.Err())
}
}
diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go
index 76bf01493..6e8e83a20 100644
--- a/chain/beacon/drand/drand.go
+++ b/chain/beacon/drand/drand.go
@@ -19,7 +19,7 @@ import (
logging "github.com/ipfs/go-log"
pubsub "github.com/libp2p/go-libp2p-pubsub"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon"
@@ -187,7 +187,7 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
return err
}
-func (db *DrandBeacon) MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 {
+func (db *DrandBeacon) MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch) uint64 {
// TODO: sometimes the genesis time for filecoin is zero and this goes negative
latestTs := ((uint64(filEpoch) * db.filRoundTime) + db.filGenTime) - db.filRoundTime
dround := (latestTs - db.drandGenTime) / uint64(db.interval.Seconds())
diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go
index 8d7c1b2cc..0cb9c2ba8 100644
--- a/chain/beacon/drand/drand_test.go
+++ b/chain/beacon/drand/drand_test.go
@@ -12,7 +12,7 @@ import (
)
func TestPrintGroupInfo(t *testing.T) {
- server := build.DrandConfig().Servers[0]
+ server := build.DrandConfigs[build.DrandIncentinet].Servers[0]
c, err := hclient.New(server, nil, nil)
assert.NoError(t, err)
cg := c.(interface {
diff --git a/chain/beacon/mock.go b/chain/beacon/mock.go
index dc45ae895..502ff2ba5 100644
--- a/chain/beacon/mock.go
+++ b/chain/beacon/mock.go
@@ -6,8 +6,8 @@ import (
"encoding/binary"
"time"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/minio/blake2b-simd"
"golang.org/x/xerrors"
)
@@ -53,11 +53,7 @@ func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry)
return nil
}
-func (mb *mockBeacon) IsEntryForEpoch(e types.BeaconEntry, epoch abi.ChainEpoch, nulls int) (bool, error) {
- return int64(e.Round) <= int64(epoch) && int64(epoch)-int64(nulls) >= int64(e.Round), nil
-}
-
-func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 {
+func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch) uint64 {
return uint64(epoch)
}
diff --git a/chain/checkpoint.go b/chain/checkpoint.go
new file mode 100644
index 000000000..8f99d73e4
--- /dev/null
+++ b/chain/checkpoint.go
@@ -0,0 +1,81 @@
+package chain
+
+import (
+ "encoding/json"
+
+ "github.com/filecoin-project/lotus/chain/types"
+
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/ipfs/go-datastore"
+ "golang.org/x/xerrors"
+)
+
+var CheckpointKey = datastore.NewKey("/chain/checks")
+
+func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) {
+ haveChks, err := ds.Has(CheckpointKey)
+ if err != nil {
+ return types.EmptyTSK, err
+ }
+
+ if !haveChks {
+ return types.EmptyTSK, nil
+ }
+
+ tskBytes, err := ds.Get(CheckpointKey)
+ if err != nil {
+ return types.EmptyTSK, err
+ }
+
+ var tsk types.TipSetKey
+ err = json.Unmarshal(tskBytes, &tsk)
+ if err != nil {
+ return types.EmptyTSK, err
+ }
+
+ return tsk, err
+}
+
+func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error {
+ if tsk == types.EmptyTSK {
+ return xerrors.Errorf("called with empty tsk")
+ }
+
+ syncer.checkptLk.Lock()
+ defer syncer.checkptLk.Unlock()
+
+ ts, err := syncer.ChainStore().LoadTipSet(tsk)
+ if err != nil {
+ return xerrors.Errorf("cannot find tipset: %w", err)
+ }
+
+ hts := syncer.ChainStore().GetHeaviestTipSet()
+ anc, err := syncer.ChainStore().IsAncestorOf(ts, hts)
+ if err != nil {
+ return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
+ }
+
+ if !hts.Equals(ts) && !anc {
+		return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain")
+ }
+
+ tskBytes, err := json.Marshal(tsk)
+ if err != nil {
+ return err
+ }
+
+ err = syncer.ds.Put(CheckpointKey, tskBytes)
+ if err != nil {
+ return err
+ }
+
+ syncer.checkpt = tsk
+
+ return nil
+}
+
+func (syncer *Syncer) GetCheckpoint() types.TipSetKey {
+ syncer.checkptLk.Lock()
+ defer syncer.checkptLk.Unlock()
+ return syncer.checkpt
+}
diff --git a/chain/events/events.go b/chain/events/events.go
index 4550fc98a..e35e91366 100644
--- a/chain/events/events.go
+++ b/chain/events/events.go
@@ -5,7 +5,7 @@ import (
"sync"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
@@ -35,6 +35,7 @@ type eventAPI interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+ ChainHead(context.Context) (*types.TipSet, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
@@ -57,7 +58,7 @@ type Events struct {
func NewEvents(ctx context.Context, api eventAPI) *Events {
gcConfidence := 2 * build.ForkLengthThreshold
- tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight)
+ tsc := newTSCache(gcConfidence, api)
e := &Events{
api: api,
diff --git a/chain/events/events_called.go b/chain/events/events_called.go
index 196034a9a..753206093 100644
--- a/chain/events/events_called.go
+++ b/chain/events/events_called.go
@@ -5,7 +5,7 @@ import (
"math"
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@@ -307,7 +307,10 @@ func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHa
defer e.lk.Unlock()
// Check if the event has already occurred
- ts := e.tsc.best()
+ ts, err := e.tsc.best()
+ if err != nil {
+ return 0, xerrors.Errorf("error getting best tipset: %w", err)
+ }
done, more, err := check(ts)
if err != nil {
return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err)
diff --git a/chain/events/events_height.go b/chain/events/events_height.go
index 24d758a31..c8dd905d9 100644
--- a/chain/events/events_height.go
+++ b/chain/events/events_height.go
@@ -4,8 +4,9 @@ import (
"context"
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"go.opencensus.io/trace"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -26,7 +27,6 @@ type heightEvents struct {
}
func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
-
ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
defer span.End()
span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
@@ -144,16 +144,19 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
}
// ChainAt invokes the specified `HeightHandler` when the chain reaches the
-// specified height+confidence threshold. If the chain is rolled-back under the
-// specified height, `RevertHandler` will be called.
+// specified height+confidence threshold. If the chain is rolled-back under the
+// specified height, `RevertHandler` will be called.
//
// ts passed to handlers is the tipset at the specified height, or above it if the tipsets below were null
func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
-
e.lk.Lock() // Tricky locking, check your locks if you modify this function!
- bestH := e.tsc.best().Height()
+ best, err := e.tsc.best()
+ if err != nil {
+ return xerrors.Errorf("error getting best tipset: %w", err)
+ }
+ bestH := best.Height()
if bestH >= h+abi.ChainEpoch(confidence) {
ts, err := e.tsc.getNonNull(h)
if err != nil {
@@ -172,7 +175,11 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence
}
e.lk.Lock()
- bestH = e.tsc.best().Height()
+ best, err = e.tsc.best()
+ if err != nil {
+ return xerrors.Errorf("error getting best tipset: %w", err)
+ }
+ bestH = best.Height()
}
defer e.lk.Unlock()
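With tsc.best now returning an error, ChainAt's external behaviour is unchanged; a hedged caller-side sketch makes the contract concrete. The HeightHandler and RevertHandler signatures used here, taking (ctx, ts, curH) and (ctx, ts) respectively, are assumed from the events package; ev, targetHeight and the print statements are illustrative only.

// Hedged sketch (not part of the patch): fire once the head is 5 epochs past
// targetHeight, and get notified if the chain later reverts below it.
// `ev` is assumed to be an *events.Events built with NewEvents. Assumed
// imports: "context", "fmt", "golang.org/x/xerrors",
// "github.com/filecoin-project/go-state-types/abi",
// "github.com/filecoin-project/lotus/chain/types".
err := ev.ChainAt(
	func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
		fmt.Printf("reached %d with confidence (head at %d)\n", ts.Height(), curH)
		return nil
	},
	func(ctx context.Context, ts *types.TipSet) error {
		fmt.Printf("reverted below target, head now at %d\n", ts.Height())
		return nil
	},
	5, // confidence, in epochs
	targetHeight,
)
if err != nil {
	return xerrors.Errorf("registering ChainAt handler: %w", err)
}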
diff --git a/chain/events/events_test.go b/chain/events/events_test.go
index 1204e3938..0e4fd34b2 100644
--- a/chain/events/events_test.go
+++ b/chain/events/events_test.go
@@ -11,8 +11,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -46,6 +46,10 @@ type fakeCS struct {
sub func(rev, app []*types.TipSet)
}
+func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ panic("implement me")
+}
+
func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
return fcs.tipsets[key], nil
}
@@ -110,7 +114,11 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) {
out := make(chan []*api.HeadChange, 1)
- out <- []*api.HeadChange{{Type: store.HCCurrent, Val: fcs.tsc.best()}}
+ best, err := fcs.tsc.best()
+ if err != nil {
+ return nil, err
+ }
+ out <- []*api.HeadChange{{Type: store.HCCurrent, Val: best}}
fcs.sub = func(rev, app []*types.TipSet) {
notif := make([]*api.HeadChange, len(rev)+len(app))
@@ -174,7 +182,8 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { /
var revs []*types.TipSet
for i := 0; i < rev; i++ {
- ts := fcs.tsc.best()
+ ts, err := fcs.tsc.best()
+ require.NoError(fcs.t, err)
if _, ok := nullm[int(ts.Height())]; !ok {
revs = append(revs, ts)
@@ -196,7 +205,9 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { /
continue
}
- ts := fcs.makeTs(fcs.t, fcs.tsc.best().Key().Cids(), fcs.h, mc)
+ best, err := fcs.tsc.best()
+ require.NoError(fcs.t, err)
+ ts := fcs.makeTs(fcs.t, best.Key().Cids(), fcs.h, mc)
require.NoError(fcs.t, fcs.tsc.add(ts))
if hasMsgs {
diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go
index 2019a38eb..99b8480dc 100644
--- a/chain/events/state/predicates.go
+++ b/chain/events/state/predicates.go
@@ -4,20 +4,19 @@ import (
"bytes"
"context"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
- "github.com/ipfs/go-cid"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
cbor "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -49,7 +48,7 @@ func NewStatePredicates(api ChainAPI) *StatePredicates {
// - err
type DiffTipSetKeyFunc func(ctx context.Context, oldState, newState types.TipSetKey) (changed bool, user UserData, err error)
-type DiffActorStateFunc func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error)
+type DiffActorStateFunc func(ctx context.Context, oldActorState *types.Actor, newActorState *types.Actor) (changed bool, user UserData, err error)
// OnActorStateChanged calls diffStateFunc when the state changes for the given actor
func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFunc DiffActorStateFunc) DiffTipSetKeyFunc {
@@ -66,30 +65,30 @@ func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFu
if oldActor.Head.Equals(newActor.Head) {
return false, nil, nil
}
- return diffStateFunc(ctx, oldActor.Head, newActor.Head)
+ return diffStateFunc(ctx, oldActor, newActor)
}
}
-type DiffStorageMarketStateFunc func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error)
+type DiffStorageMarketStateFunc func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error)
// OnStorageMarketActorChanged calls diffStorageMarketState when the state changes for the market actor
func (sp *StatePredicates) OnStorageMarketActorChanged(diffStorageMarketState DiffStorageMarketStateFunc) DiffTipSetKeyFunc {
- return sp.OnActorStateChanged(builtin.StorageMarketActorAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
- var oldState market.State
- if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
+ return sp.OnActorStateChanged(market.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
+ oldState, err := market.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
+ if err != nil {
return false, nil, err
}
- var newState market.State
- if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
+ newState, err := market.Load(adt.WrapStore(ctx, sp.cst), newActorState)
+ if err != nil {
return false, nil, err
}
- return diffStorageMarketState(ctx, &oldState, &newState)
+ return diffStorageMarketState(ctx, oldState, newState)
})
}
type BalanceTables struct {
- EscrowTable *adt.BalanceTable
- LockedTable *adt.BalanceTable
+ EscrowTable market.BalanceTable
+ LockedTable market.BalanceTable
}
// DiffBalanceTablesFunc compares two balance tables
@@ -97,32 +96,32 @@ type DiffBalanceTablesFunc func(ctx context.Context, oldBalanceTable, newBalance
// OnBalanceChanged runs when the escrow table for available balances changes
func (sp *StatePredicates) OnBalanceChanged(diffBalances DiffBalanceTablesFunc) DiffStorageMarketStateFunc {
- return func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) {
- if oldState.EscrowTable.Equals(newState.EscrowTable) && oldState.LockedTable.Equals(newState.LockedTable) {
+ return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) {
+ bc, err := oldState.BalancesChanged(newState)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if !bc {
return false, nil, nil
}
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
- oldEscrowRoot, err := adt.AsBalanceTable(ctxStore, oldState.EscrowTable)
+ oldEscrowRoot, err := oldState.EscrowTable()
if err != nil {
return false, nil, err
}
- oldLockedRoot, err := adt.AsBalanceTable(ctxStore, oldState.LockedTable)
+ oldLockedRoot, err := oldState.LockedTable()
if err != nil {
return false, nil, err
}
- newEscrowRoot, err := adt.AsBalanceTable(ctxStore, newState.EscrowTable)
+ newEscrowRoot, err := newState.EscrowTable()
if err != nil {
return false, nil, err
}
- newLockedRoot, err := adt.AsBalanceTable(ctxStore, newState.LockedTable)
+ newLockedRoot, err := newState.LockedTable()
if err != nil {
return false, nil, err
}
@@ -131,25 +130,27 @@ func (sp *StatePredicates) OnBalanceChanged(diffBalances DiffBalanceTablesFunc)
}
}
-type DiffAdtArraysFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot *adt.Array) (changed bool, user UserData, err error)
+type DiffDealStatesFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealStates) (changed bool, user UserData, err error)
+type DiffDealProposalsFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealProposals) (changed bool, user UserData, err error)
+type DiffAdtArraysFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot adt.Array) (changed bool, user UserData, err error)
// OnDealStateChanged calls diffDealStates when the market deal state changes
-func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffAdtArraysFunc) DiffStorageMarketStateFunc {
- return func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) {
- if oldState.States.Equals(newState.States) {
- return false, nil, nil
- }
-
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
- oldRoot, err := adt.AsArray(ctxStore, oldState.States)
+func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffDealStatesFunc) DiffStorageMarketStateFunc {
+ return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) {
+ sc, err := oldState.StatesChanged(newState)
if err != nil {
return false, nil, err
}
- newRoot, err := adt.AsArray(ctxStore, newState.States)
+
+ if !sc {
+ return false, nil, nil
+ }
+
+ oldRoot, err := oldState.States()
+ if err != nil {
+ return false, nil, err
+ }
+ newRoot, err := newState.States()
if err != nil {
return false, nil, err
}
@@ -159,22 +160,22 @@ func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffAdtArraysFunc)
}
// OnDealProposalChanged calls diffDealProps when the market proposal state changes
-func (sp *StatePredicates) OnDealProposalChanged(diffDealProps DiffAdtArraysFunc) DiffStorageMarketStateFunc {
- return func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) {
- if oldState.Proposals.Equals(newState.Proposals) {
- return false, nil, nil
- }
-
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
- oldRoot, err := adt.AsArray(ctxStore, oldState.Proposals)
+func (sp *StatePredicates) OnDealProposalChanged(diffDealProps DiffDealProposalsFunc) DiffStorageMarketStateFunc {
+ return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) {
+ pc, err := oldState.ProposalsChanged(newState)
if err != nil {
return false, nil, err
}
- newRoot, err := adt.AsArray(ctxStore, newState.Proposals)
+
+ if !pc {
+ return false, nil, nil
+ }
+
+ oldRoot, err := oldState.Proposals()
+ if err != nil {
+ return false, nil, err
+ }
+ newRoot, err := newState.Proposals()
if err != nil {
return false, nil, err
}
@@ -183,51 +184,14 @@ func (sp *StatePredicates) OnDealProposalChanged(diffDealProps DiffAdtArraysFunc
}
}
-var _ AdtArrayDiff = &MarketDealProposalChanges{}
-
-type MarketDealProposalChanges struct {
- Added []ProposalIDState
- Removed []ProposalIDState
-}
-
-type ProposalIDState struct {
- ID abi.DealID
- Proposal market.DealProposal
-}
-
-func (m *MarketDealProposalChanges) Add(key uint64, val *typegen.Deferred) error {
- dp := new(market.DealProposal)
- err := dp.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Added = append(m.Added, ProposalIDState{abi.DealID(key), *dp})
- return nil
-}
-
-func (m *MarketDealProposalChanges) Modify(key uint64, from, to *typegen.Deferred) error {
- // short circuit, DealProposals are static
- return nil
-}
-
-func (m *MarketDealProposalChanges) Remove(key uint64, val *typegen.Deferred) error {
- dp := new(market.DealProposal)
- err := dp.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Removed = append(m.Removed, ProposalIDState{abi.DealID(key), *dp})
- return nil
-}
-
// OnDealProposalAmtChanged detects changes in the deal proposal AMT for all deal proposals and returns a market.DealProposalChanges structure containing:
// - Added Proposals
// - Modified Proposals
// - Removed Proposals
-func (sp *StatePredicates) OnDealProposalAmtChanged() DiffAdtArraysFunc {
- return func(ctx context.Context, oldDealProps, newDealProps *adt.Array) (changed bool, user UserData, err error) {
- proposalChanges := new(MarketDealProposalChanges)
- if err := DiffAdtArray(oldDealProps, newDealProps, proposalChanges); err != nil {
+func (sp *StatePredicates) OnDealProposalAmtChanged() DiffDealProposalsFunc {
+ return func(ctx context.Context, oldDealProps, newDealProps market.DealProposals) (changed bool, user UserData, err error) {
+ proposalChanges, err := market.DiffDealProposals(oldDealProps, newDealProps)
+ if err != nil {
return false, nil, err
}
@@ -239,64 +203,14 @@ func (sp *StatePredicates) OnDealProposalAmtChanged() DiffAdtArraysFunc {
}
}
-var _ AdtArrayDiff = &MarketDealStateChanges{}
-
-type MarketDealStateChanges struct {
- Added []DealIDState
- Modified []DealStateChange
- Removed []DealIDState
-}
-
-type DealIDState struct {
- ID abi.DealID
- Deal market.DealState
-}
-
-func (m *MarketDealStateChanges) Add(key uint64, val *typegen.Deferred) error {
- ds := new(market.DealState)
- err := ds.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Added = append(m.Added, DealIDState{abi.DealID(key), *ds})
- return nil
-}
-
-func (m *MarketDealStateChanges) Modify(key uint64, from, to *typegen.Deferred) error {
- dsFrom := new(market.DealState)
- if err := dsFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
- return err
- }
-
- dsTo := new(market.DealState)
- if err := dsTo.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
- return err
- }
-
- if *dsFrom != *dsTo {
- m.Modified = append(m.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo})
- }
- return nil
-}
-
-func (m *MarketDealStateChanges) Remove(key uint64, val *typegen.Deferred) error {
- ds := new(market.DealState)
- err := ds.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Removed = append(m.Removed, DealIDState{abi.DealID(key), *ds})
- return nil
-}
-
// OnDealStateAmtChanged detects changes in the deal state AMT for all deal states and returns a market.DealStateChanges structure containing:
// - Added Deals
// - Modified Deals
// - Removed Deals
-func (sp *StatePredicates) OnDealStateAmtChanged() DiffAdtArraysFunc {
- return func(ctx context.Context, oldDealStates, newDealStates *adt.Array) (changed bool, user UserData, err error) {
- dealStateChanges := new(MarketDealStateChanges)
- if err := DiffAdtArray(oldDealStates, newDealStates, dealStateChanges); err != nil {
+func (sp *StatePredicates) OnDealStateAmtChanged() DiffDealStatesFunc {
+ return func(ctx context.Context, oldDealStates, newDealStates market.DealStates) (changed bool, user UserData, err error) {
+ dealStateChanges, err := market.DiffDealStates(oldDealStates, newDealStates)
+ if err != nil {
return false, nil, err
}
@@ -309,42 +223,29 @@ func (sp *StatePredicates) OnDealStateAmtChanged() DiffAdtArraysFunc {
}
// ChangedDeals is a set of changes to deal state
-type ChangedDeals map[abi.DealID]DealStateChange
-
-// DealStateChange is a change in deal state from -> to
-type DealStateChange struct {
- ID abi.DealID
- From *market.DealState
- To *market.DealState
-}
+type ChangedDeals map[abi.DealID]market.DealStateChange
// DealStateChangedForIDs detects changes in the deal state AMT for the given deal IDs
-func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffAdtArraysFunc {
- return func(ctx context.Context, oldDealStateArray, newDealStateArray *adt.Array) (changed bool, user UserData, err error) {
+func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDealStatesFunc {
+ return func(ctx context.Context, oldDealStates, newDealStates market.DealStates) (changed bool, user UserData, err error) {
changedDeals := make(ChangedDeals)
for _, dealID := range dealIds {
- var oldDealPtr, newDealPtr *market.DealState
- var oldDeal, newDeal market.DealState
// If the deal has been removed, we just set it to nil
- found, err := oldDealStateArray.Get(uint64(dealID), &oldDeal)
+ oldDeal, oldFound, err := oldDealStates.Get(dealID)
if err != nil {
return false, nil, err
}
- if found {
- oldDealPtr = &oldDeal
- }
- found, err = newDealStateArray.Get(uint64(dealID), &newDeal)
+ newDeal, newFound, err := newDealStates.Get(dealID)
if err != nil {
return false, nil, err
}
- if found {
- newDealPtr = &newDeal
- }
- if oldDeal != newDeal {
- changedDeals[dealID] = DealStateChange{dealID, oldDealPtr, newDealPtr}
+ existenceChanged := oldFound != newFound
+ valueChanged := (oldFound && newFound) && *oldDeal != *newDeal
+ if existenceChanged || valueChanged {
+ changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal}
}
}
if len(changedDeals) > 0 {
@@ -405,124 +306,43 @@ func (sp *StatePredicates) AvailableBalanceChangedForAddresses(getAddrs func() [
}
}
-type DiffMinerActorStateFunc func(ctx context.Context, oldState *miner.State, newState *miner.State) (changed bool, user UserData, err error)
+type DiffMinerActorStateFunc func(ctx context.Context, oldState miner.State, newState miner.State) (changed bool, user UserData, err error)
func (sp *StatePredicates) OnInitActorChange(diffInitActorState DiffInitActorStateFunc) DiffTipSetKeyFunc {
- return sp.OnActorStateChanged(builtin.InitActorAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
- var oldState init_.State
- if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
+ return sp.OnActorStateChanged(init_.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
+ oldState, err := init_.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
+ if err != nil {
return false, nil, err
}
- var newState init_.State
- if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
+ newState, err := init_.Load(adt.WrapStore(ctx, sp.cst), newActorState)
+ if err != nil {
return false, nil, err
}
- return diffInitActorState(ctx, &oldState, &newState)
+ return diffInitActorState(ctx, oldState, newState)
})
}
func (sp *StatePredicates) OnMinerActorChange(minerAddr address.Address, diffMinerActorState DiffMinerActorStateFunc) DiffTipSetKeyFunc {
- return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
- var oldState miner.State
- if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
+ return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
+ oldState, err := miner.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
+ if err != nil {
return false, nil, err
}
- var newState miner.State
- if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
+ newState, err := miner.Load(adt.WrapStore(ctx, sp.cst), newActorState)
+ if err != nil {
return false, nil, err
}
- return diffMinerActorState(ctx, &oldState, &newState)
+ return diffMinerActorState(ctx, oldState, newState)
})
}
-type MinerSectorChanges struct {
- Added []miner.SectorOnChainInfo
- Extended []SectorExtensions
- Removed []miner.SectorOnChainInfo
-}
-
-var _ AdtArrayDiff = &MinerSectorChanges{}
-
-type SectorExtensions struct {
- From miner.SectorOnChainInfo
- To miner.SectorOnChainInfo
-}
-
-func (m *MinerSectorChanges) Add(key uint64, val *typegen.Deferred) error {
- si := new(miner.SectorOnChainInfo)
- err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Added = append(m.Added, *si)
- return nil
-}
-
-func (m *MinerSectorChanges) Modify(key uint64, from, to *typegen.Deferred) error {
- siFrom := new(miner.SectorOnChainInfo)
- err := siFrom.UnmarshalCBOR(bytes.NewReader(from.Raw))
- if err != nil {
- return err
- }
-
- siTo := new(miner.SectorOnChainInfo)
- err = siTo.UnmarshalCBOR(bytes.NewReader(to.Raw))
- if err != nil {
- return err
- }
-
- if siFrom.Expiration != siTo.Expiration {
- m.Extended = append(m.Extended, SectorExtensions{
- From: *siFrom,
- To: *siTo,
- })
- }
- return nil
-}
-
-func (m *MinerSectorChanges) Remove(key uint64, val *typegen.Deferred) error {
- si := new(miner.SectorOnChainInfo)
- err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Removed = append(m.Removed, *si)
- return nil
-}
-
func (sp *StatePredicates) OnMinerSectorChange() DiffMinerActorStateFunc {
- return func(ctx context.Context, oldState, newState *miner.State) (changed bool, user UserData, err error) {
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
- sectorChanges := &MinerSectorChanges{
- Added: []miner.SectorOnChainInfo{},
- Extended: []SectorExtensions{},
- Removed: []miner.SectorOnChainInfo{},
- }
-
- // no sector changes
- if oldState.Sectors.Equals(newState.Sectors) {
- return false, nil, nil
- }
-
- oldSectors, err := adt.AsArray(ctxStore, oldState.Sectors)
+ return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) {
+ sectorChanges, err := miner.DiffSectors(oldState, newState)
if err != nil {
return false, nil, err
}
-
- newSectors, err := adt.AsArray(ctxStore, newState.Sectors)
- if err != nil {
- return false, nil, err
- }
-
- if err := DiffAdtArray(oldSectors, newSectors, sectorChanges); err != nil {
- return false, nil, err
- }
-
// nothing changed
if len(sectorChanges.Added)+len(sectorChanges.Extended)+len(sectorChanges.Removed) == 0 {
return false, nil, nil
@@ -532,73 +352,13 @@ func (sp *StatePredicates) OnMinerSectorChange() DiffMinerActorStateFunc {
}
}
-type MinerPreCommitChanges struct {
- Added []miner.SectorPreCommitOnChainInfo
- Removed []miner.SectorPreCommitOnChainInfo
-}
-
-func (m *MinerPreCommitChanges) AsKey(key string) (adt.Keyer, error) {
- sector, err := adt.ParseUIntKey(key)
- if err != nil {
- return nil, err
- }
- return miner.SectorKey(abi.SectorNumber(sector)), nil
-}
-
-func (m *MinerPreCommitChanges) Add(key string, val *typegen.Deferred) error {
- sp := new(miner.SectorPreCommitOnChainInfo)
- err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Added = append(m.Added, *sp)
- return nil
-}
-
-func (m *MinerPreCommitChanges) Modify(key string, from, to *typegen.Deferred) error {
- return nil
-}
-
-func (m *MinerPreCommitChanges) Remove(key string, val *typegen.Deferred) error {
- sp := new(miner.SectorPreCommitOnChainInfo)
- err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
- if err != nil {
- return err
- }
- m.Removed = append(m.Removed, *sp)
- return nil
-}
-
func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc {
- return func(ctx context.Context, oldState, newState *miner.State) (changed bool, user UserData, err error) {
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
- precommitChanges := &MinerPreCommitChanges{
- Added: []miner.SectorPreCommitOnChainInfo{},
- Removed: []miner.SectorPreCommitOnChainInfo{},
- }
-
- if oldState.PreCommittedSectors.Equals(newState.PreCommittedSectors) {
- return false, nil, nil
- }
-
- oldPrecommits, err := adt.AsMap(ctxStore, oldState.PreCommittedSectors)
+ return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) {
+ precommitChanges, err := miner.DiffPreCommits(oldState, newState)
if err != nil {
return false, nil, err
}
- newPrecommits, err := adt.AsMap(ctxStore, newState.PreCommittedSectors)
- if err != nil {
- return false, nil, err
- }
-
- if err := DiffAdtMap(oldPrecommits, newPrecommits, precommitChanges); err != nil {
- return false, nil, err
- }
-
if len(precommitChanges.Added)+len(precommitChanges.Removed) == 0 {
return false, nil, nil
}
@@ -608,20 +368,20 @@ func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc {
}
// DiffPaymentChannelStateFunc is a function that compares two states for the payment channel
-type DiffPaymentChannelStateFunc func(ctx context.Context, oldState *paych.State, newState *paych.State) (changed bool, user UserData, err error)
+type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error)
// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the payment channel actor
func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc {
- return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
- var oldState paych.State
- if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
+ return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
+ oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
+ if err != nil {
return false, nil, err
}
- var newState paych.State
- if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
+ newState, err := paych.Load(adt.WrapStore(ctx, sp.cst), newActorState)
+ if err != nil {
return false, nil, err
}
- return diffPaymentChannelState(ctx, &oldState, &newState)
+ return diffPaymentChannelState(ctx, oldState, newState)
})
}
@@ -633,13 +393,23 @@ type PayChToSendChange struct {
// OnToSendAmountChanges monitors changes on the total amount to send from one party to the other on a payment channel
func (sp *StatePredicates) OnToSendAmountChanges() DiffPaymentChannelStateFunc {
- return func(ctx context.Context, oldState *paych.State, newState *paych.State) (changed bool, user UserData, err error) {
- if oldState.ToSend.Equals(newState.ToSend) {
+ return func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error) {
+ ots, err := oldState.ToSend()
+ if err != nil {
+ return false, nil, err
+ }
+
+ nts, err := newState.ToSend()
+ if err != nil {
+ return false, nil, err
+ }
+
+ if ots.Equals(nts) {
return false, nil, nil
}
return true, &PayChToSendChange{
- OldToSend: oldState.ToSend,
- NewToSend: newState.ToSend,
+ OldToSend: ots,
+ NewToSend: nts,
}, nil
}
}
@@ -660,14 +430,14 @@ type AddressChange struct {
To AddressPair
}
-type DiffInitActorStateFunc func(ctx context.Context, oldState *init_.State, newState *init_.State) (changed bool, user UserData, err error)
+type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error)
-func (i *InitActorAddressChanges) AsKey(key string) (adt.Keyer, error) {
+func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
- return adt.AddrKey(addr), nil
+ return abi.AddrKey(addr), nil
}
func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
@@ -748,33 +518,73 @@ func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) erro
}
func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc {
- return func(ctx context.Context, oldState, newState *init_.State) (changed bool, user UserData, err error) {
- ctxStore := &contextStore{
- ctx: ctx,
- cst: sp.cst,
- }
-
+ return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) {
addressChanges := &InitActorAddressChanges{
Added: []AddressPair{},
Modified: []AddressChange{},
Removed: []AddressPair{},
}
- if oldState.AddressMap.Equals(newState.AddressMap) {
- return false, nil, nil
- }
+ err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error {
+ oldIdAddress, err := address.NewIDAddress(uint64(oldId))
+ if err != nil {
+ return err
+ }
+
+ newIdAddress, found, err := newState.ResolveAddress(oldAddress)
+ if err != nil {
+ return err
+ }
+
+		if !found {
+			addressChanges.Removed = append(addressChanges.Removed, AddressPair{
+				ID: oldIdAddress,
+				PK: oldAddress,
+			})
+			// A removed mapping should not also be reported as modified below.
+			return nil
+		}
+
+ if oldIdAddress != newIdAddress {
+ addressChanges.Modified = append(addressChanges.Modified, AddressChange{
+ From: AddressPair{
+ ID: oldIdAddress,
+ PK: oldAddress,
+ },
+ To: AddressPair{
+ ID: newIdAddress,
+ PK: oldAddress,
+ },
+ })
+ }
+
+ return nil
+ })
- oldAddrs, err := adt.AsMap(ctxStore, oldState.AddressMap)
if err != nil {
return false, nil, err
}
- newAddrs, err := adt.AsMap(ctxStore, newState.AddressMap)
- if err != nil {
- return false, nil, err
- }
+ err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error {
+ newIdAddress, err := address.NewIDAddress(uint64(newId))
+ if err != nil {
+ return err
+ }
- if err := DiffAdtMap(oldAddrs, newAddrs, addressChanges); err != nil {
+		_, found, err := oldState.ResolveAddress(newAddress)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ addressChanges.Added = append(addressChanges.Added, AddressPair{
+ ID: newIdAddress,
+ PK: newAddress,
+ })
+ }
+
+ return nil
+ })
+
+ if err != nil {
return false, nil, err
}
diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go
index 944b7e61c..461ac4997 100644
--- a/chain/events/state/predicates_test.go
+++ b/chain/events/state/predicates_test.go
@@ -4,6 +4,8 @@ import (
"context"
"testing"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
"github.com/filecoin-project/go-bitfield"
"github.com/stretchr/testify/require"
@@ -13,11 +15,14 @@ import (
cbornode "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/util/adt"
tutils "github.com/filecoin-project/specs-actors/support/testing"
@@ -69,22 +74,22 @@ func TestMarketPredicates(t *testing.T) {
bs := bstore.NewTemporarySync()
store := adt.WrapStore(ctx, cbornode.NewCborStore(bs))
- oldDeal1 := &market.DealState{
+ oldDeal1 := &market0.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
SlashEpoch: 0,
}
- oldDeal2 := &market.DealState{
+ oldDeal2 := &market0.DealState{
SectorStartEpoch: 4,
LastUpdatedEpoch: 5,
SlashEpoch: 0,
}
- oldDeals := map[abi.DealID]*market.DealState{
+ oldDeals := map[abi.DealID]*market0.DealState{
abi.DealID(1): oldDeal1,
abi.DealID(2): oldDeal2,
}
- oldProp1 := &market.DealProposal{
+ oldProp1 := &market0.DealProposal{
PieceCID: dummyCid,
PieceSize: 0,
VerifiedDeal: false,
@@ -96,7 +101,7 @@ func TestMarketPredicates(t *testing.T) {
ProviderCollateral: big.Zero(),
ClientCollateral: big.Zero(),
}
- oldProp2 := &market.DealProposal{
+ oldProp2 := &market0.DealProposal{
PieceCID: dummyCid,
PieceSize: 0,
VerifiedDeal: false,
@@ -108,7 +113,7 @@ func TestMarketPredicates(t *testing.T) {
ProviderCollateral: big.Zero(),
ClientCollateral: big.Zero(),
}
- oldProps := map[abi.DealID]*market.DealProposal{
+ oldProps := map[abi.DealID]*market0.DealProposal{
abi.DealID(1): oldProp1,
abi.DealID(2): oldProp2,
}
@@ -122,7 +127,7 @@ func TestMarketPredicates(t *testing.T) {
oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances)
- newDeal1 := &market.DealState{
+ newDeal1 := &market0.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 3,
SlashEpoch: 0,
@@ -131,19 +136,19 @@ func TestMarketPredicates(t *testing.T) {
// deal 2 removed
// added
- newDeal3 := &market.DealState{
+ newDeal3 := &market0.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
SlashEpoch: 3,
}
- newDeals := map[abi.DealID]*market.DealState{
+ newDeals := map[abi.DealID]*market0.DealState{
abi.DealID(1): newDeal1,
// deal 2 was removed
abi.DealID(3): newDeal3,
}
// added
- newProp3 := &market.DealProposal{
+ newProp3 := &market0.DealProposal{
PieceCID: dummyCid,
PieceSize: 0,
VerifiedDeal: false,
@@ -155,7 +160,7 @@ func TestMarketPredicates(t *testing.T) {
ProviderCollateral: big.Zero(),
ClientCollateral: big.Zero(),
}
- newProps := map[abi.DealID]*market.DealProposal{
+ newProps := map[abi.DealID]*market0.DealProposal{
abi.DealID(1): oldProp1, // 1 was persisted
// prop 2 was removed
abi.DealID(3): newProp3, // new
@@ -178,8 +183,8 @@ func TestMarketPredicates(t *testing.T) {
require.NoError(t, err)
api := newMockAPI(bs)
- api.setActor(oldState.Key(), &types.Actor{Head: oldStateC})
- api.setActor(newState.Key(), &types.Actor{Head: newStateC})
+ api.setActor(oldState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: oldStateC})
+ api.setActor(newState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: newStateC})
t.Run("deal ID predicate", func(t *testing.T) {
preds := NewStatePredicates(api)
@@ -221,7 +226,7 @@ func TestMarketPredicates(t *testing.T) {
// Test that OnActorStateChanged does not call the callback if the state has not changed
mockAddr, err := address.NewFromString("t01")
require.NoError(t, err)
- actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, cid.Cid, cid.Cid) (bool, UserData, error) {
+ actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, *types.Actor, *types.Actor) (bool, UserData, error) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
@@ -230,11 +235,18 @@ func TestMarketPredicates(t *testing.T) {
require.False(t, changed)
// Test that OnDealStateChanged does not call the callback if the state has not changed
- diffDealStateFn := preds.OnDealStateChanged(func(context.Context, *adt.Array, *adt.Array) (bool, UserData, error) {
+ diffDealStateFn := preds.OnDealStateChanged(func(context.Context, market.DealStates, market.DealStates) (bool, UserData, error) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
- marketState := createEmptyMarketState(t, store)
+ marketState0 := createEmptyMarketState(t, store)
+ marketCid, err := store.Put(ctx, marketState0)
+ require.NoError(t, err)
+ marketState, err := market.Load(store, &types.Actor{
+ Code: builtin0.StorageMarketActorCodeID,
+ Head: marketCid,
+ })
+ require.NoError(t, err)
changed, _, err = diffDealStateFn(ctx, marketState, marketState)
require.NoError(t, err)
require.False(t, changed)
@@ -252,18 +264,18 @@ func TestMarketPredicates(t *testing.T) {
require.NoError(t, err)
require.True(t, changed)
- changedDeals, ok := valArr.(*MarketDealStateChanges)
+ changedDeals, ok := valArr.(*market.DealStateChanges)
require.True(t, ok)
require.Len(t, changedDeals.Added, 1)
require.Equal(t, abi.DealID(3), changedDeals.Added[0].ID)
- require.Equal(t, *newDeal3, changedDeals.Added[0].Deal)
+ require.True(t, dealEquality(*newDeal3, changedDeals.Added[0].Deal))
require.Len(t, changedDeals.Removed, 1)
require.Len(t, changedDeals.Modified, 1)
require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID)
- require.Equal(t, newDeal1, changedDeals.Modified[0].To)
- require.Equal(t, oldDeal1, changedDeals.Modified[0].From)
+ require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To))
+ require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From))
require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID)
})
@@ -279,17 +291,15 @@ func TestMarketPredicates(t *testing.T) {
require.NoError(t, err)
require.True(t, changed)
- changedProps, ok := valArr.(*MarketDealProposalChanges)
+ changedProps, ok := valArr.(*market.DealProposalChanges)
require.True(t, ok)
require.Len(t, changedProps.Added, 1)
require.Equal(t, abi.DealID(3), changedProps.Added[0].ID)
- require.Equal(t, *newProp3, changedProps.Added[0].Proposal)
// proposals cannot be modified -- no modified testing
require.Len(t, changedProps.Removed, 1)
require.Equal(t, abi.DealID(2), changedProps.Removed[0].ID)
- require.Equal(t, *oldProp2, changedProps.Removed[0].Proposal)
})
t.Run("balances predicate", func(t *testing.T) {
@@ -342,7 +352,14 @@ func TestMarketPredicates(t *testing.T) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
- marketState := createEmptyMarketState(t, store)
+ marketState0 := createEmptyMarketState(t, store)
+ marketCid, err := store.Put(ctx, marketState0)
+ require.NoError(t, err)
+ marketState, err := market.Load(store, &types.Actor{
+ Code: builtin0.StorageMarketActorCodeID,
+ Head: marketCid,
+ })
+ require.NoError(t, err)
changed, _, err = diffDealBalancesFn(ctx, marketState, marketState)
require.NoError(t, err)
require.False(t, changed)
@@ -362,12 +379,12 @@ func TestMinerSectorChange(t *testing.T) {
}
owner, worker := nextIDAddrF(), nextIDAddrF()
- si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10))
- si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11))
- si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11))
+ si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner0.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10))
+ si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner0.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11))
+ si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner0.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11))
oldMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si0, si1, si2})
- si3 := newSectorOnChainInfo(3, tutils.MakeCID("3", &miner.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12))
+ si3 := newSectorOnChainInfo(3, tutils.MakeCID("3", &miner0.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12))
// 0 delete
// 1 extend
// 2 same
@@ -383,8 +400,8 @@ func TestMinerSectorChange(t *testing.T) {
require.NoError(t, err)
api := newMockAPI(bs)
- api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC})
- api.setActor(newState.Key(), &types.Actor{Head: newMinerC})
+ api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin0.StorageMinerActorCodeID})
+ api.setActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin0.StorageMinerActorCodeID})
preds := NewStatePredicates(api)
@@ -394,7 +411,7 @@ func TestMinerSectorChange(t *testing.T) {
require.True(t, change)
require.NotNil(t, val)
- sectorChanges, ok := val.(*MinerSectorChanges)
+ sectorChanges, ok := val.(*miner.SectorChanges)
require.True(t, ok)
require.Equal(t, len(sectorChanges.Added), 1)
@@ -418,7 +435,7 @@ func TestMinerSectorChange(t *testing.T) {
require.True(t, change)
require.NotNil(t, val)
- sectorChanges, ok = val.(*MinerSectorChanges)
+ sectorChanges, ok = val.(*miner.SectorChanges)
require.True(t, ok)
require.Equal(t, 1, len(sectorChanges.Added))
@@ -450,7 +467,7 @@ type balance struct {
locked abi.TokenAmount
}
-func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState, props map[abi.DealID]*market.DealProposal, balances map[address.Address]balance) cid.Cid {
+func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState, props map[abi.DealID]*market0.DealProposal, balances map[address.Address]balance) cid.Cid {
dealRootCid := createDealAMT(ctx, t, store, deals)
propRootCid := createProposalAMT(ctx, t, store, props)
balancesCids := createBalanceTable(ctx, t, store, balances)
@@ -465,15 +482,15 @@ func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals
return stateC
}
-func createEmptyMarketState(t *testing.T, store adt.Store) *market.State {
+func createEmptyMarketState(t *testing.T, store adt.Store) *market0.State {
emptyArrayCid, err := adt.MakeEmptyArray(store).Root()
require.NoError(t, err)
emptyMap, err := adt.MakeEmptyMap(store).Root()
require.NoError(t, err)
- return market.ConstructState(emptyArrayCid, emptyMap, emptyMap)
+ return market0.ConstructState(emptyArrayCid, emptyMap, emptyMap)
}
-func createDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState) cid.Cid {
+func createDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState) cid.Cid {
root := adt.MakeEmptyArray(store)
for dealID, dealState := range deals {
err := root.Set(uint64(dealID), dealState)
@@ -484,7 +501,7 @@ func createDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map
return rootCid
}
-func createProposalAMT(ctx context.Context, t *testing.T, store adt.Store, props map[abi.DealID]*market.DealProposal) cid.Cid {
+func createProposalAMT(ctx context.Context, t *testing.T, store adt.Store, props map[abi.DealID]*market0.DealProposal) cid.Cid {
root := adt.MakeEmptyArray(store)
for dealID, prop := range props {
err := root.Set(uint64(dealID), prop)
@@ -532,20 +549,20 @@ func createMinerState(ctx context.Context, t *testing.T, store adt.Store, owner,
return stateC
}
-func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address) *miner.State {
+func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address) *miner0.State {
emptyArrayCid, err := adt.MakeEmptyArray(store).Root()
require.NoError(t, err)
emptyMap, err := adt.MakeEmptyMap(store).Root()
require.NoError(t, err)
- emptyDeadline, err := store.Put(store.Context(), miner.ConstructDeadline(emptyArrayCid))
+ emptyDeadline, err := store.Put(store.Context(), miner0.ConstructDeadline(emptyArrayCid))
require.NoError(t, err)
- emptyVestingFunds := miner.ConstructVestingFunds()
+ emptyVestingFunds := miner0.ConstructVestingFunds()
emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds)
require.NoError(t, err)
- emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
+ emptyDeadlines := miner0.ConstructDeadlines(emptyDeadline)
emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines)
require.NoError(t, err)
@@ -555,7 +572,7 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o
emptyBitfieldCid, err := store.Put(store.Context(), emptyBitfield)
require.NoError(t, err)
- state, err := miner.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid)
+ state, err := miner0.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid)
require.NoError(t, err)
return state
@@ -564,7 +581,7 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o
func createSectorsAMT(ctx context.Context, t *testing.T, store adt.Store, sectors []miner.SectorOnChainInfo) cid.Cid {
root := adt.MakeEmptyArray(store)
for _, sector := range sectors {
- sector := sector
+ sector := (miner0.SectorOnChainInfo)(sector)
err := root.Set(uint64(sector.SectorNumber), &sector)
require.NoError(t, err)
}
@@ -597,8 +614,8 @@ const (
)
// returns a unique SectorPreCommitInfo with each invocation with SectorNumber set to `sectorNo`.
-func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner.SectorPreCommitInfo {
- return &miner.SectorPreCommitInfo{
+func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner0.SectorPreCommitInfo {
+ return &miner0.SectorPreCommitInfo{
SealProof: abi.RegisteredSealProof_StackedDrg32GiBV1,
SectorNumber: sectorNo,
SealedCID: sealed,
@@ -607,3 +624,9 @@ func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiratio
Expiration: expiration,
}
}
+
+func dealEquality(expected market0.DealState, actual market.DealState) bool {
+ return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch &&
+ expected.SectorStartEpoch == actual.SectorStartEpoch &&
+ expected.SlashEpoch == actual.SlashEpoch
+}
diff --git a/chain/events/tscache.go b/chain/events/tscache.go
index 3852c9930..d47c71480 100644
--- a/chain/events/tscache.go
+++ b/chain/events/tscache.go
@@ -3,13 +3,16 @@ package events
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
)
-type tsByHFunc func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+type tsCacheAPI interface {
+ ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+ ChainHead(context.Context) (*types.TipSet, error)
+}
// tipSetCache implements a simple ring-buffer cache to keep track of recent
// tipsets
@@ -18,10 +21,10 @@ type tipSetCache struct {
start int
len int
- storage tsByHFunc
+ storage tsCacheAPI
}
-func newTSCache(cap abi.ChainEpoch, storage tsByHFunc) *tipSetCache {
+func newTSCache(cap abi.ChainEpoch, storage tsCacheAPI) *tipSetCache {
return &tipSetCache{
cache: make([]*types.TipSet, cap),
start: 0,
@@ -94,7 +97,7 @@ func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error)
func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {
if tsc.len == 0 {
log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height)
- return tsc.storage(context.TODO(), height, types.EmptyTSK)
+ return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, types.EmptyTSK)
}
headH := tsc.cache[tsc.start].Height()
@@ -114,14 +117,18 @@ func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) {
if height < tail.Height() {
log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height())
- return tsc.storage(context.TODO(), height, tail.Key())
+ return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, tail.Key())
}
return tsc.cache[normalModulo(tsc.start-int(headH-height), clen)], nil
}
-func (tsc *tipSetCache) best() *types.TipSet {
- return tsc.cache[tsc.start]
+func (tsc *tipSetCache) best() (*types.TipSet, error) {
+ best := tsc.cache[tsc.start]
+ if best == nil {
+ return tsc.storage.ChainHead(context.TODO())
+ }
+ return best, nil
}
func normalModulo(n, m int) int {
diff --git a/chain/events/tscache_test.go b/chain/events/tscache_test.go
index 1278e58e9..ab6336f24 100644
--- a/chain/events/tscache_test.go
+++ b/chain/events/tscache_test.go
@@ -4,8 +4,8 @@ import (
"context"
"testing"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
@@ -13,10 +13,7 @@ import (
)
func TestTsCache(t *testing.T) {
- tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
- t.Fatal("storage call")
- return &types.TipSet{}, nil
- })
+ tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})
h := abi.ChainEpoch(75)
@@ -43,7 +40,12 @@ func TestTsCache(t *testing.T) {
for i := 0; i < 9000; i++ {
if i%90 > 60 {
- if err := tsc.revert(tsc.best()); err != nil {
+ best, err := tsc.best()
+ if err != nil {
+ t.Fatal(err, "; i:", i)
+ return
+ }
+ if err := tsc.revert(best); err != nil {
t.Fatal(err, "; i:", i)
return
}
@@ -55,11 +57,21 @@ func TestTsCache(t *testing.T) {
}
+type tsCacheAPIFailOnStorageCall struct {
+ t *testing.T
+}
+
+func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
+ tc.t.Fatal("storage call")
+ return &types.TipSet{}, nil
+}
+func (tc *tsCacheAPIFailOnStorageCall) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ tc.t.Fatal("storage call")
+ return &types.TipSet{}, nil
+}
+
func TestTsCacheNulls(t *testing.T) {
- tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
- t.Fatal("storage call")
- return &types.TipSet{}, nil
- })
+ tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t})
h := abi.ChainEpoch(75)
@@ -91,7 +103,9 @@ func TestTsCacheNulls(t *testing.T) {
add()
add()
- require.Equal(t, h-1, tsc.best().Height())
+ best, err := tsc.best()
+ require.NoError(t, err)
+ require.Equal(t, h-1, best.Height())
ts, err := tsc.get(h - 1)
require.NoError(t, err)
@@ -109,9 +123,17 @@ func TestTsCacheNulls(t *testing.T) {
require.NoError(t, err)
require.Equal(t, h-8, ts.Height())
- require.NoError(t, tsc.revert(tsc.best()))
- require.NoError(t, tsc.revert(tsc.best()))
- require.Equal(t, h-8, tsc.best().Height())
+ best, err = tsc.best()
+ require.NoError(t, err)
+ require.NoError(t, tsc.revert(best))
+
+ best, err = tsc.best()
+ require.NoError(t, err)
+ require.NoError(t, tsc.revert(best))
+
+ best, err = tsc.best()
+ require.NoError(t, err)
+ require.Equal(t, h-8, best.Height())
h += 50
add()
@@ -120,3 +142,27 @@ func TestTsCacheNulls(t *testing.T) {
require.NoError(t, err)
require.Equal(t, h-1, ts.Height())
}
+
+type tsCacheAPIStorageCallCounter struct {
+ t *testing.T
+ chainGetTipSetByHeight int
+ chainHead int
+}
+
+func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
+ tc.chainGetTipSetByHeight++
+ return &types.TipSet{}, nil
+}
+func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ tc.chainHead++
+ return &types.TipSet{}, nil
+}
+
+func TestTsCacheEmpty(t *testing.T) {
+ // Calling best on an empty cache should just call out to the chain API
+ callCounter := &tsCacheAPIStorageCallCounter{t: t}
+ tsc := newTSCache(50, callCounter)
+ _, err := tsc.best()
+ require.NoError(t, err)
+ require.Equal(t, 1, callCounter.chainHead)
+}
diff --git a/chain/blocksync/cbor_gen.go b/chain/exchange/cbor_gen.go
similarity index 97%
rename from chain/blocksync/cbor_gen.go
rename to chain/exchange/cbor_gen.go
index cd43f4a64..29b258081 100644
--- a/chain/blocksync/cbor_gen.go
+++ b/chain/exchange/cbor_gen.go
@@ -1,6 +1,6 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
-package blocksync
+package exchange
import (
"fmt"
@@ -146,7 +146,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
scratch := make([]byte, 9)
- // t.Status (blocksync.status) (uint64)
+ // t.Status (exchange.status) (uint64)
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil {
return err
@@ -164,7 +164,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
return err
}
- // t.Chain ([]*blocksync.BSTipSet) (slice)
+ // t.Chain ([]*exchange.BSTipSet) (slice)
if len(t.Chain) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Chain was too long")
}
@@ -198,7 +198,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
return fmt.Errorf("cbor input had wrong number of fields")
}
- // t.Status (blocksync.status) (uint64)
+ // t.Status (exchange.status) (uint64)
{
@@ -222,7 +222,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error {
t.ErrorMessage = string(sval)
}
- // t.Chain ([]*blocksync.BSTipSet) (slice)
+ // t.Chain ([]*exchange.BSTipSet) (slice)
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
@@ -567,7 +567,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error {
}
}
- // t.Messages (blocksync.CompactedMessages) (struct)
+ // t.Messages (exchange.CompactedMessages) (struct)
if err := t.Messages.MarshalCBOR(w); err != nil {
return err
}
@@ -621,7 +621,7 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
t.Blocks[i] = &v
}
- // t.Messages (blocksync.CompactedMessages) (struct)
+ // t.Messages (exchange.CompactedMessages) (struct)
{
diff --git a/chain/blocksync/client.go b/chain/exchange/client.go
similarity index 62%
rename from chain/blocksync/client.go
rename to chain/exchange/client.go
index 38e1f6d2c..cb030bcf7 100644
--- a/chain/blocksync/client.go
+++ b/chain/exchange/client.go
@@ -1,4 +1,4 @@
-package blocksync
+package exchange
import (
"bufio"
@@ -7,13 +7,17 @@ import (
"math/rand"
"time"
- host "github.com/libp2p/go-libp2p-core/host"
- inet "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/helpers"
+ "github.com/libp2p/go-libp2p-core/host"
+ "github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
+
"go.opencensus.io/trace"
+ "go.uber.org/fx"
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
+
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@@ -21,11 +25,9 @@ import (
"github.com/filecoin-project/lotus/lib/peermgr"
)
-// Protocol client.
-// FIXME: Rename to just `Client`. Not done at the moment to avoid
-// disrupting too much of the consumer code, should be done along
-// https://github.com/filecoin-project/lotus/issues/2612.
-type BlockSync struct {
+// client implements exchange.Client, using the libp2p ChainExchange protocol
+// as the fetching mechanism.
+type client struct {
// Connection manager used to contact the server.
// FIXME: We should have a reduced interface here, initialized
// just with our protocol ID, we shouldn't be able to open *any*
@@ -35,13 +37,14 @@ type BlockSync struct {
peerTracker *bsPeerTracker
}
-func NewClient(
- host host.Host,
- pmgr peermgr.MaybePeerMgr,
-) *BlockSync {
- return &BlockSync{
+var _ Client = (*client)(nil)
+
+// NewClient creates a new libp2p-based exchange.Client that uses the libp2p
+// ChainExchange protocol as the fetching mechanism.
+func NewClient(lc fx.Lifecycle, host host.Host, pmgr peermgr.MaybePeerMgr) Client {
+ return &client{
host: host,
- peerTracker: newPeerTracker(pmgr.Mgr),
+ peerTracker: newPeerTracker(lc, host, pmgr.Mgr),
}
}
@@ -62,10 +65,14 @@ func NewClient(
// request options without disrupting external calls. In the future the
// consumers should be forced to use a more standardized service and
// adhere to a single API derived from this function.
-func (client *BlockSync) doRequest(
+func (c *client) doRequest(
ctx context.Context,
req *Request,
singlePeer *peer.ID,
+ // In the `GetChainMessages` case, we won't request the headers but we still
+ // need them to check the integrity of the `CompactedMessages` in the response
+ // so the tipset blocks need to be provided by the caller.
+ tipsets []*types.TipSet,
) (*validatedResponse, error) {
// Validate request.
if req.Length == 0 {
@@ -86,7 +93,7 @@ func (client *BlockSync) doRequest(
if singlePeer != nil {
peers = []peer.ID{*singlePeer}
} else {
- peers = client.getShuffledPeers()
+ peers = c.getShuffledPeers()
if len(peers) == 0 {
return nil, xerrors.Errorf("no peers available")
}
@@ -107,25 +114,25 @@ func (client *BlockSync) doRequest(
}
// Send request, read response.
- res, err := client.sendRequestToPeer(ctx, peer, req)
+ res, err := c.sendRequestToPeer(ctx, peer, req)
if err != nil {
- if !xerrors.Is(err, inet.ErrNoConn) {
- log.Warnf("could not connect to peer %s: %s",
+ if !xerrors.Is(err, network.ErrNoConn) {
+ log.Warnf("could not send request to peer %s: %s",
peer.String(), err)
}
continue
}
// Process and validate response.
- validRes, err := client.processResponse(req, res)
+ validRes, err := c.processResponse(req, res, tipsets)
if err != nil {
log.Warnf("processing peer %s response failed: %s",
peer.String(), err)
continue
}
- client.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime))
- client.host.ConnManager().TagPeer(peer, "bsync", SUCCESS_PEER_TAG_VALUE)
+ c.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime))
+ c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue)
return validRes, nil
}
@@ -144,11 +151,8 @@ func (client *BlockSync) doRequest(
// We are conflating in the single error returned both status and validation
// errors. Peer penalization should happen here then, before returning, so
// we can apply the correct penalties depending on the cause of the error.
-func (client *BlockSync) processResponse(
- req *Request,
- res *Response,
- // FIXME: Add the `peer` as argument once we implement penalties.
-) (*validatedResponse, error) {
+// FIXME: Add the `peer` as argument once we implement penalties.
+func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (*validatedResponse, error) {
err := res.statusToError()
if err != nil {
return nil, xerrors.Errorf("status error: %s", err)
@@ -180,6 +184,16 @@ func (client *BlockSync) processResponse(
// Check for valid block sets and extract them into `TipSet`s.
validRes.tipsets = make([]*types.TipSet, resLength)
for i := 0; i < resLength; i++ {
+ if res.Chain[i] == nil {
+ return nil, xerrors.Errorf("response with nil tipset in pos %d", i)
+ }
+ for blockIdx, block := range res.Chain[i].Blocks {
+ if block == nil {
+ return nil, xerrors.Errorf("tipset with nil block in pos %d", blockIdx)
+ // FIXME: Maybe we should move this check to `NewTipSet`.
+ }
+ }
+
validRes.tipsets[i], err = types.NewTipSet(res.Chain[i].Blocks)
if err != nil {
return nil, xerrors.Errorf("invalid tipset blocks at height (head - %d): %w", i, err)
@@ -214,31 +228,28 @@ func (client *BlockSync) processResponse(
// If the headers were also returned check that the compression
// indexes are valid before `toFullTipSets()` is called by the
// consumer.
- for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ {
- msgs := res.Chain[tipsetIdx].Messages
- blocksNum := len(res.Chain[tipsetIdx].Blocks)
- if len(msgs.BlsIncludes) != blocksNum {
- return nil, xerrors.Errorf("BlsIncludes (%d) does not match number of blocks (%d)",
- len(msgs.BlsIncludes), blocksNum)
- }
- if len(msgs.SecpkIncludes) != blocksNum {
- return nil, xerrors.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)",
- len(msgs.SecpkIncludes), blocksNum)
- }
- for blockIdx := 0; blockIdx < blocksNum; blockIdx++ {
- for _, mi := range msgs.BlsIncludes[blockIdx] {
- if int(mi) >= len(msgs.Bls) {
- return nil, xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)",
- mi, len(msgs.Bls))
- }
- }
- for _, mi := range msgs.SecpkIncludes[blockIdx] {
- if int(mi) >= len(msgs.Secpk) {
- return nil, xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)",
- mi, len(msgs.Secpk))
- }
- }
+ err := c.validateCompressedIndices(res.Chain)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // If we didn't request the headers, they should have been provided
+ // by the caller.
+ if len(tipsets) < len(res.Chain) {
+ return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets))
+ }
+ chain := make([]*BSTipSet, 0, resLength)
+ for i, resChain := range res.Chain {
+ next := &BSTipSet{
+ Blocks: tipsets[i].Blocks(),
+ Messages: resChain.Messages,
}
+ chain = append(chain, next)
+ }
+
+ err := c.validateCompressedIndices(chain)
+ if err != nil {
+ return nil, err
}
}
}
@@ -246,16 +257,44 @@ func (client *BlockSync) processResponse(
return validRes, nil
}
-// GetBlocks fetches count blocks from the network, from the provided tipset
-// *backwards*, returning as many tipsets as count.
-//
-// {hint/usage}: This is used by the Syncer during normal chain syncing and when
-// resolving forks.
-func (client *BlockSync) GetBlocks(
- ctx context.Context,
- tsk types.TipSetKey,
- count int,
-) ([]*types.TipSet, error) {
+func (c *client) validateCompressedIndices(chain []*BSTipSet) error {
+ resLength := len(chain)
+ for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ {
+ msgs := chain[tipsetIdx].Messages
+ blocksNum := len(chain[tipsetIdx].Blocks)
+
+ if len(msgs.BlsIncludes) != blocksNum {
+ return xerrors.Errorf("BlsIncludes (%d) does not match number of blocks (%d)",
+ len(msgs.BlsIncludes), blocksNum)
+ }
+
+ if len(msgs.SecpkIncludes) != blocksNum {
+ return xerrors.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)",
+ len(msgs.SecpkIncludes), blocksNum)
+ }
+
+ for blockIdx := 0; blockIdx < blocksNum; blockIdx++ {
+ for _, mi := range msgs.BlsIncludes[blockIdx] {
+ if int(mi) >= len(msgs.Bls) {
+ return xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)",
+ mi, len(msgs.Bls))
+ }
+ }
+
+ for _, mi := range msgs.SecpkIncludes[blockIdx] {
+ if int(mi) >= len(msgs.Secpk) {
+ return xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)",
+ mi, len(msgs.Secpk))
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// GetBlocks implements Client.GetBlocks(). Refer to the godocs there.
+func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
defer span.End()
if span.IsRecordingEvents() {
@@ -271,7 +310,7 @@ func (client *BlockSync) GetBlocks(
Options: Headers,
}
- validRes, err := client.doRequest(ctx, req, nil)
+ validRes, err := c.doRequest(ctx, req, nil, nil)
if err != nil {
return nil, err
}
@@ -279,11 +318,8 @@ func (client *BlockSync) GetBlocks(
return validRes.tipsets, nil
}
-func (client *BlockSync) GetFullTipSet(
- ctx context.Context,
- peer peer.ID,
- tsk types.TipSetKey,
-) (*store.FullTipSet, error) {
+// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there.
+func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
// TODO: round robin through these peers on error
req := &Request{
@@ -292,7 +328,7 @@ func (client *BlockSync) GetFullTipSet(
Options: Headers | Messages,
}
- validRes, err := client.doRequest(ctx, req, &peer)
+ validRes, err := c.doRequest(ctx, req, &peer, nil)
if err != nil {
return nil, err
}
@@ -302,11 +338,11 @@ func (client *BlockSync) GetFullTipSet(
// *one* tipset here, so it's safe to index directly.
}
-func (client *BlockSync) GetChainMessages(
- ctx context.Context,
- head *types.TipSet,
- length uint64,
-) ([]*CompactedMessages, error) {
+// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there.
+func (c *client) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error) {
+ head := tipsets[0]
+ length := uint64(len(tipsets))
+
ctx, span := trace.StartSpan(ctx, "GetChainMessages")
if span.IsRecordingEvents() {
span.AddAttributes(
@@ -322,7 +358,7 @@ func (client *BlockSync) GetChainMessages(
Options: Messages,
}
- validRes, err := client.doRequest(ctx, req, nil)
+ validRes, err := c.doRequest(ctx, req, nil, tipsets)
if err != nil {
return nil, err
}
@@ -333,11 +369,7 @@ func (client *BlockSync) GetChainMessages(
// Send a request to a peer. Write request in the stream and read the
// response back. We do not do any processing of the request/response
// here.
-func (client *BlockSync) sendRequestToPeer(
- ctx context.Context,
- peer peer.ID,
- req *Request,
-) (_ *Response, err error) {
+func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Request) (_ *Response, err error) {
// Trace code.
ctx, span := trace.StartSpan(ctx, "sendRequestToPeer")
defer span.End()
@@ -358,34 +390,39 @@ func (client *BlockSync) sendRequestToPeer(
}()
// -- TRACE --
- supported, err := client.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID)
+ supported, err := c.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID, ChainExchangeProtocolID)
if err != nil {
+ c.RemovePeer(peer)
return nil, xerrors.Errorf("failed to get protocols for peer: %w", err)
}
- if len(supported) == 0 || supported[0] != BlockSyncProtocolID {
- return nil, xerrors.Errorf("peer %s does not support protocol %s",
- peer, BlockSyncProtocolID)
- // FIXME: `ProtoBook` should support a *single* protocol check that returns
- // a bool instead of a list.
+ if len(supported) == 0 || (supported[0] != BlockSyncProtocolID && supported[0] != ChainExchangeProtocolID) {
+ return nil, xerrors.Errorf("peer %s does not support protocols %s",
+ peer, []string{BlockSyncProtocolID, ChainExchangeProtocolID})
}
connectionStart := build.Clock.Now()
// Open stream to peer.
- stream, err := client.host.NewStream(
- inet.WithNoDial(ctx, "should already have connection"),
+ stream, err := c.host.NewStream(
+ network.WithNoDial(ctx, "should already have connection"),
peer,
- BlockSyncProtocolID)
+ ChainExchangeProtocolID, BlockSyncProtocolID)
if err != nil {
- client.RemovePeer(peer)
+ c.RemovePeer(peer)
return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
}
+ defer func() {
+ // Note: this will become just stream.Close once we've completed the go-libp2p migration to
+ // go-libp2p-core 0.7.0
+ go helpers.FullClose(stream) //nolint:errcheck
+ }()
+
// Write request.
- _ = stream.SetWriteDeadline(time.Now().Add(WRITE_REQ_DEADLINE))
+ _ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline))
if err := cborutil.WriteCborRPC(stream, req); err != nil {
_ = stream.SetWriteDeadline(time.Time{})
- client.peerTracker.logFailure(peer, build.Clock.Since(connectionStart))
+ c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
// FIXME: Should we also remove peer here?
return nil, err
}
@@ -395,11 +432,11 @@ func (client *BlockSync) sendRequestToPeer(
// Read response.
var res Response
err = cborutil.ReadCborRPC(
- bufio.NewReader(incrt.New(stream, READ_RES_MIN_SPEED, READ_RES_DEADLINE)),
+ bufio.NewReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline)),
&res)
if err != nil {
- client.peerTracker.logFailure(peer, build.Clock.Since(connectionStart))
- return nil, xerrors.Errorf("failed to read blocksync response: %w", err)
+ c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
+ return nil, xerrors.Errorf("failed to read chainxchg response: %w", err)
}
// FIXME: Move all this together at the top using a defer as done elsewhere.
@@ -412,32 +449,34 @@ func (client *BlockSync) sendRequestToPeer(
)
}
- client.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart))
+ c.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart), uint64(len(res.Chain)))
// FIXME: We should really log a success only after we validate the response.
// It might be a bit hard to do.
return &res, nil
}
-func (client *BlockSync) AddPeer(p peer.ID) {
- client.peerTracker.addPeer(p)
+// AddPeer implements Client.AddPeer(). Refer to the godocs there.
+func (c *client) AddPeer(p peer.ID) {
+ c.peerTracker.addPeer(p)
}
-func (client *BlockSync) RemovePeer(p peer.ID) {
- client.peerTracker.removePeer(p)
+// RemovePeer implements Client.RemovePeer(). Refer to the godocs there.
+func (c *client) RemovePeer(p peer.ID) {
+ c.peerTracker.removePeer(p)
}
// getShuffledPeers returns a preference-sorted set of peers (by latency
// and failure counting), shuffling the first few peers so we don't always
// pick the same peer.
// FIXME: Consider merging with `shufflePrefix()s`.
-func (client *BlockSync) getShuffledPeers() []peer.ID {
- peers := client.peerTracker.prefSortedPeers()
+func (c *client) getShuffledPeers() []peer.ID {
+ peers := c.peerTracker.prefSortedPeers()
shufflePrefix(peers)
return peers
}
func shufflePrefix(peers []peer.ID) {
- prefix := SHUFFLE_PEERS_PREFIX
+ prefix := ShufflePeersPrefix
if len(peers) < prefix {
prefix = len(peers)
}
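
For illustration only, not part of the patch: the compaction scheme that `validateCompressedIndices` above enforces stores each tipset's messages once and lets every block reference them by index. A minimal sketch of a value that would pass validation for a two-block tipset, assuming it sits in package exchange and that m0..m2 are hypothetical messages:

    // exampleCompacted is purely illustrative: two blocks in one tipset share
    // three BLS messages, block 0 carrying m0,m1 and block 1 carrying m1,m2.
    // There is exactly one BlsIncludes/SecpkIncludes entry per block, and every
    // index stays below len(Bls)/len(Secpk), which is what
    // validateCompressedIndices checks.
    func exampleCompacted(m0, m1, m2 *types.Message) *CompactedMessages {
    	return &CompactedMessages{
    		Bls:           []*types.Message{m0, m1, m2},
    		BlsIncludes:   [][]uint64{{0, 1}, {1, 2}},
    		Secpk:         nil,                // no secp messages in this sketch
    		SecpkIncludes: [][]uint64{{}, {}}, // still one (empty) entry per block
    	}
    }
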
diff --git a/chain/exchange/doc.go b/chain/exchange/doc.go
new file mode 100644
index 000000000..b20ee0c1f
--- /dev/null
+++ b/chain/exchange/doc.go
@@ -0,0 +1,19 @@
+// Package exchange contains the ChainExchange server and client components.
+//
+// ChainExchange is the basic chain synchronization protocol of Filecoin.
+// ChainExchange is an RPC-oriented protocol, with a single operation to
+// request blocks for now.
+//
+// A request contains a start anchor block (referred to with a CID), and a
+// amount of blocks requested beyond the anchor (including the anchor itself).
+//
+// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
+// two options at the moment:
+//
+// - include block contents
+// - include block messages
+//
+// The response will include a status code, an optional message, and the
+// response payload in case of success. The payload is a slice of serialized
+// tipsets.
+package exchange
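
Illustrative sketch, not part of the patch: assuming the `Request` type and the `Headers`/`Messages` option flags defined in protocol.go, a caller holding a `types.TipSetKey` could shape a full-tipset request like this (the helper name is hypothetical and would sit in package exchange):

    // newFullRequest builds a wire request for `length` tipsets ending at the
    // tipset identified by `tsk`, asking for both headers and messages.
    func newFullRequest(tsk types.TipSetKey, length uint64) *Request {
    	return &Request{
    		Head:    tsk.Cids(),          // CIDs of the anchor tipset's blocks
    		Length:  length,              // anchor plus (length-1) ancestors
    		Options: Headers | Messages,  // full tipsets: headers and messages
    	}
    }
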
diff --git a/chain/exchange/interfaces.go b/chain/exchange/interfaces.go
new file mode 100644
index 000000000..acc0854da
--- /dev/null
+++ b/chain/exchange/interfaces.go
@@ -0,0 +1,50 @@
+package exchange
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/peer"
+
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// Server is the responder side of the ChainExchange protocol. It accepts
+// requests from clients and services them by returning the requested
+// chain data.
+type Server interface {
+ // HandleStream is the protocol handler to be registered on a libp2p
+ // protocol router.
+ //
+ // In the current version of the protocol, streams are single-use. The
+ // server will read a single Request, and will respond with a single
+ // Response. It will dispose of the stream straight after.
+ HandleStream(stream network.Stream)
+}
+
+// Client is the requesting side of the ChainExchange protocol. It acts as
+// a proxy for other components to request chain data from peers. It is chiefly
+// used by the Syncer.
+type Client interface {
+ // GetBlocks fetches block headers from the network, from the provided
+ // tipset *backwards*, returning as many tipsets as the count parameter,
+ // or fewer.
+ GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error)
+
+ // GetChainMessages fetches messages from the network, starting from the first provided tipset
+ // and returning messages from as many tipsets as requested, or fewer.
+ GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error)
+
+ // GetFullTipSet fetches a full tipset from a given peer. If successful,
+ // the fetched object contains block headers and all messages in full form.
+ GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error)
+
+ // AddPeer adds a peer to the pool of peers that the Client requests
+ // data from.
+ AddPeer(peer peer.ID)
+
+ // RemovePeer removes a peer from the pool of peers that the Client
+ // requests data from.
+ RemovePeer(peer peer.ID)
+}
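
A sketch of the intended consumer flow, not part of the patch: headers are fetched first with GetBlocks, then the matching compacted messages with GetChainMessages, which the client validates against those same tipsets. `fetchChain` is a hypothetical helper, not an existing Lotus function:

    package example // illustrative only

    import (
    	"context"

    	"github.com/filecoin-project/lotus/chain/exchange"
    	"github.com/filecoin-project/lotus/chain/types"
    )

    // fetchChain fetches the n tipsets ending at head together with their
    // compacted messages, mirroring how a Syncer-like component would use the
    // Client interface.
    func fetchChain(ctx context.Context, c exchange.Client, head types.TipSetKey, n int) ([]*types.TipSet, []*exchange.CompactedMessages, error) {
    	tipsets, err := c.GetBlocks(ctx, head, n)
    	if err != nil {
    		return nil, nil, err
    	}
    	msgs, err := c.GetChainMessages(ctx, tipsets)
    	if err != nil {
    		return nil, nil, err
    	}
    	return tipsets, msgs, nil
    }
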
diff --git a/chain/blocksync/peer_tracker.go b/chain/exchange/peer_tracker.go
similarity index 78%
rename from chain/blocksync/peer_tracker.go
rename to chain/exchange/peer_tracker.go
index f1f6ede07..902baadce 100644
--- a/chain/blocksync/peer_tracker.go
+++ b/chain/exchange/peer_tracker.go
@@ -1,13 +1,16 @@
-package blocksync
+package exchange
// FIXME: This needs to be reviewed.
import (
+ "context"
"sort"
"sync"
"time"
+ host "github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
+ "go.uber.org/fx"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/lib/peermgr"
@@ -29,11 +32,30 @@ type bsPeerTracker struct {
pmgr *peermgr.PeerMgr
}
-func newPeerTracker(pmgr *peermgr.PeerMgr) *bsPeerTracker {
- return &bsPeerTracker{
+func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeerTracker {
+ bsPt := &bsPeerTracker{
peers: make(map[peer.ID]*peerStats),
pmgr: pmgr,
}
+
+ sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer))
+ if err != nil {
+ panic(err)
+ }
+
+ go func() {
+ for newPeer := range sub.Out() {
+ bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id)
+ }
+ }()
+
+ lc.Append(fx.Hook{
+ OnStop: func(ctx context.Context) error {
+ return sub.Close()
+ },
+ })
+
+ return bsPt
}
func (bpt *bsPeerTracker) addPeer(p peer.ID) {
@@ -72,16 +94,7 @@ func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID {
var costI, costJ float64
getPeerInitLat := func(p peer.ID) float64 {
- var res float64
- if bpt.pmgr != nil {
- if lat, ok := bpt.pmgr.GetPeerLatency(p); ok {
- res = float64(lat)
- }
- }
- if res == 0 {
- res = float64(bpt.avgGlobalTime)
- }
- return res * newPeerMul
+ return float64(bpt.avgGlobalTime) * newPeerMul
}
if pi.successes+pi.failures > 0 {
@@ -107,8 +120,8 @@ func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID {
const (
// xInvAlpha = (N+1)/2
- localInvAlpha = 5 // 86% of the value is the last 9
- globalInvAlpha = 20 // 86% of the value is the last 39
+ localInvAlpha = 10 // 86% of the value is the last 19
+ globalInvAlpha = 25 // 86% of the value is the last 49
)
func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) {
@@ -133,7 +146,7 @@ func logTime(pi *peerStats, dur time.Duration) {
}
-func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) {
+func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration, reqSize uint64) {
bpt.lk.Lock()
defer bpt.lk.Unlock()
@@ -145,10 +158,13 @@ func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) {
}
pi.successes++
- logTime(pi, dur)
+ if reqSize == 0 {
+ reqSize = 1
+ }
+ logTime(pi, dur/time.Duration(reqSize))
}
-func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) {
+func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration, reqSize uint64) {
bpt.lk.Lock()
defer bpt.lk.Unlock()
@@ -160,7 +176,10 @@ func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) {
}
pi.failures++
- logTime(pi, dur)
+ if reqSize == 0 {
+ reqSize = 1
+ }
+ logTime(pi, dur/time.Duration(reqSize))
}
func (bpt *bsPeerTracker) removePeer(p peer.ID) {
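
A note on the retuned constants above, for illustration only: with `xInvAlpha = (N+1)/2` the smoothing factor is `alpha = 1/invAlpha = 2/(N+1)`, so roughly `1-(1-alpha)^N`, about 86%, of the running average's weight comes from the most recent N samples; after this change that is the last 19 samples locally and the last 49 globally. `logSuccess` and `logFailure` now also divide the measured duration by the request size before recording it, so the average tracks per-tipset rather than per-request latency. The sketch below shows the standard inverse-alpha update these constants parameterize; the actual `logTime` may differ in detail:

    package example // illustrative only

    import "time"

    // ewmaUpdate folds a new sample into a running average using an
    // inverse-alpha exponentially weighted moving average:
    //   avg' = avg + (sample - avg) / invAlpha
    func ewmaUpdate(avg, sample, invAlpha time.Duration) time.Duration {
    	if avg == 0 {
    		return sample // the first sample seeds the average
    	}
    	return avg + (sample-avg)/invAlpha
    }
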
diff --git a/chain/blocksync/protocol.go b/chain/exchange/protocol.go
similarity index 87%
rename from chain/blocksync/protocol.go
rename to chain/exchange/protocol.go
index 6a2861b80..211479335 100644
--- a/chain/blocksync/protocol.go
+++ b/chain/exchange/protocol.go
@@ -1,4 +1,4 @@
-package blocksync
+package exchange
import (
"time"
@@ -13,9 +13,17 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
-var log = logging.Logger("blocksync")
+var log = logging.Logger("chainxchg")
-const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
+const (
+ // BlockSyncProtocolID is the protocol ID of the former blocksync protocol.
+ // Deprecated.
+ BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
+
+ // ChainExchangeProtocolID is the protocol ID of the chain exchange
+ // protocol.
+ ChainExchangeProtocolID = "/fil/chain/xchg/0.0.1"
+)
// FIXME: Bumped from original 800 to this to accommodate `syncFork()`
// use of `GetBlocks()`. It seems the expectation of that API is to
@@ -25,14 +33,16 @@ const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
// qualifier to avoid "const initializer [...] is not a constant" error.)
var MaxRequestLength = uint64(build.ForkLengthThreshold)
-// Extracted constants from the code.
-// FIXME: Should be reviewed and confirmed.
-const SUCCESS_PEER_TAG_VALUE = 25
-const WRITE_REQ_DEADLINE = 5 * time.Second
-const READ_RES_DEADLINE = WRITE_REQ_DEADLINE
-const READ_RES_MIN_SPEED = 50 << 10
-const SHUFFLE_PEERS_PREFIX = 5
-const WRITE_RES_DEADLINE = 60 * time.Second
+const (
+ // Extracted constants from the code.
+ // FIXME: Should be reviewed and confirmed.
+ SuccessPeerTagValue = 25
+ WriteReqDeadline = 5 * time.Second
+ ReadResDeadline = WriteReqDeadline
+ ReadResMinSpeed = 50 << 10
+ ShufflePeersPrefix = 16
+ WriteResDeadline = 60 * time.Second
+)
// FIXME: Rename. Make private.
type Request struct {
@@ -117,7 +127,7 @@ func (res *Response) statusToError() error {
case NotFound:
return xerrors.Errorf("not found")
case GoAway:
- return xerrors.Errorf("not handling 'go away' blocksync responses yet")
+ return xerrors.Errorf("not handling 'go away' chainxchg responses yet")
case InternalError:
return xerrors.Errorf("block sync peer errored: %s", res.ErrorMessage)
case BadRequest:
@@ -129,6 +139,8 @@ func (res *Response) statusToError() error {
// FIXME: Rename.
type BSTipSet struct {
+ // List of blocks belonging to a single tipset to which the
+ // `CompactedMessages` are linked.
Blocks []*types.BlockHeader
Messages *CompactedMessages
}
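
Illustration only, not part of the patch: the request `Options` field is a 64-bit bitfield and the client composes it as `Headers | Messages`, which implies the two constants are single-bit flags. Under that assumption a responder could decode what was asked for with hypothetical helpers like these, assumed to sit in package exchange:

    // wantsHeaders and wantsMessages report which parts of the chain the
    // requester asked for.
    func wantsHeaders(options uint64) bool  { return options&Headers != 0 }
    func wantsMessages(options uint64) bool { return options&Messages != 0 }
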
diff --git a/chain/blocksync/server.go b/chain/exchange/server.go
similarity index 72%
rename from chain/blocksync/server.go
rename to chain/exchange/server.go
index ffdf79ad0..dcdb5b3a5 100644
--- a/chain/blocksync/server.go
+++ b/chain/exchange/server.go
@@ -1,4 +1,4 @@
-package blocksync
+package exchange
import (
"bufio"
@@ -15,44 +15,34 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/helpers"
inet "github.com/libp2p/go-libp2p-core/network"
)
-// BlockSyncService is the component that services BlockSync requests from
-// peers.
-//
-// BlockSync is the basic chain synchronization protocol of Filecoin. BlockSync
-// is an RPC-oriented protocol, with a single operation to request blocks.
-//
-// A request contains a start anchor block (referred to with a CID), and a
-// amount of blocks requested beyond the anchor (including the anchor itself).
-//
-// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
-// two options at the moment:
-//
-// - include block contents
-// - include block messages
-//
-// The response will include a status code, an optional message, and the
-// response payload in case of success. The payload is a slice of serialized
-// tipsets.
-// FIXME: Rename to just `Server` (will be done later, see note on `BlockSync`).
-type BlockSyncService struct {
+// server implements exchange.Server. It services requests for the
+// libp2p ChainExchange protocol.
+type server struct {
cs *store.ChainStore
}
-func NewBlockSyncService(cs *store.ChainStore) *BlockSyncService {
- return &BlockSyncService{
+var _ Server = (*server)(nil)
+
+// NewServer creates a new libp2p-based exchange.Server. It services requests
+// for the libp2p ChainExchange protocol.
+func NewServer(cs *store.ChainStore) Server {
+ return &server{
cs: cs,
}
}
-// Entry point of the service, handles `Request`s.
-func (server *BlockSyncService) HandleStream(stream inet.Stream) {
- ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream")
+// HandleStream implements Server.HandleStream. Refer to the godocs there.
+func (s *server) HandleStream(stream inet.Stream) {
+ ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream")
defer span.End()
- defer stream.Close() //nolint:errcheck
+ // Note: this will become just stream.Close once we've completed the go-libp2p migration to
+ // go-libp2p-core 0.7.0
+ defer helpers.FullClose(stream) //nolint:errcheck
var req Request
if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil {
@@ -62,13 +52,13 @@ func (server *BlockSyncService) HandleStream(stream inet.Stream) {
log.Infow("block sync request",
"start", req.Head, "len", req.Length)
- resp, err := server.processRequest(ctx, &req)
+ resp, err := s.processRequest(ctx, &req)
if err != nil {
log.Warn("failed to process request: ", err)
return
}
- _ = stream.SetDeadline(time.Now().Add(WRITE_RES_DEADLINE))
+ _ = stream.SetDeadline(time.Now().Add(WriteResDeadline))
if err := cborutil.WriteCborRPC(stream, resp); err != nil {
_ = stream.SetDeadline(time.Time{})
log.Warnw("failed to write back response for handle stream",
@@ -80,10 +70,7 @@ func (server *BlockSyncService) HandleStream(stream inet.Stream) {
// Validate and service the request. We return either a protocol
// response or an internal error.
-func (server *BlockSyncService) processRequest(
- ctx context.Context,
- req *Request,
-) (*Response, error) {
+func (s *server) processRequest(ctx context.Context, req *Request) (*Response, error) {
validReq, errResponse := validateRequest(ctx, req)
if errResponse != nil {
// The request did not pass validation, return the response
@@ -91,17 +78,14 @@ func (server *BlockSyncService) processRequest(
return errResponse, nil
}
- return server.serviceRequest(ctx, validReq)
+ return s.serviceRequest(ctx, validReq)
}
// Validate request. We either return a `validatedRequest`, or an error
// `Response` indicating why we can't process it. We do not return any
// internal errors here, we just signal protocol ones.
-func validateRequest(
- ctx context.Context,
- req *Request,
-) (*validatedRequest, *Response) {
- _, span := trace.StartSpan(ctx, "blocksync.ValidateRequest")
+func validateRequest(ctx context.Context, req *Request) (*validatedRequest, *Response) {
+ _, span := trace.StartSpan(ctx, "chainxchg.ValidateRequest")
defer span.End()
validReq := validatedRequest{}
@@ -147,14 +131,11 @@ func validateRequest(
return &validReq, nil
}
-func (server *BlockSyncService) serviceRequest(
- ctx context.Context,
- req *validatedRequest,
-) (*Response, error) {
- _, span := trace.StartSpan(ctx, "blocksync.ServiceRequest")
+func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Response, error) {
+ _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest")
defer span.End()
- chain, err := collectChainSegment(server.cs, req)
+ chain, err := collectChainSegment(s.cs, req)
if err != nil {
log.Warn("block sync request: collectChainSegment failed: ", err)
return &Response{
@@ -174,10 +155,7 @@ func (server *BlockSyncService) serviceRequest(
}, nil
}
-func collectChainSegment(
- cs *store.ChainStore,
- req *validatedRequest,
-) ([]*BSTipSet, error) {
+func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) {
var bstips []*BSTipSet
cur := req.head
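
Illustrative sketch, not part of the patch: the server only exposes `HandleStream`, so the node builder (outside this diff) has to attach it to the libp2p host. Assuming the wiring mirrors what the client negotiates, both the new ChainExchange ID and the deprecated BlockSync ID would point at the same handler; the function below is hypothetical, not the actual Lotus module code:

    package example // illustrative only

    import (
    	"github.com/libp2p/go-libp2p-core/host"

    	"github.com/filecoin-project/lotus/chain/exchange"
    	"github.com/filecoin-project/lotus/chain/store"
    )

    // registerChainExchange serves the new protocol ID and keeps the deprecated
    // one registered for backwards compatibility with older peers.
    func registerChainExchange(h host.Host, cs *store.ChainStore) {
    	srv := exchange.NewServer(cs)
    	h.SetStreamHandler(exchange.ChainExchangeProtocolID, srv.HandleStream)
    	h.SetStreamHandler(exchange.BlockSyncProtocolID, srv.HandleStream)
    }
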
diff --git a/chain/gen/gen.go b/chain/gen/gen.go
index 4ba127ce9..ee93ef60e 100644
--- a/chain/gen/gen.go
+++ b/chain/gen/gen.go
@@ -8,11 +8,13 @@ import (
"sync/atomic"
"time"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ "github.com/google/uuid"
+
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
@@ -26,6 +28,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -46,7 +49,7 @@ const msgsPerBlock = 20
//nolint:deadcode,varcheck
var log = logging.Logger("gen")
-var ValidWpostForTesting = []abi.PoStProof{{
+var ValidWpostForTesting = []proof.PoStProof{{
ProofBytes: []byte("valid proof"),
}}
@@ -57,7 +60,7 @@ type ChainGen struct {
cs *store.ChainStore
- beacon beacon.RandomBeacon
+ beacon beacon.Schedule
sm *stmgr.StateManager
@@ -119,9 +122,8 @@ var DefaultRemainderAccountActor = genesis.Actor{
}
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
- saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
+ // TODO: we really shouldn't modify a global variable here.
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
mr := repo.NewMemory(nil)
lr, err := mr.Lock(repo.StorageMiner)
@@ -223,7 +225,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
},
VerifregRootKey: DefaultVerifregRootkeyActor,
RemainderAccount: DefaultRemainderAccountActor,
- NetworkName: "",
+ NetworkName: uuid.New().String(),
Timestamp: uint64(build.Clock.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()),
}
@@ -250,7 +252,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
miners := []address.Address{maddr1, maddr2}
- beac := beacon.NewMockBeacon(time.Second)
+ beac := beacon.Schedule{{Start: 0, Beacon: beacon.NewMockBeacon(time.Second)}}
//beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs)
//if err != nil {
//return nil, xerrors.Errorf("creating drand beacon: %w", err)
@@ -336,7 +338,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
prev := mbi.PrevBeaconEntry
- entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, prev)
+ entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, pts.Height(), prev)
if err != nil {
return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err)
}
@@ -356,7 +358,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
return nil, nil, nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
}
- if len(entries) == 0 {
+ if round > build.UpgradeSmokeHeight {
buf.Write(pts.MinTicket().VRFProof)
}
@@ -457,7 +459,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch,
- wpost []abi.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
+ wpost []proof.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
var ts uint64
if cg.Timestamper != nil {
@@ -487,13 +489,16 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
// ResyncBankerNonce is used for dealing with messages made when
// simulating forks
func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error {
- var act types.Actor
- err := cg.sm.WithParentState(ts, cg.sm.WithActor(cg.banker, stmgr.GetActor(&act)))
+ st, err := cg.sm.ParentState(ts)
+ if err != nil {
+ return err
+ }
+ act, err := st.GetActor(cg.banker)
if err != nil {
return err
}
-
cg.bankerNonce = act.Nonce
+
return nil
}
@@ -557,7 +562,7 @@ type mca struct {
w *wallet.LocalWallet
sm *stmgr.StateManager
pv ffiwrapper.Verifier
- bcn beacon.RandomBeacon
+ bcn beacon.Schedule
}
func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@@ -588,7 +593,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
- ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error)
+ ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error)
}
type wppProvider struct{}
@@ -597,7 +602,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil
}
-func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
+func (wpp *wppProvider) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
return ValidWpostForTesting, nil
}
@@ -664,15 +669,15 @@ type genFakeVerifier struct{}
var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil)
-func (m genFakeVerifier) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
return true, nil
}
-func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
-func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
panic("not supported")
}
diff --git a/chain/gen/gen_test.go b/chain/gen/gen_test.go
index 52766af7a..8c38328d0 100644
--- a/chain/gen/gen_test.go
+++ b/chain/gen/gen_test.go
@@ -3,22 +3,17 @@ package gen
import (
"testing"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
)
func init() {
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- power.ConsensusMinerMinPower = big.NewInt(2048)
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
func testGeneration(t testing.TB, n int, msgs int, sectors int) {
diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go
index 05c7b1273..9f15ecaed 100644
--- a/chain/gen/genesis/genesis.go
+++ b/chain/gen/genesis/genesis.go
@@ -6,6 +6,8 @@ import (
"encoding/json"
"fmt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -14,14 +16,14 @@ import (
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ account0 "github.com/filecoin-project/specs-actors/actors/builtin/account"
+ multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
@@ -114,7 +116,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("putting empty object: %w", err)
}
- state, err := state.NewStateTree(cst)
+ state, err := state.NewStateTree(cst, types.StateTreeVersion0)
if err != nil {
return nil, nil, xerrors.Errorf("making new state tree: %w", err)
}
@@ -125,7 +127,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup init actor: %w", err)
}
- if err := state.SetActor(builtin.SystemActorAddr, sysact); err != nil {
+ if err := state.SetActor(builtin0.SystemActorAddr, sysact); err != nil {
return nil, nil, xerrors.Errorf("set init actor: %w", err)
}
@@ -135,7 +137,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup init actor: %w", err)
}
- if err := state.SetActor(builtin.InitActorAddr, initact); err != nil {
+ if err := state.SetActor(builtin0.InitActorAddr, initact); err != nil {
return nil, nil, xerrors.Errorf("set init actor: %w", err)
}
@@ -146,7 +148,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("setup init actor: %w", err)
}
- err = state.SetActor(builtin.RewardActorAddr, rewact)
+ err = state.SetActor(builtin0.RewardActorAddr, rewact)
if err != nil {
return nil, nil, xerrors.Errorf("set network account actor: %w", err)
}
@@ -156,7 +158,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup cron actor: %w", err)
}
- if err := state.SetActor(builtin.CronActorAddr, cronact); err != nil {
+ if err := state.SetActor(builtin0.CronActorAddr, cronact); err != nil {
return nil, nil, xerrors.Errorf("set cron actor: %w", err)
}
@@ -165,7 +167,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
}
- if err := state.SetActor(builtin.StoragePowerActorAddr, spact); err != nil {
+ if err := state.SetActor(builtin0.StoragePowerActorAddr, spact); err != nil {
return nil, nil, xerrors.Errorf("set storage market actor: %w", err)
}
@@ -174,7 +176,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
}
- if err := state.SetActor(builtin.StorageMarketActorAddr, marketact); err != nil {
+ if err := state.SetActor(builtin0.StorageMarketActorAddr, marketact); err != nil {
return nil, nil, xerrors.Errorf("set market actor: %w", err)
}
@@ -183,20 +185,20 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err != nil {
return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
}
- if err := state.SetActor(builtin.VerifiedRegistryActorAddr, verifact); err != nil {
+ if err := state.SetActor(builtin0.VerifiedRegistryActorAddr, verifact); err != nil {
return nil, nil, xerrors.Errorf("set market actor: %w", err)
}
- burntRoot, err := cst.Put(ctx, &account.State{
- Address: builtin.BurntFundsActorAddr,
+ burntRoot, err := cst.Put(ctx, &account0.State{
+ Address: builtin0.BurntFundsActorAddr,
})
if err != nil {
return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err)
}
// Setup burnt-funds
- err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{
- Code: builtin.AccountActorCodeID,
+ err = state.SetActor(builtin0.BurntFundsActorAddr, &types.Actor{
+ Code: builtin0.AccountActorCodeID,
Balance: types.NewInt(0),
Head: burntRoot,
})
@@ -261,13 +263,13 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, err
}
- verifierState, err := cst.Put(ctx, &account.State{Address: verifierAd})
+ verifierState, err := cst.Put(ctx, &account0.State{Address: verifierAd})
if err != nil {
return nil, nil, err
}
err = state.SetActor(verifierId, &types.Actor{
- Code: builtin.AccountActorCodeID,
+ Code: builtin0.AccountActorCodeID,
Balance: types.NewInt(0),
Head: verifierState,
})
@@ -295,14 +297,9 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("somehow overallocated filecoin (allocated = %s)", types.FIL(totalFilAllocated))
}
- remAccKey, err := address.NewIDAddress(90)
- if err != nil {
- return nil, nil, err
- }
-
template.RemainderAccount.Balance = remainingFil
- if err := createMultisigAccount(ctx, bs, cst, state, remAccKey, template.RemainderAccount, keyIDs); err != nil {
+ if err := createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil {
return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err)
}
@@ -314,7 +311,7 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
- st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner})
+ st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner})
if err != nil {
return err
}
@@ -325,7 +322,7 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St
}
err = state.SetActor(ida, &types.Actor{
- Code: builtin.AccountActorCodeID,
+ Code: builtin0.AccountActorCodeID,
Balance: info.Balance,
Head: st,
})
@@ -343,7 +340,7 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
- pending, err := adt.MakeEmptyMap(adt.WrapStore(ctx, cst)).Root()
+ pending, err := adt0.MakeEmptyMap(adt0.WrapStore(ctx, cst)).Root()
if err != nil {
return xerrors.Errorf("failed to create empty map: %v", err)
}
@@ -363,12 +360,12 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
continue
}
- st, err := cst.Put(ctx, &account.State{Address: e})
+ st, err := cst.Put(ctx, &account0.State{Address: e})
if err != nil {
return err
}
err = state.SetActor(idAddress, &types.Actor{
- Code: builtin.AccountActorCodeID,
+ Code: builtin0.AccountActorCodeID,
Balance: types.NewInt(0),
Head: st,
})
@@ -378,7 +375,7 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
signers = append(signers, idAddress)
}
- st, err := cst.Put(ctx, &multisig.State{
+ st, err := cst.Put(ctx, &multisig0.State{
Signers: signers,
NumApprovalsThreshold: uint64(ainfo.Threshold),
StartEpoch: abi.ChainEpoch(ainfo.VestingStart),
@@ -390,7 +387,7 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
return err
}
err = state.SetActor(ida, &types.Actor{
- Code: builtin.MultisigActorCodeID,
+ Code: builtin0.MultisigActorCodeID,
Balance: info.Balance,
Head: st,
})
@@ -411,9 +408,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
Bstore: cs.Blockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()),
CircSupplyCalc: nil,
+ NtwkVersion: genesisNetworkVersion,
BaseFee: types.NewInt(0),
}
- vm, err := vm.NewVM(&vmopt)
+ vm, err := vm.NewVM(ctx, &vmopt)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err)
}
@@ -440,7 +438,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
return cid.Undef, err
}
- _, err = doExecValue(ctx, vm, builtin.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg.AddVerifierParams{
+ _, err = doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{
Address: verifier,
Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough
@@ -451,7 +449,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
}
for c, amt := range verifNeeds {
- _, err := doExecValue(ctx, vm, builtin.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg.AddVerifiedClientParams{
+ _, err := doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{
Address: c,
Allowance: abi.NewStoragePower(int64(amt)),
}))
@@ -493,8 +491,8 @@ func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys vm.SyscallB
return nil, xerrors.Errorf("setup miners failed: %w", err)
}
- store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
- emptyroot, err := adt.MakeEmptyArray(store).Root()
+ store := adt0.WrapStore(ctx, cbor.NewCborStore(bs))
+ emptyroot, err := adt0.MakeEmptyArray(store).Root()
if err != nil {
return nil, xerrors.Errorf("amt build failed: %w", err)
}
@@ -542,7 +540,7 @@ func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys vm.SyscallB
}
b := &types.BlockHeader{
- Miner: builtin.SystemActorAddr,
+ Miner: builtin0.SystemActorAddr,
Ticket: genesisticket,
Parents: []cid.Cid{filecoinGenesisCid},
Height: 0,
diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go
index 1c3f717ad..1023e5efa 100644
--- a/chain/gen/genesis/miners.go
+++ b/chain/gen/genesis/miners.go
@@ -6,7 +6,13 @@ import (
"fmt"
"math/rand"
- "github.com/filecoin-project/lotus/chain/state"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -14,17 +20,17 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/filecoin-project/specs-actors/actors/runtime"
+ "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@@ -68,10 +74,11 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
Bstore: cs.Blockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()),
CircSupplyCalc: csc,
+ NtwkVersion: genesisNetworkVersion,
BaseFee: types.NewInt(0),
}
- vm, err := vm.NewVM(vmopt)
+ vm, err := vm.NewVM(ctx, vmopt)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err)
}
@@ -99,7 +106,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
{
- constructorParams := &power.CreateMinerParams{
+ constructorParams := &power0.CreateMinerParams{
Owner: m.Worker,
Worker: m.Worker,
Peer: []byte(m.PeerId),
@@ -107,12 +114,12 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
params := mustEnc(constructorParams)
- rval, err := doExecValue(ctx, vm, builtin.StoragePowerActorAddr, m.Owner, m.PowerBalance, builtin.MethodsPower.CreateMiner, params)
+ rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin.MethodsPower.CreateMiner, params)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err)
}
- var ma power.CreateMinerReturn
+ var ma power0.CreateMinerReturn
if err := ma.UnmarshalCBOR(bytes.NewReader(rval)); err != nil {
return cid.Undef, xerrors.Errorf("unmarshaling CreateMinerReturn: %w", err)
}
@@ -123,9 +130,10 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
minerInfos[i].maddr = ma.IDAddress
- err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner.State) error {
- maxPeriods := miner.MaxSectorExpirationExtension / miner.WPoStProvingPeriod
- minerInfos[i].presealExp = (maxPeriods-1)*miner.WPoStProvingPeriod + st.ProvingPeriodStart - 1
+ // TODO: ActorUpgrade
+ err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner0.State) error {
+ maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod
+ minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + st.ProvingPeriodStart - 1
return nil
})
@@ -138,7 +146,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
if m.MarketBalance.GreaterThan(big.Zero()) {
params := mustEnc(&minerInfos[i].maddr)
- _, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params)
+ _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err)
}
@@ -150,7 +158,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
publish := func(params *market.PublishStorageDealsParams) error {
fmt.Printf("publishing %d storage deals on miner %s with worker %s\n", len(params.Deals), params.Deals[0].Proposal.Provider, m.Worker)
- ret, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params))
+ ret, err := doExecValue(ctx, vm, market.Address, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params))
if err != nil {
return xerrors.Errorf("failed to create genesis miner (publish deals): %w", err)
}
@@ -201,13 +209,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting deal weight: %w", err)
}
- sectorWeight := miner.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
+ sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
qaPow = types.BigAdd(qaPow, sectorWeight)
}
}
- err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
+ err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
st.TotalQualityAdjPower = qaPow
st.TotalRawBytePower = rawPow
@@ -219,8 +227,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
}
- err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error {
- *st = *reward.ConstructState(qaPow)
+ err = vm.MutateState(ctx, reward.Address, func(sct cbor.IpldStore, st *reward0.State) error {
+ *st = *reward0.ConstructState(qaPow)
return nil
})
if err != nil {
@@ -246,10 +254,10 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting deal weight: %w", err)
}
- sectorWeight := miner.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
+ sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
// we've added fake power for this sector above, remove it now
- err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
+ err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint
st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
return nil
@@ -268,9 +276,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting current total power: %w", err)
}
- pcd := miner.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight)
+ pcd := miner0.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight)
- pledge := miner.InitialPledgeForPower(
+ pledge := miner0.InitialPledgeForPower(
sectorWeight,
epochReward.ThisEpochBaselinePower,
tpow.PledgeCollateral,
@@ -292,7 +300,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
Sectors: []abi.SectorNumber{preseal.SectorID},
}
- _, err = doExecValue(ctx, vm, minerInfos[i].maddr, builtin.StoragePowerActorAddr, big.Zero(), builtin.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams))
+ _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
}
@@ -301,7 +309,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
// Sanity-check total network power
- err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
+ err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
if !st.TotalRawBytePower.Equals(rawPow) {
return xerrors.Errorf("st.TotalRawBytePower doesn't match previously calculated rawPow")
}
@@ -340,12 +348,12 @@ func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization cry
return out, nil
}
-func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power.CurrentTotalPowerReturn, error) {
- pwret, err := doExecValue(ctx, vm, builtin.StoragePowerActorAddr, maddr, big.Zero(), builtin.MethodsPower.CurrentTotalPower, nil)
+func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
+ pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin.MethodsPower.CurrentTotalPower, nil)
if err != nil {
return nil, err
}
- var pwr power.CurrentTotalPowerReturn
+ var pwr power0.CurrentTotalPowerReturn
if err := pwr.UnmarshalCBOR(bytes.NewReader(pwret)); err != nil {
return nil, err
}
@@ -353,38 +361,38 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*
return &pwr, nil
}
-func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market.VerifyDealsForActivationReturn, error) {
+func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market0.VerifyDealsForActivationReturn, error) {
params := &market.VerifyDealsForActivationParams{
DealIDs: dealIDs,
SectorStart: sectorStart,
SectorExpiry: sectorExpiry,
}
- var dealWeights market.VerifyDealsForActivationReturn
+ var dealWeights market0.VerifyDealsForActivationReturn
ret, err := doExecValue(ctx, vm,
- builtin.StorageMarketActorAddr,
+ market.Address,
maddr,
abi.NewTokenAmount(0),
builtin.MethodsMarket.VerifyDealsForActivation,
mustEnc(params),
)
if err != nil {
- return market.VerifyDealsForActivationReturn{}, err
+ return market0.VerifyDealsForActivationReturn{}, err
}
if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
- return market.VerifyDealsForActivationReturn{}, err
+ return market0.VerifyDealsForActivationReturn{}, err
}
return dealWeights, nil
}
-func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward.ThisEpochRewardReturn, error) {
- rwret, err := doExecValue(ctx, vm, builtin.RewardActorAddr, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil)
+func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward0.ThisEpochRewardReturn, error) {
+ rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil)
if err != nil {
return nil, err
}
- var epochReward reward.ThisEpochRewardReturn
+ var epochReward reward0.ThisEpochRewardReturn
if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil {
return nil, err
}
diff --git a/chain/gen/genesis/t01_init.go b/chain/gen/genesis/t01_init.go
index 1686102fe..667079a6d 100644
--- a/chain/gen/genesis/t01_init.go
+++ b/chain/gen/genesis/t01_init.go
@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -50,7 +51,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
fmt.Printf("init set %s t0%d\n", e, counter)
value := cbg.CborInt(counter)
- if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+ if err := amap.Put(abi.AddrKey(e), &value); err != nil {
return 0, nil, nil, err
}
counter = counter + 1
@@ -77,7 +78,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter)
value := cbg.CborInt(counter)
- if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+ if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
return 0, nil, nil, err
}
counter = counter + 1
@@ -95,7 +96,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
value := cbg.CborInt(80)
- if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+ if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
return 0, nil, nil, err
}
} else if rootVerifier.Type == genesis.TMultisig {
@@ -110,7 +111,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
fmt.Printf("init set %s t0%d\n", e, counter)
value := cbg.CborInt(counter)
- if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+ if err := amap.Put(abi.AddrKey(e), &value); err != nil {
return 0, nil, nil, err
}
counter = counter + 1
diff --git a/chain/gen/genesis/t02_reward.go b/chain/gen/genesis/t02_reward.go
index 2f5922fd3..92531051b 100644
--- a/chain/gen/genesis/t02_reward.go
+++ b/chain/gen/genesis/t02_reward.go
@@ -3,10 +3,10 @@ package genesis
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
+ reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/lotus/build"
@@ -17,7 +17,7 @@ import (
func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) {
cst := cbor.NewCborStore(bs)
- st := reward.ConstructState(qaPower)
+ st := reward0.ConstructState(qaPower)
hcid, err := cst.Put(context.TODO(), st)
if err != nil {
diff --git a/chain/gen/genesis/t04_power.go b/chain/gen/genesis/t04_power.go
index 86ba684e0..2f1303ba4 100644
--- a/chain/gen/genesis/t04_power.go
+++ b/chain/gen/genesis/t04_power.go
@@ -6,7 +6,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/util/adt"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/lotus/chain/types"
@@ -30,7 +30,7 @@ func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) {
return nil, err
}
- sms := power.ConstructState(emptyMap, emptyMultiMap)
+ sms := power0.ConstructState(emptyMap, emptyMultiMap)
stcid, err := store.Put(store.Context(), sms)
if err != nil {
diff --git a/chain/gen/genesis/t06_vreg.go b/chain/gen/genesis/t06_vreg.go
index 6636fa05f..1709b205f 100644
--- a/chain/gen/genesis/t06_vreg.go
+++ b/chain/gen/genesis/t06_vreg.go
@@ -7,7 +7,7 @@ import (
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/types"
@@ -34,7 +34,7 @@ func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) {
return nil, err
}
- sms := verifreg.ConstructState(h, RootVerifierID)
+ sms := verifreg0.ConstructState(h, RootVerifierID)
stcid, err := store.Put(store.Context(), sms)
if err != nil {
diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go
index 10081c763..bcafb007e 100644
--- a/chain/gen/genesis/util.go
+++ b/chain/gen/genesis/util.go
@@ -3,8 +3,11 @@ package genesis
import (
"context"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/build"
+
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
@@ -46,3 +49,14 @@ func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value
return ret.Return, nil
}
+
+var GenesisNetworkVersion = func() network.Version { // TODO: Get from build/
+ if build.UseNewestNetwork() { // TODO: Get from build/
+ return build.NewestNetworkVersion // TODO: Get from build/
+ } // TODO: Get from build/
+ return network.Version1 // TODO: Get from build/
+}() // TODO: Get from build/
+
+func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/
+ return GenesisNetworkVersion // TODO: Get from build/
+} // TODO: Get from build/
diff --git a/chain/gen/mining.go b/chain/gen/mining.go
index a9ac47a60..0c2f72590 100644
--- a/chain/gen/mining.go
+++ b/chain/gen/mining.go
@@ -3,7 +3,7 @@ package gen
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/specs-actors/actors/util/adt"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go
index 0d1940421..ee0435156 100644
--- a/chain/gen/slashfilter/slashfilter.go
+++ b/chain/gen/slashfilter/slashfilter.go
@@ -9,8 +9,8 @@ import (
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type SlashFilter struct {
@@ -105,6 +105,10 @@ func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType str
return err
}
+ if other == bh.Cid() {
+ return nil
+ }
+
return xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other)
}
diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go
index f7eab7e0a..aef3b98eb 100644
--- a/chain/market/fundmgr.go
+++ b/chain/market/fundmgr.go
@@ -4,8 +4,8 @@ import (
"context"
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"go.uber.org/fx"
"github.com/filecoin-project/specs-actors/actors/builtin"
@@ -15,6 +15,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
@@ -151,7 +152,7 @@ func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Add
}
smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: market.Address,
From: wallet,
Value: toAdd,
Method: builtin.MethodsMarket.AddBalance,
diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go
index 5e8800528..b05db55d8 100644
--- a/chain/market/fundmgr_test.go
+++ b/chain/market/fundmgr_test.go
@@ -10,13 +10,14 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
tutils "github.com/filecoin-project/specs-actors/support/testing"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -47,7 +48,7 @@ func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, s
func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message {
params, _ := actors.SerializeParams(&addr)
return &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: market.Address,
From: wallet,
Value: toAdd,
Method: builtin.MethodsMarket.AddBalance,
diff --git a/chain/messagepool/gasguess/guessgas.go b/chain/messagepool/gasguess/guessgas.go
index a787b9053..607c7824a 100644
--- a/chain/messagepool/gasguess/guessgas.go
+++ b/chain/messagepool/gasguess/guessgas.go
@@ -9,8 +9,10 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
@@ -27,19 +29,33 @@ type CostKey struct {
}
var Costs = map[CostKey]int64{
- {builtin.InitActorCodeID, 2}: 8916753,
- {builtin.StorageMarketActorCodeID, 2}: 6955002,
- {builtin.StorageMarketActorCodeID, 4}: 245436108,
- {builtin.StorageMinerActorCodeID, 4}: 2315133,
- {builtin.StorageMinerActorCodeID, 5}: 1600271356,
- {builtin.StorageMinerActorCodeID, 6}: 22864493,
- {builtin.StorageMinerActorCodeID, 7}: 142002419,
- {builtin.StorageMinerActorCodeID, 10}: 23008274,
- {builtin.StorageMinerActorCodeID, 11}: 19303178,
- {builtin.StorageMinerActorCodeID, 14}: 566356835,
- {builtin.StorageMinerActorCodeID, 16}: 5325185,
- {builtin.StorageMinerActorCodeID, 18}: 2328637,
- {builtin.StoragePowerActorCodeID, 2}: 23600956,
+ {builtin0.InitActorCodeID, 2}: 8916753,
+ {builtin0.StorageMarketActorCodeID, 2}: 6955002,
+ {builtin0.StorageMarketActorCodeID, 4}: 245436108,
+ {builtin0.StorageMinerActorCodeID, 4}: 2315133,
+ {builtin0.StorageMinerActorCodeID, 5}: 1600271356,
+ {builtin0.StorageMinerActorCodeID, 6}: 22864493,
+ {builtin0.StorageMinerActorCodeID, 7}: 142002419,
+ {builtin0.StorageMinerActorCodeID, 10}: 23008274,
+ {builtin0.StorageMinerActorCodeID, 11}: 19303178,
+ {builtin0.StorageMinerActorCodeID, 14}: 566356835,
+ {builtin0.StorageMinerActorCodeID, 16}: 5325185,
+ {builtin0.StorageMinerActorCodeID, 18}: 2328637,
+ {builtin0.StoragePowerActorCodeID, 2}: 23600956,
+ // TODO: Just reuse v0 values for now; this isn't actually used
+ {builtin2.InitActorCodeID, 2}: 8916753,
+ {builtin2.StorageMarketActorCodeID, 2}: 6955002,
+ {builtin2.StorageMarketActorCodeID, 4}: 245436108,
+ {builtin2.StorageMinerActorCodeID, 4}: 2315133,
+ {builtin2.StorageMinerActorCodeID, 5}: 1600271356,
+ {builtin2.StorageMinerActorCodeID, 6}: 22864493,
+ {builtin2.StorageMinerActorCodeID, 7}: 142002419,
+ {builtin2.StorageMinerActorCodeID, 10}: 23008274,
+ {builtin2.StorageMinerActorCodeID, 11}: 19303178,
+ {builtin2.StorageMinerActorCodeID, 14}: 566356835,
+ {builtin2.StorageMinerActorCodeID, 16}: 5325185,
+ {builtin2.StorageMinerActorCodeID, 18}: 2328637,
+ {builtin2.StoragePowerActorCodeID, 2}: 23600956,
}
func failedGuess(msg *types.SignedMessage) int64 {
@@ -51,7 +67,8 @@ func failedGuess(msg *types.SignedMessage) int64 {
}
func GuessGasUsed(ctx context.Context, tsk types.TipSetKey, msg *types.SignedMessage, al ActorLookup) (int64, error) {
- if msg.Message.Method == builtin.MethodSend {
+ // MethodSend is the same in all versions.
+ if msg.Message.Method == builtin0.MethodSend {
switch msg.Message.From.Protocol() {
case address.BLS:
return 1298450, nil
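Since the gas-guess table is now keyed per actors version, a lookup has to use the code CID from the specs-actors release that matches the receiving actor. A minimal sketch of such a lookup; the local `costKey` type, the subset of entries, and the miss handling are illustrative only, not this package's exported API:

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"

	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)

// costKey mirrors the shape of CostKey above: the receiving actor's code CID
// plus the method number being invoked.
type costKey struct {
	Code   cid.Cid
	Method uint64
}

// A tiny subset of the table above; the v2 entries reuse the v0 values, just
// as the real table does for now.
var costs = map[costKey]int64{
	{builtin0.StorageMinerActorCodeID, 5}: 1600271356,
	{builtin2.StorageMinerActorCodeID, 5}: 1600271356,
}

func main() {
	if gas, ok := costs[costKey{builtin0.StorageMinerActorCodeID, 5}]; ok {
		fmt.Println("guessed gas:", gas)
	} else {
		fmt.Println("unknown (code, method) pair; the real code falls back to failedGuess")
	}
}
```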
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index e41e8b0c7..83aa5c6b7 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -11,8 +11,9 @@ import (
"sync"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-cid"
@@ -31,6 +32,7 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -46,11 +48,14 @@ var rbfDenomBig = types.NewInt(RbfDenom)
const RbfDenom = 256
-var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second
+var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
+var baseFeeLowerBoundFactor = types.NewInt(10)
+var baseFeeLowerBoundFactorConservative = types.NewInt(100)
var MaxActorPendingMessages = 1000
+var MaxUntrustedActorPendingMessages = 10
var MaxNonceGap = uint64(4)
@@ -71,8 +76,6 @@ var (
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
ErrNonceGap = errors.New("unfulfilled nonce gap")
-
- ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
)
const (
@@ -81,6 +84,34 @@ const (
localUpdates = "update"
)
+// Journal event types.
+const (
+ evtTypeMpoolAdd = iota
+ evtTypeMpoolRemove
+ evtTypeMpoolRepub
+)
+
+// MessagePoolEvt is the journal entry for message pool events.
+type MessagePoolEvt struct {
+ Action string
+ Messages []MessagePoolEvtMessage
+ Error error `json:",omitempty"`
+}
+
+type MessagePoolEvtMessage struct {
+ types.Message
+
+ CID cid.Cid
+}
+
+func init() {
+ // if the republish interval is too short compared to the pubsub timecache, adjust it
+ minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)*time.Second
+ if RepublishInterval < minInterval {
+ RepublishInterval = minInterval
+ }
+}
+
type MessagePool struct {
lk sync.Mutex
@@ -126,6 +157,8 @@ type MessagePool struct {
netName dtypes.NetworkName
sigValCache *lru.TwoQueueCache
+
+ evtTypes [3]journal.EventType
}
type msgSet struct {
@@ -142,9 +175,38 @@ func newMsgSet(nonce uint64) *msgSet {
}
}
-func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (bool, error) {
+func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount {
+ minPrice := types.BigAdd(curPrem, types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig))
+ return types.BigAdd(minPrice, types.NewInt(1))
+}
+
+func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) {
+ if maxFee.Equals(big.Zero()) {
+ maxFee = types.NewInt(build.FilecoinPrecision / 10)
+ }
+
+ gl := types.NewInt(uint64(msg.GasLimit))
+ totalFee := types.BigMul(msg.GasFeeCap, gl)
+
+ if totalFee.LessThanEqual(maxFee) {
+ return
+ }
+
+ msg.GasFeeCap = big.Div(maxFee, gl)
+ msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
+}
+
+func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted bool) (bool, error) {
nextNonce := ms.nextNonce
nonceGap := false
+
+ maxNonceGap := MaxNonceGap
+ maxActorPendingMessages := MaxActorPendingMessages
+ if untrusted {
+ maxNonceGap = 0
+ maxActorPendingMessages = MaxUntrustedActorPendingMessages
+ }
+
switch {
case m.Message.Nonce == nextNonce:
nextNonce++
@@ -153,7 +215,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (boo
nextNonce++
}
- case strict && m.Message.Nonce > nextNonce+MaxNonceGap:
+ case strict && m.Message.Nonce > nextNonce+maxNonceGap:
return false, xerrors.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)
case m.Message.Nonce > nextNonce:
@@ -169,9 +231,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (boo
if m.Cid() != exms.Cid() {
// check if RBF passes
- minPrice := exms.Message.GasPremium
- minPrice = types.BigAdd(minPrice, types.BigDiv(types.BigMul(minPrice, rbfNumBig), rbfDenomBig))
- minPrice = types.BigAdd(minPrice, types.NewInt(1))
+ minPrice := ComputeMinRBF(exms.Message.GasPremium)
if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
"newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
@@ -191,7 +251,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (boo
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
}
- if !has && strict && len(ms.msgs) > MaxActorPendingMessages {
+ if !has && strict && len(ms.msgs) >= maxActorPendingMessages {
log.Errorf("too many pending messages from actor %s", m.Message.From)
return false, ErrTooManyPendingMessages
}
@@ -283,6 +343,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
api: api,
netName: netName,
cfg: cfg,
+ evtTypes: [...]journal.EventType{
+ evtTypeMpoolAdd: journal.J.RegisterEventType("mpool", "add"),
+ evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"),
+ evtTypeMpoolRepub: journal.J.RegisterEventType("mpool", "repub"),
+ },
}
// enable initial prunes
@@ -334,10 +399,12 @@ func (mp *MessagePool) runLoop() {
if err := mp.republishPendingMessages(); err != nil {
log.Errorf("error while republishing messages: %s", err)
}
+
case <-mp.pruneTrigger:
if err := mp.pruneExcessMessages(); err != nil {
log.Errorf("failed to prune excess messages from mempool: %s", err)
}
+
case <-mp.closer:
mp.repubTk.Stop()
return
@@ -355,13 +422,57 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error {
return nil
}
-func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, epoch abi.ChainEpoch) error {
+// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
+// and checks whether the message has enough funds to be included in the next 20 blocks.
+// If the message is not valid for block inclusion, it returns an error.
+// For local messages, if the message can be included in the next 20 blocks, it returns true to
+// signal that it should be immediately published. If the message cannot be included in the next 20
+// blocks, it returns false so that the message doesn't immediately get published (and ignored by our
+// peers); instead it will be published through the republish loop, once the base fee has fallen
+// sufficiently.
+// For non-local messages, if the message cannot be included in the next 20 blocks, it returns
+// a (soft) validation error.
+func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
+ epoch := curTs.Height()
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
- return xerrors.Errorf("message will not be included in a block: %w", err)
+ return false, xerrors.Errorf("message will not be included in a block: %w", err)
}
- return nil
+
+ // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks
+ // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely
+ // on republish to push it through later, if the baseFee has fallen.
+ // this is a defensive check that stops minimum baseFee spam attacks from overloading validation
+ // queues.
+ // Note that for local messages, we always add them so that they can be accepted and republished
+ // automatically.
+ publish := local
+
+ var baseFee big.Int
+ if len(curTs.Blocks()) > 0 {
+ baseFee = curTs.Blocks()[0].ParentBaseFee
+ } else {
+ var err error
+ baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs)
+ if err != nil {
+ return false, xerrors.Errorf("computing basefee: %w", err)
+ }
+ }
+
+ baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative)
+ if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
+ if local {
+ log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
+ m.Message.GasFeeCap, baseFeeLowerBound)
+ publish = false
+ } else {
+ return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w",
+ m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
+ }
+ }
+
+ return publish, nil
}
func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
@@ -382,7 +493,8 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
}
mp.curTsLk.Lock()
- if err := mp.addTs(m, mp.curTs); err != nil {
+ publish, err := mp.addTs(m, mp.curTs, true, false)
+ if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
}
@@ -395,7 +507,11 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
}
mp.lk.Unlock()
- return m.Cid(), mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
+ if publish {
+ err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
+ }
+
+ return m.Cid(), err
}
func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
@@ -443,7 +559,9 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
- return mp.addTs(m, mp.curTs)
+
+ _, err = mp.addTs(m, mp.curTs, false, false)
+ return err
}
func sigCacheKey(m *types.SignedMessage) (string, error) {
@@ -510,28 +628,29 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
return nil
}
-func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error {
+func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
snonce, err := mp.getStateNonce(m.Message.From, curTs)
if err != nil {
- return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
+ return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}
if snonce > m.Message.Nonce {
- return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
+ return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
}
mp.lk.Lock()
defer mp.lk.Unlock()
- if err := mp.verifyMsgBeforeAdd(m, curTs.Height()); err != nil {
- return err
+ publish, err := mp.verifyMsgBeforeAdd(m, curTs, local)
+ if err != nil {
+ return false, err
}
if err := mp.checkBalance(m, curTs); err != nil {
- return err
+ return false, err
}
- return mp.addLocked(m, true)
+ return publish, mp.addLocked(m, !local, untrusted)
}
func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
@@ -557,7 +676,8 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
mp.lk.Lock()
defer mp.lk.Unlock()
- if err := mp.verifyMsgBeforeAdd(m, curTs.Height()); err != nil {
+ _, err = mp.verifyMsgBeforeAdd(m, curTs, true)
+ if err != nil {
return err
}
@@ -565,17 +685,17 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
return err
}
- return mp.addLocked(m, false)
+ return mp.addLocked(m, false, false)
}
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
mp.lk.Lock()
defer mp.lk.Unlock()
- return mp.addLocked(m, false)
+ return mp.addLocked(m, false, false)
}
-func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
+func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error {
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
if m.Signature.Type == crypto.SigTypeBLS {
mp.blsSigCache.Add(m.Cid(), m.Signature)
@@ -602,9 +722,9 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
mp.pending[m.Message.From] = mset
}
- incr, err := mset.add(m, mp, strict)
+ incr, err := mset.add(m, mp, strict, untrusted)
if err != nil {
- log.Info(err)
+ log.Debug(err)
return err
}
@@ -623,6 +743,14 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
Type: api.MpoolAdd,
Message: m,
}, localUpdates)
+
+ journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} {
+ return MessagePoolEvt{
+ Action: "add",
+ Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}},
+ }
+ })
+
return nil
}
@@ -674,91 +802,48 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
return act.Balance, nil
}
-func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) {
+// this method is provided for the gateway to push messages.
+// differences from Push:
+// - strict checks are enabled
+// - extra strict add checks are used when adding the messages to the msgSet
+// that means: no nonce gaps, at most 10 pending messages for the actor
+func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
+ err := mp.checkMessage(m)
+ if err != nil {
+ return cid.Undef, err
+ }
+
// serialize push access to reduce lock contention
mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
- mp.curTsLk.Lock()
- mp.lk.Lock()
-
- curTs := mp.curTs
-
- fromKey := addr
- if fromKey.Protocol() == address.ID {
- var err error
- fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs)
- if err != nil {
- mp.lk.Unlock()
- mp.curTsLk.Unlock()
- return nil, xerrors.Errorf("resolving sender key: %w", err)
- }
- }
-
- nonce, err := mp.getNonceLocked(fromKey, mp.curTs)
+ msgb, err := m.Serialize()
if err != nil {
- mp.lk.Unlock()
- mp.curTsLk.Unlock()
- return nil, xerrors.Errorf("get nonce locked failed: %w", err)
+ return cid.Undef, err
}
- // release the locks for signing
- mp.lk.Unlock()
+ mp.curTsLk.Lock()
+ publish, err := mp.addTs(m, mp.curTs, false, true)
+ if err != nil {
+ mp.curTsLk.Unlock()
+ return cid.Undef, err
+ }
mp.curTsLk.Unlock()
- msg, err := cb(fromKey, nonce)
- if err != nil {
- return nil, err
- }
-
- err = mp.checkMessage(msg)
- if err != nil {
- return nil, err
- }
-
- msgb, err := msg.Serialize()
- if err != nil {
- return nil, err
- }
-
- // reacquire the locks and check state for consistency
- mp.curTsLk.Lock()
- defer mp.curTsLk.Unlock()
-
- if mp.curTs != curTs {
- return nil, ErrTryAgain
- }
-
mp.lk.Lock()
- defer mp.lk.Unlock()
+ if err := mp.addLocal(m, msgb); err != nil {
+ mp.lk.Unlock()
+ return cid.Undef, err
+ }
+ mp.lk.Unlock()
- nonce2, err := mp.getNonceLocked(fromKey, mp.curTs)
- if err != nil {
- return nil, xerrors.Errorf("get nonce locked failed: %w", err)
+ if publish {
+ err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
}
- if nonce2 != nonce {
- return nil, ErrTryAgain
- }
-
- if err := mp.verifyMsgBeforeAdd(msg, curTs.Height()); err != nil {
- return nil, err
- }
-
- if err := mp.checkBalance(msg, curTs); err != nil {
- return nil, err
- }
-
- if err := mp.addLocked(msg, true); err != nil {
- return nil, xerrors.Errorf("add locked failed: %w", err)
- }
- if err := mp.addLocal(msg, msgb); err != nil {
- log.Errorf("addLocal failed: %+v", err)
- }
-
- return msg, mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
+ return m.Cid(), err
}
func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) {
@@ -780,6 +865,12 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool)
Message: m,
}, localUpdates)
+ journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} {
+ return MessagePoolEvt{
+ Action: "remove",
+ Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}}
+ })
+
mp.currentSize--
}
@@ -1217,3 +1308,12 @@ func (mp *MessagePool) Clear(local bool) {
delete(mp.pending, a)
}
}
+
+func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
+ baseFeeLowerBound := types.BigDiv(baseFee, factor)
+ if baseFeeLowerBound.LessThan(minimumBaseFee) {
+ baseFeeLowerBound = minimumBaseFee
+ }
+
+ return baseFeeLowerBound
+}
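To make the new spam-protection check easier to follow, here is the lower-bound arithmetic in isolation: divide the current base fee by a factor (10 for pruning and republish, 100 for the conservative add-time check), clamp the result at the minimum base fee, and treat a message whose GasFeeCap is below that bound as not includable in the next 20 blocks. The concrete numbers are made up for illustration; only go-state-types/big helpers that already appear in this diff are used:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

// lowerBound mirrors getBaseFeeLowerBound: baseFee/factor, clamped at the minimum base fee.
func lowerBound(baseFee, factor, minimumBaseFee big.Int) big.Int {
	lb := big.Div(baseFee, factor)
	if lb.LessThan(minimumBaseFee) {
		return minimumBaseFee
	}
	return lb
}

func main() {
	minimumBaseFee := big.NewInt(100)  // illustrative, not the real network constant
	baseFee := big.NewInt(800_000_000) // pretend current parent base fee

	conservative := lowerBound(baseFee, big.NewInt(100), minimumBaseFee) // add-time factor
	fmt.Println(conservative)                                            // 8000000

	// A message whose GasFeeCap sits below the bound is soft-rejected (or, if it is
	// local, held back from immediate publish) until the base fee falls enough.
	gasFeeCap := big.NewInt(1_000_000)
	fmt.Println(gasFeeCap.LessThan(conservative)) // true -> not publishable right now
}
```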
diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go
index cf77f3fb0..41cc16591 100644
--- a/chain/messagepool/messagepool_test.go
+++ b/chain/messagepool/messagepool_test.go
@@ -7,6 +7,8 @@ import (
"testing"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
@@ -14,7 +16,6 @@ import (
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
@@ -34,6 +35,8 @@ type testMpoolAPI struct {
tipsets []*types.TipSet
published int
+
+ baseFee types.BigInt
}
func newTestMpoolAPI() *testMpoolAPI {
@@ -41,6 +44,7 @@ func newTestMpoolAPI() *testMpoolAPI {
bmsgs: make(map[cid.Cid][]*types.SignedMessage),
statenonce: make(map[address.Address]uint64),
balance: make(map[address.Address]types.BigInt),
+ baseFee: types.NewInt(100),
}
genesis := mock.MkBlock(nil, 1, 1)
tma.tipsets = append(tma.tipsets, mock.TipSet(genesis))
@@ -53,6 +57,13 @@ func (tma *testMpoolAPI) nextBlock() *types.BlockHeader {
return newBlk
}
+func (tma *testMpoolAPI) nextBlockWithHeight(height uint64) *types.BlockHeader {
+ newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1)
+ newBlk.Height = abi.ChainEpoch(height)
+ tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk))
+ return newBlk
+}
+
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
t.Helper()
if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil {
@@ -182,7 +193,7 @@ func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
}
func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
- return types.NewInt(100), nil
+ return tma.baseFee, nil
}
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go
index 80b9a4297..347e90044 100644
--- a/chain/messagepool/provider.go
+++ b/chain/messagepool/provider.go
@@ -47,13 +47,15 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
}
func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
- var act types.Actor
stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err)
}
-
- return &act, mpp.sm.WithStateTree(stcid, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
+ st, err := mpp.sm.StateTree(stcid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load state tree: %w", err)
+ }
+ return st.GetActor(addr)
}
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go
index d1290e386..d0e53795a 100644
--- a/chain/messagepool/pruning.go
+++ b/chain/messagepool/pruning.go
@@ -46,13 +46,21 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
if err != nil {
return xerrors.Errorf("computing basefee: %w", err)
}
+ baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
pending, _ := mp.getPendingMessages(ts, ts)
- // priority actors -- not pruned
- priority := make(map[address.Address]struct{})
+ // protected actors -- not pruned
+ protected := make(map[address.Address]struct{})
+
+ // we never prune priority addresses
for _, actor := range mp.cfg.PriorityAddrs {
- priority[actor] = struct{}{}
+ protected[actor] = struct{}{}
+ }
+
+ // we also never prune locally published messages
+ for actor := range mp.localAddrs {
+ protected[actor] = struct{}{}
}
// Collect all messages to track which ones to remove and create chains for block inclusion
@@ -61,18 +69,18 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
var chains []*msgChain
for actor, mset := range pending {
- // we never prune priority actors
- _, keep := priority[actor]
+ // we never prune protected actors
+ _, keep := protected[actor]
if keep {
keepCount += len(mset)
continue
}
- // not a priority actor, track the messages and create chains
+ // not a protected actor, track the messages and create chains
for _, m := range mset {
pruneMsgs[m.Message.Cid()] = m
}
- actorChains := mp.createMessageChains(actor, mset, baseFee, ts)
+ actorChains := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
chains = append(chains, actorChains...)
}
diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go
index 1173bdb48..672119ba9 100644
--- a/chain/messagepool/repub.go
+++ b/chain/messagepool/repub.go
@@ -11,12 +11,13 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/journal"
"github.com/ipfs/go-cid"
)
const repubMsgLimit = 30
-var RepublishBatchDelay = 200 * time.Millisecond
+var RepublishBatchDelay = 100 * time.Millisecond
func (mp *MessagePool) republishPendingMessages() error {
mp.curTsLk.Lock()
@@ -27,6 +28,7 @@ func (mp *MessagePool) republishPendingMessages() error {
mp.curTsLk.Unlock()
return xerrors.Errorf("computing basefee: %w", err)
}
+ baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock()
@@ -55,7 +57,11 @@ func (mp *MessagePool) republishPendingMessages() error {
var chains []*msgChain
for actor, mset := range pending {
- next := mp.createMessageChains(actor, mset, baseFee, ts)
+ // We use the baseFee lower bound for createMessageChains so that we optimistically include
+ // chains that might become profitable in the next 20 blocks.
+ // We still check the lowerBound condition for individual messages so that we don't send
+ // messages that will be rejected by the mpool spam protector, so this is safe to do.
+ next := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
chains = append(chains, next...)
}
@@ -67,15 +73,10 @@ func (mp *MessagePool) republishPendingMessages() error {
return chains[i].Before(chains[j])
})
- // we don't republish negative performing chains; this is an error that will be screamed
- // at the user
- if chains[0].gasPerf < 0 {
- return xerrors.Errorf("skipping republish: all message chains have negative gas performance; best gas performance: %f", chains[0].gasPerf)
- }
-
gasLimit := int64(build.BlockGasLimit)
minGas := int64(gasguess.MinGas)
var msgs []*types.SignedMessage
+loop:
for i := 0; i < len(chains); {
chain := chains[i]
@@ -89,12 +90,6 @@ func (mp *MessagePool) republishPendingMessages() error {
break
}
- // we don't republish negative performing chains, as they won't be included in
- // a block anyway
- if chain.gasPerf < 0 {
- break
- }
-
// has the chain been invalidated?
if !chain.valid {
i++
@@ -103,15 +98,25 @@ func (mp *MessagePool) republishPendingMessages() error {
// does it fit in a block?
if chain.gasLimit <= gasLimit {
- gasLimit -= chain.gasLimit
- msgs = append(msgs, chain.msgs...)
+ // check the baseFee lower bound -- only republish messages that can be included in the chain
+ // within the next 20 blocks.
+ for _, m := range chain.msgs {
+ if !allowNegativeChains(ts.Height()) && m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
+ chain.Invalidate()
+ continue loop
+ }
+ gasLimit -= m.Message.GasLimit
+ msgs = append(msgs, m)
+ }
+
+ // we processed the whole chain, advance
i++
continue
}
// we can't fit the current chain but there is gas to spare
// trim it and push it down
- chain.Trim(gasLimit, mp, baseFee, ts)
+ chain.Trim(gasLimit, mp, baseFee, true)
for j := i; j < len(chains)-1; j++ {
if chains[j].Before(chains[j+1]) {
break
@@ -142,6 +147,19 @@ func (mp *MessagePool) republishPendingMessages() error {
}
}
+ if len(msgs) > 0 {
+ journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
+ msgsEv := make([]MessagePoolEvtMessage, 0, len(msgs))
+ for _, m := range msgs {
+ msgsEv = append(msgsEv, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()})
+ }
+ return MessagePoolEvt{
+ Action: "repub",
+ Messages: msgsEv,
+ }
+ })
+ }
+
// track most recently republished messages
republished := make(map[cid.Cid]struct{})
for _, m := range msgs[:count] {
diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go
index 2e3fa123f..c398ce4ee 100644
--- a/chain/messagepool/repub_test.go
+++ b/chain/messagepool/repub_test.go
@@ -5,10 +5,10 @@ import (
"testing"
"time"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-datastore"
)
diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go
index 5ba679d76..2ddbed0ad 100644
--- a/chain/messagepool/selection.go
+++ b/chain/messagepool/selection.go
@@ -3,21 +3,29 @@ package messagepool
import (
"context"
"math/big"
+ "math/rand"
"sort"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ tbig "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- abig "github.com/filecoin-project/specs-actors/actors/abi/big"
)
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
+// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
+// away in the next fork.
+func allowNegativeChains(epoch abi.ChainEpoch) bool {
+ return epoch < build.UpgradeBreezeHeight+5
+}
+
const MaxBlocks = 15
type msgChain struct {
@@ -100,9 +108,9 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
return chains[i].Before(chains[j])
})
- if len(chains) != 0 && chains[0].gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
- return nil, nil
+ return result, nil
}
// 3. Partition chains into blocks (without trimming)
@@ -153,7 +161,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
last := len(chains)
for i, chain := range chains {
// did we run out of performing chains?
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
break
}
@@ -191,9 +199,11 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
gasLimit -= chainGasLimit
// resort to account for already merged chains and effective performance adjustments
- sort.Slice(chains[i+1:], func(i, j int) bool {
+ // the sort *must* be stable or we end up getting negative gasPerfs pushed up.
+ sort.SliceStable(chains[i+1:], func(i, j int) bool {
return chains[i].BeforeEffective(chains[j])
})
+
continue
}
@@ -217,7 +227,7 @@ tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim if necessary
if chains[last].gasLimit > gasLimit {
- chains[last].Trim(gasLimit, mp, baseFee, ts)
+ chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
}
// push down if it hasn't been invalidated
@@ -243,7 +253,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
break tailLoop
}
@@ -284,7 +294,7 @@ tailLoop:
}
// dependencies fit, just trim it
- chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts)
+ chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
last += i
continue tailLoop
}
@@ -297,6 +307,79 @@ tailLoop:
log.Infow("pack tail chains done", "took", dt)
}
+ // if we have gasLimit to spare, pick some random (non-negative) chains to fill the block
+ // we pick randomly so that we minimize the probability of duplication among all miners
+ if gasLimit >= minGas {
+ randomCount := 0
+
+ startRandom := time.Now()
+ shuffleChains(chains)
+
+ for _, chain := range chains {
+ // have we filled the block
+ if gasLimit < minGas {
+ break
+ }
+
+ // has it been merged or invalidated?
+ if chain.merged || !chain.valid {
+ continue
+ }
+
+ // is it negative?
+ if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ continue
+ }
+
+ // compute the dependencies that must be merged and the gas limit including deps
+ chainGasLimit := chain.gasLimit
+ depGasLimit := int64(0)
+ var chainDeps []*msgChain
+ for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
+ chainDeps = append(chainDeps, curChain)
+ chainGasLimit += curChain.gasLimit
+ depGasLimit += curChain.gasLimit
+ }
+
+ // do the deps fit? if the deps won't fit, invalidate the chain
+ if depGasLimit > gasLimit {
+ chain.Invalidate()
+ continue
+ }
+
+ // do they fit as is? if they don't, trim the chain to fit if possible
+ if chainGasLimit > gasLimit {
+ chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
+
+ if !chain.valid {
+ continue
+ }
+ }
+
+ // include it together with all dependencies
+ for i := len(chainDeps) - 1; i >= 0; i-- {
+ curChain := chainDeps[i]
+ curChain.merged = true
+ result = append(result, curChain.msgs...)
+ randomCount += len(curChain.msgs)
+ }
+
+ chain.merged = true
+ result = append(result, chain.msgs...)
+ randomCount += len(chain.msgs)
+ gasLimit -= chainGasLimit
+ }
+
+ if dt := time.Since(startRandom); dt > time.Millisecond {
+ log.Infow("pack random tail chains done", "took", dt)
+ }
+
+ if randomCount > 0 {
+ log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection",
+ randomCount)
+ }
+ }
+
return result, nil
}
@@ -349,9 +432,9 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
return chains[i].Before(chains[j])
})
- if len(chains) != 0 && chains[0].gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
- return nil, nil
+ return result, nil
}
// 3. Merge the head chains to produce the list of messages selected for inclusion, subject to
@@ -360,7 +443,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
last := len(chains)
for i, chain := range chains {
// did we run out of performing chains?
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
break
}
@@ -389,7 +472,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim
- chains[last].Trim(gasLimit, mp, baseFee, ts)
+ chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
// push down if it hasn't been invalidated
if chains[last].valid {
@@ -409,7 +492,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
break tailLoop
}
@@ -471,7 +554,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
return chains[i].Before(chains[j])
})
- if len(chains) != 0 && chains[0].gasPerf < 0 {
+ if !allowNegativeChains(ts.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
return nil, gasLimit
}
@@ -479,7 +562,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
last := len(chains)
for i, chain := range chains {
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
break
}
@@ -497,7 +580,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim, discarding negative performing messages
- chains[last].Trim(gasLimit, mp, baseFee, ts)
+ chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(ts.Height()))
// push down if it hasn't been invalidated
if chains[last].valid {
@@ -517,7 +600,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if chain.gasPerf < 0 {
+ if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
break tailLoop
}
@@ -592,7 +675,7 @@ func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt)
maxPremium = msg.Message.GasPremium
}
- gasReward := abig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
+ gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
return gasReward.Int
}
@@ -775,9 +858,9 @@ func (mc *msgChain) Before(other *msgChain) bool {
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
-func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet) {
+func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, allowNegative bool) {
i := len(mc.msgs) - 1
- for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
+ for i >= 0 && (mc.gasLimit > gasLimit || (!allowNegative && mc.gasPerf < 0)) {
gasReward := mp.getGasReward(mc.msgs[i], baseFee)
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
mc.gasLimit -= mc.msgs[i].Message.GasLimit
@@ -841,7 +924,16 @@ func (mc *msgChain) SetNullEffectivePerf() {
func (mc *msgChain) BeforeEffective(other *msgChain) bool {
// move merged chains to the front so we can discard them earlier
- return (mc.merged && !other.merged) || mc.effPerf > other.effPerf ||
+ return (mc.merged && !other.merged) ||
+ (mc.gasPerf >= 0 && other.gasPerf < 0) ||
+ mc.effPerf > other.effPerf ||
(mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) ||
(mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
+
+func shuffleChains(lst []*msgChain) {
+ for i := range lst {
+ j := rand.Intn(i + 1)
+ lst[i], lst[j] = lst[j], lst[i]
+ }
+}
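shuffleChains above is an in-place Fisher-Yates-style shuffle; randomizing which leftover chains fill the tail of the block is what minimizes duplicate picks across miners, as noted in the random-fill section. A generic sketch of the same swap pattern over a slice of ints (seeding math/rand is left to the caller):

```go
package main

import (
	"fmt"
	"math/rand"
)

// shuffle applies the same swap pattern as shuffleChains, just over ints.
func shuffle(lst []int) {
	for i := range lst {
		j := rand.Intn(i + 1) // pick a position in [0, i] and swap it into place
		lst[i], lst[j] = lst[j], lst[i]
	}
}

func main() {
	xs := []int{1, 2, 3, 4, 5}
	shuffle(xs)
	fmt.Println(xs) // some permutation of 1..5
}
```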
diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go
index ecbf08b13..72fea4d2c 100644
--- a/chain/messagepool/selection_test.go
+++ b/chain/messagepool/selection_test.go
@@ -1,21 +1,26 @@
package messagepool
import (
+ "compress/gzip"
"context"
+ "encoding/json"
"fmt"
+ "io"
"math"
"math/big"
"math/rand"
+ "os"
+ "sort"
"testing"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
@@ -728,6 +733,102 @@ func TestPriorityMessageSelection2(t *testing.T) {
}
}
+func TestPriorityMessageSelection3(t *testing.T) {
+ t.Skip("reenable after removing allow negative")
+
+ mp, tma := makeTestMpool()
+
+ // the actors
+ w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ block := tma.nextBlock()
+ ts := mock.TipSet(block)
+ tma.applyBlock(t, block)
+
+ gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
+
+ tma.setBalance(a1, 1) // in FIL
+ tma.setBalance(a2, 1) // in FIL
+
+ mp.cfg.PriorityAddrs = []address.Address{a1}
+
+ tma.baseFee = types.NewInt(1000)
+ nMessages := 10
+ for i := 0; i < nMessages; i++ {
+ bias := (nMessages - i) / 3
+ m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1000+i%3+bias))
+ mustAdd(t, mp, m)
+ // messages from a2 have negative performance
+ m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, 100)
+ mustAdd(t, mp, m)
+ }
+
+ // test greedy selection
+ msgs, err := mp.SelectMessages(ts, 1.0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedMsgs := 10
+ if len(msgs) != expectedMsgs {
+ t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
+ }
+
+ // all messages must be from a1
+ nextNonce := uint64(0)
+ for _, m := range msgs {
+ if m.Message.From != a1 {
+ t.Fatal("expected messages from a1 before messages from a2")
+ }
+ if m.Message.Nonce != nextNonce {
+ t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
+ }
+ nextNonce++
+ }
+
+ // test optimal selection
+ msgs, err = mp.SelectMessages(ts, 0.1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedMsgs = 10
+ if len(msgs) != expectedMsgs {
+ t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
+ }
+
+ // all messages must be from a1
+ nextNonce = uint64(0)
+ for _, m := range msgs {
+ if m.Message.From != a1 {
+ t.Fatal("expected messages from a1 before messages from a2")
+ }
+ if m.Message.Nonce != nextNonce {
+ t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
+ }
+ nextNonce++
+ }
+
+}
+
func TestOptimalMessageSelection1(t *testing.T) {
// this test uses just a single actor sending messages with a low tq
// the chain-dependent merging algorithm should pick messages from the actor
@@ -1115,6 +1216,9 @@ func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 {
}
func TestCompetitiveMessageSelectionExp(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
var capacityBoost, rewardBoost, tqReward float64
seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45}
for _, seed := range seeds {
@@ -1185,3 +1289,177 @@ func TestGasReward(t *testing.T) {
})
}
}
+
+func TestRealWorldSelection(t *testing.T) {
+ // load test-messages.json.gz and rewrite the messages so that
+ // 1) we map each real actor to a test actor so that we can sign the messages
+ // 2) adjust the nonces so that they start from 0
+ file, err := os.Open("test-messages.json.gz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gzr, err := gzip.NewReader(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dec := json.NewDecoder(gzr)
+
+ var msgs []*types.SignedMessage
+ baseNonces := make(map[address.Address]uint64)
+
+readLoop:
+ for {
+ m := new(types.SignedMessage)
+ err := dec.Decode(m)
+ switch err {
+ case nil:
+ msgs = append(msgs, m)
+ nonce, ok := baseNonces[m.Message.From]
+ if !ok || m.Message.Nonce < nonce {
+ baseNonces[m.Message.From] = m.Message.Nonce
+ }
+
+ case io.EOF:
+ break readLoop
+
+ default:
+ t.Fatal(err)
+ }
+ }
+
+ actorMap := make(map[address.Address]address.Address)
+ actorWallets := make(map[address.Address]*wallet.Wallet)
+
+ for _, m := range msgs {
+ baseNonce := baseNonces[m.Message.From]
+
+ localActor, ok := actorMap[m.Message.From]
+ if !ok {
+ w, err := wallet.NewWallet(wallet.NewMemKeyStore())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ a, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actorMap[m.Message.From] = a
+ actorWallets[a] = w
+ localActor = a
+ }
+
+ w, ok := actorWallets[localActor]
+ if !ok {
+ t.Fatalf("failed to lookup wallet for actor %s", localActor)
+ }
+
+ m.Message.From = localActor
+ m.Message.Nonce -= baseNonce
+
+ sig, err := w.Sign(context.TODO(), localActor, m.Message.Cid().Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m.Signature = *sig
+ }
+
+ mp, tma := makeTestMpool()
+
+ block := tma.nextBlockWithHeight(build.UpgradeBreezeHeight + 10)
+ ts := mock.TipSet(block)
+ tma.applyBlock(t, block)
+
+ for _, a := range actorMap {
+ tma.setBalance(a, 1000000)
+ }
+
+ tma.baseFee = types.NewInt(800_000_000)
+
+ sort.Slice(msgs, func(i, j int) bool {
+ return msgs[i].Message.Nonce < msgs[j].Message.Nonce
+ })
+
+ // add the messages
+ for _, m := range msgs {
+ mustAdd(t, mp, m)
+ }
+
+ // do message selection and check block packing
+ minGasLimit := int64(0.9 * float64(build.BlockGasLimit))
+
+ // greedy first
+ selected, err := mp.SelectMessages(ts, 1.0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gasLimit := int64(0)
+ for _, m := range selected {
+ gasLimit += m.Message.GasLimit
+ }
+ if gasLimit < minGasLimit {
+ t.Fatalf("failed to pack with tq=1.0; packed %d, minimum packing: %d", gasLimit, minGasLimit)
+ }
+
+ // high quality ticket
+ selected, err = mp.SelectMessages(ts, .8)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gasLimit = int64(0)
+ for _, m := range selected {
+ gasLimit += m.Message.GasLimit
+ }
+ if gasLimit < minGasLimit {
+ t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit)
+ }
+
+ // mid quality ticket
+ selected, err = mp.SelectMessages(ts, .4)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gasLimit = int64(0)
+ for _, m := range selected {
+ gasLimit += m.Message.GasLimit
+ }
+ if gasLimit < minGasLimit {
+ t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit)
+ }
+
+ // low quality ticket
+ selected, err = mp.SelectMessages(ts, .1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gasLimit = int64(0)
+ for _, m := range selected {
+ gasLimit += m.Message.GasLimit
+ }
+ if gasLimit < minGasLimit {
+ t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit)
+ }
+
+ // very low quality ticket
+ selected, err = mp.SelectMessages(ts, .01)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gasLimit = int64(0)
+ for _, m := range selected {
+ gasLimit += m.Message.GasLimit
+ }
+ if gasLimit < minGasLimit {
+ t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit)
+ }
+
+}
diff --git a/chain/messagepool/test-messages.json.gz b/chain/messagepool/test-messages.json.gz
new file mode 100644
index 000000000..09481e1f8
Binary files /dev/null and b/chain/messagepool/test-messages.json.gz differ
diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go
new file mode 100644
index 000000000..ac94d6a3e
--- /dev/null
+++ b/chain/messagesigner/messagesigner.go
@@ -0,0 +1,155 @@
+package messagesigner
+
+import (
+ "bytes"
+ "context"
+ "sync"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/messagepool"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/namespace"
+ logging "github.com/ipfs/go-log/v2"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+)
+
+const dsKeyActorNonce = "ActorNextNonce"
+
+var log = logging.Logger("messagesigner")
+
+type mpoolAPI interface {
+ GetNonce(address.Address) (uint64, error)
+}
+
+// MessageSigner keeps track of nonces per address, and increments the nonce
+// when signing a message
+type MessageSigner struct {
+ wallet *wallet.Wallet
+ lk sync.Mutex
+ mpool mpoolAPI
+ ds datastore.Batching
+}
+
+func NewMessageSigner(wallet *wallet.Wallet, mpool *messagepool.MessagePool, ds dtypes.MetadataDS) *MessageSigner {
+ return newMessageSigner(wallet, mpool, ds)
+}
+
+func newMessageSigner(wallet *wallet.Wallet, mpool mpoolAPI, ds dtypes.MetadataDS) *MessageSigner {
+ ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
+ return &MessageSigner{
+ wallet: wallet,
+ mpool: mpool,
+ ds: ds,
+ }
+}
+
+// SignMessage increments the nonce for the message From address, and signs
+// the message
+func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
+ ms.lk.Lock()
+ defer ms.lk.Unlock()
+
+ // Get the next message nonce
+ nonce, err := ms.nextNonce(msg.From)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create nonce: %w", err)
+ }
+
+ // Sign the message with the nonce
+ msg.Nonce = nonce
+ sig, err := ms.wallet.Sign(ctx, msg.From, msg.Cid().Bytes())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to sign message: %w", err)
+ }
+
+ // Callback with the signed message
+ smsg := &types.SignedMessage{
+ Message: *msg,
+ Signature: *sig,
+ }
+ err = cb(smsg)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the callback executed successfully, write the nonce to the datastore
+ if err := ms.saveNonce(msg.From, nonce); err != nil {
+ return nil, xerrors.Errorf("failed to save nonce: %w", err)
+ }
+
+ return smsg, nil
+}
+
+// nextNonce gets the next nonce for the given address.
+// If there is no nonce in the datastore, gets the nonce from the message pool.
+func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) {
+ // Nonces used to be created by the mempool and we need to support nodes
+ // that have mempool nonces, so first check the mempool for a nonce for
+ // this address. Note that the mempool returns the actor state's nonce
+ // by default.
+ nonce, err := ms.mpool.GetNonce(addr)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err)
+ }
+
+ // Get the next nonce for this address from the datastore
+ addrNonceKey := ms.dstoreKey(addr)
+ dsNonceBytes, err := ms.ds.Get(addrNonceKey)
+
+ switch {
+ case xerrors.Is(err, datastore.ErrNotFound):
+ // If a nonce for this address hasn't yet been created in the
+ // datastore, just use the nonce from the mempool
+ return nonce, nil
+
+ case err != nil:
+ return 0, xerrors.Errorf("failed to get nonce from datastore: %w", err)
+
+ default:
+ // There is a nonce in the datastore, so unmarshal it
+ maj, dsNonce, err := cbg.CborReadHeader(bytes.NewReader(dsNonceBytes))
+ if err != nil {
+ return 0, xerrors.Errorf("failed to parse nonce from datastore: %w", err)
+ }
+ if maj != cbg.MajUnsignedInt {
+ return 0, xerrors.Errorf("bad cbor type parsing nonce from datastore")
+ }
+
+ // The message pool nonce should be less than or equal to the datastore nonce
+ if nonce <= dsNonce {
+ nonce = dsNonce
+ } else {
+ log.Warnf("mempool nonce was larger than datastore nonce (%d > %d)", nonce, dsNonce)
+ }
+
+ return nonce, nil
+ }
+}
+
+// saveNonce increments the nonce for this address and writes it to the
+// datastore
+func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error {
+ // Increment the nonce
+ nonce++
+
+ // Write the nonce to the datastore
+ addrNonceKey := ms.dstoreKey(addr)
+ buf := bytes.Buffer{}
+ _, err := buf.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, nonce))
+ if err != nil {
+ return xerrors.Errorf("failed to marshall nonce: %w", err)
+ }
+ err = ms.ds.Put(addrNonceKey, buf.Bytes())
+ if err != nil {
+ return xerrors.Errorf("failed to write nonce to datastore: %w", err)
+ }
+ return nil
+}
+
+func (ms *MessageSigner) dstoreKey(addr address.Address) datastore.Key {
+ return datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()})
+}
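
A minimal usage sketch of the new signer, assuming the wallet, message pool and metadata datastore are already wired up; the push callback here is a placeholder, not a real Lotus API. The property to note is that the nonce is only persisted when the callback succeeds, which is what the tests below exercise.

package example

import (
	"context"

	"github.com/filecoin-project/lotus/chain/messagesigner"
	"github.com/filecoin-project/lotus/chain/types"
)

// signAndPush signs msg with the next nonce for msg.From and hands the signed
// message to push; the signer commits the nonce to its datastore only if push
// returns nil.
func signAndPush(ctx context.Context, ms *messagesigner.MessageSigner, msg *types.Message,
	push func(*types.SignedMessage) error) (*types.SignedMessage, error) {
	return ms.SignMessage(ctx, msg, push)
}
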
diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go
new file mode 100644
index 000000000..04869ff6d
--- /dev/null
+++ b/chain/messagesigner/messagesigner_test.go
@@ -0,0 +1,201 @@
+package messagesigner
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/wallet"
+
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/stretchr/testify/require"
+
+ ds_sync "github.com/ipfs/go-datastore/sync"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/ipfs/go-datastore"
+)
+
+type mockMpool struct {
+ lk sync.RWMutex
+ nonces map[address.Address]uint64
+}
+
+func newMockMpool() *mockMpool {
+ return &mockMpool{nonces: make(map[address.Address]uint64)}
+}
+
+func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) {
+ mp.lk.Lock()
+ defer mp.lk.Unlock()
+
+ mp.nonces[addr] = nonce
+}
+
+func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) {
+ mp.lk.RLock()
+ defer mp.lk.RUnlock()
+
+ return mp.nonces[addr], nil
+}
+
+func TestMessageSignerSignMessage(t *testing.T) {
+ ctx := context.Background()
+
+ w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
+ from1, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ from2, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ to1, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ to2, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+
+ type msgSpec struct {
+ msg *types.Message
+ mpoolNonce [1]uint64
+ expNonce uint64
+ cbErr error
+ }
+ tests := []struct {
+ name string
+ msgs []msgSpec
+ }{{
+ // No nonce yet in datastore
+ name: "no nonce yet",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 0,
+ }},
+ }, {
+ // Get nonce value of zero from mpool
+ name: "mpool nonce zero",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ mpoolNonce: [1]uint64{0},
+ expNonce: 0,
+ }},
+ }, {
+ // Get non-zero nonce value from mpool
+ name: "mpool nonce set",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ mpoolNonce: [1]uint64{5},
+ expNonce: 5,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ // Should adjust datastore nonce because mpool nonce is higher
+ mpoolNonce: [1]uint64{10},
+ expNonce: 10,
+ }},
+ }, {
+ // Nonce should increment independently for each address
+ name: "nonce increments per address",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 0,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 1,
+ }, {
+ msg: &types.Message{
+ To: to2,
+ From: from2,
+ },
+ mpoolNonce: [1]uint64{5},
+ expNonce: 5,
+ }, {
+ msg: &types.Message{
+ To: to2,
+ From: from2,
+ },
+ expNonce: 6,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 2,
+ }},
+ }, {
+ name: "recover from callback error",
+ msgs: []msgSpec{{
+ // No nonce yet in datastore
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 0,
+ }, {
+ // Increment nonce
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 1,
+ }, {
+ // Callback returns error
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ cbErr: xerrors.Errorf("err"),
+ }, {
+ // Callback successful, should increment nonce in datastore
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 2,
+ }},
+ }}
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ mpool := newMockMpool()
+ ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+ ms := newMessageSigner(w, mpool, ds)
+
+ for _, m := range tt.msgs {
+ if len(m.mpoolNonce) == 1 {
+ mpool.setNonce(m.msg.From, m.mpoolNonce[0])
+ }
+ merr := m.cbErr
+ smsg, err := ms.SignMessage(ctx, m.msg, func(message *types.SignedMessage) error {
+ return merr
+ })
+
+ if m.cbErr != nil {
+ require.Error(t, err)
+ require.Nil(t, smsg)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, m.expNonce, smsg.Message.Nonce)
+ }
+ }
+ })
+ }
+}
diff --git a/chain/metrics/consensus.go b/chain/metrics/consensus.go
index 25e299247..c3c4a10d1 100644
--- a/chain/metrics/consensus.go
+++ b/chain/metrics/consensus.go
@@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
diff --git a/chain/state/statetree.go b/chain/state/statetree.go
index c083f1817..e9b76ea77 100644
--- a/chain/state/statetree.go
+++ b/chain/state/statetree.go
@@ -1,13 +1,10 @@
package state
import (
+ "bytes"
"context"
"fmt"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
-
- "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
@@ -15,6 +12,13 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -22,8 +26,10 @@ var log = logging.Logger("statetree")
// StateTree stores actors state by their ID.
type StateTree struct {
- root *adt.Map
- Store cbor.IpldStore
+ root adt.Map
+ version types.StateTreeVersion
+ info cid.Cid
+ Store cbor.IpldStore
snaps *stateSnaps
}
@@ -115,29 +121,87 @@ func (ss *stateSnaps) deleteActor(addr address.Address) {
ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Delete: true}
}
-func NewStateTree(cst cbor.IpldStore) (*StateTree, error) {
-
- return &StateTree{
- root: adt.MakeEmptyMap(adt.WrapStore(context.TODO(), cst)),
- Store: cst,
- snaps: newStateSnaps(),
- }, nil
+// VersionForNetwork returns the state tree version for the given network
+// version.
+func VersionForNetwork(ver network.Version) types.StateTreeVersion {
+ if actors.VersionForNetwork(ver) == actors.Version0 {
+ return types.StateTreeVersion0
+ }
+ return types.StateTreeVersion1
}
-func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
- nd, err := adt.AsMap(adt.WrapStore(context.TODO(), cst), c)
+func adtForSTVersion(ver types.StateTreeVersion) actors.Version {
+ switch ver {
+ case types.StateTreeVersion0:
+ return actors.Version0
+ case types.StateTreeVersion1:
+ return actors.Version2
+ default:
+ panic("unhandled state tree version")
+ }
+}
+
+func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, error) {
+ var info cid.Cid
+ switch ver {
+ case types.StateTreeVersion0:
+ // info is undefined
+ case types.StateTreeVersion1:
+ var err error
+ info, err = cst.Put(context.TODO(), new(types.StateInfo0))
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
+ }
+ root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver))
if err != nil {
- log.Errorf("loading hamt node %s failed: %s", c, err)
return nil, err
}
return &StateTree{
- root: nd,
- Store: cst,
- snaps: newStateSnaps(),
+ root: root,
+ info: info,
+ version: ver,
+ Store: cst,
+ snaps: newStateSnaps(),
}, nil
}
+func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
+ var root types.StateRoot
+ // Try loading as a new-style state-tree (version/actors tuple).
+ if err := cst.Get(context.TODO(), c, &root); err != nil {
+ // We failed to decode as the new version, must be an old version.
+ root.Actors = c
+ root.Version = types.StateTreeVersion0
+ }
+
+ switch root.Version {
+ case types.StateTreeVersion0, types.StateTreeVersion1:
+ // Load the actual state-tree HAMT.
+ nd, err := adt.AsMap(
+ adt.WrapStore(context.TODO(), cst), root.Actors,
+ adtForSTVersion(root.Version),
+ )
+ if err != nil {
+ log.Errorf("loading hamt node %s failed: %s", c, err)
+ return nil, err
+ }
+
+ return &StateTree{
+ root: nd,
+ info: root.Info,
+ version: root.Version,
+ Store: cst,
+ snaps: newStateSnaps(),
+ }, nil
+ default:
+ return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
+ }
+}
+
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
iaddr, err := st.LookupID(addr)
if err != nil {
@@ -160,17 +224,17 @@ func (st *StateTree) LookupID(addr address.Address) (address.Address, error) {
return resa, nil
}
- act, err := st.GetActor(builtin.InitActorAddr)
+ act, err := st.GetActor(init_.Address)
if err != nil {
return address.Undef, xerrors.Errorf("getting init actor: %w", err)
}
- var ias init_.State
- if err := st.Store.Get(context.TODO(), act.Head, &ias); err != nil {
+ ias, err := init_.Load(&AdtStore{st.Store}, act)
+ if err != nil {
return address.Undef, xerrors.Errorf("loading init actor state: %w", err)
}
- a, found, err := ias.ResolveAddress(&AdtStore{st.Store}, addr)
+ a, found, err := ias.ResolveAddress(addr)
if err == nil && !found {
err = types.ErrActorNotFound
}
@@ -209,7 +273,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
}
var act types.Actor
- if found, err := st.root.Get(adt.AddrKey(addr), &act); err != nil {
+ if found, err := st.root.Get(abi.AddrKey(addr), &act); err != nil {
return nil, xerrors.Errorf("hamt find failed: %w", err)
} else if !found {
return nil, types.ErrActorNotFound
@@ -254,17 +318,26 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
for addr, sto := range st.snaps.layers[0].actors {
if sto.Delete {
- if err := st.root.Delete(adt.AddrKey(addr)); err != nil {
+ if err := st.root.Delete(abi.AddrKey(addr)); err != nil {
return cid.Undef, err
}
} else {
- if err := st.root.Put(adt.AddrKey(addr), &sto.Act); err != nil {
+ if err := st.root.Put(abi.AddrKey(addr), &sto.Act); err != nil {
return cid.Undef, err
}
}
}
- return st.root.Root()
+ root, err := st.root.Root()
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to flush state-tree hamt: %w", err)
+ }
+ // If we're version 0, return a raw tree.
+ if st.version == types.StateTreeVersion0 {
+ return root, nil
+ }
+ // Otherwise, return a versioned tree.
+ return st.Store.Put(ctx, &types.StateRoot{Version: st.version, Actors: root, Info: st.info})
}
func (st *StateTree) Snapshot(ctx context.Context) error {
@@ -282,19 +355,19 @@ func (st *StateTree) ClearSnapshot() {
func (st *StateTree) RegisterNewAddress(addr address.Address) (address.Address, error) {
var out address.Address
- err := st.MutateActor(builtin.InitActorAddr, func(initact *types.Actor) error {
- var ias init_.State
- if err := st.Store.Get(context.TODO(), initact.Head, &ias); err != nil {
+ err := st.MutateActor(init_.Address, func(initact *types.Actor) error {
+ ias, err := init_.Load(&AdtStore{st.Store}, initact)
+ if err != nil {
return err
}
- oaddr, err := ias.MapAddressToNewID(&AdtStore{st.Store}, addr)
+ oaddr, err := ias.MapAddressToNewID(addr)
if err != nil {
return err
}
out = oaddr
- ncid, err := st.Store.Put(context.TODO(), &ias)
+ ncid, err := st.Store.Put(context.TODO(), ias)
if err != nil {
return err
}
@@ -340,6 +413,7 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro
func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error {
var act types.Actor
return st.root.ForEach(&act, func(k string) error {
+ act := act // copy
addr, err := address.NewFromBytes([]byte(k))
if err != nil {
return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err)
@@ -348,3 +422,49 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
return f(addr, &act)
})
}
+
+// Version returns the version of the StateTree data structure in use.
+func (st *StateTree) Version() types.StateTreeVersion {
+ return st.version
+}
+
+func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) {
+ out := map[string]types.Actor{}
+
+ var (
+ ncval, ocval cbg.Deferred
+ buf = bytes.NewReader(nil)
+ )
+ if err := newTree.root.ForEach(&ncval, func(k string) error {
+ var act types.Actor
+
+ addr, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return xerrors.Errorf("address in state tree was not valid: %w", err)
+ }
+
+ found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval)
+ if err != nil {
+ return err
+ }
+
+ if found && bytes.Equal(ocval.Raw, ncval.Raw) {
+ return nil // not changed
+ }
+
+ buf.Reset(ncval.Raw)
+ err = act.UnmarshalCBOR(buf)
+ buf.Reset(nil)
+
+ if err != nil {
+ return err
+ }
+
+ out[addr.String()] = act
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
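
For orientation, a small sketch of how the versioned constructor and the new Diff helper are meant to be used; the wrapper names are illustrative, only state.NewStateTree, state.VersionForNetwork and state.Diff come from the change above.

package example

import (
	"github.com/filecoin-project/go-state-types/network"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/types"
)

// newVersionedTree picks the state-tree version from the network version
// instead of assuming the unversioned (v0) HAMT layout.
func newVersionedTree(cst cbor.IpldStore, nv network.Version) (*state.StateTree, error) {
	return state.NewStateTree(cst, state.VersionForNetwork(nv))
}

// changedActors returns the actors whose serialized state differs between two
// trees, keyed by address string, by delegating to the new Diff function.
func changedActors(oldTree, newTree *state.StateTree) (map[string]types.Actor, error) {
	return state.Diff(oldTree, newTree)
}
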
diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go
index e45090d1a..ed1fb1889 100644
--- a/chain/state/statetree_test.go
+++ b/chain/state/statetree_test.go
@@ -5,17 +5,20 @@ import (
"fmt"
"testing"
- "github.com/filecoin-project/specs-actors/actors/builtin"
-
- address "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
+
+ address "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
)
func BenchmarkStateTreeSet(b *testing.B) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
if err != nil {
b.Fatal(err)
}
@@ -42,7 +45,7 @@ func BenchmarkStateTreeSet(b *testing.B) {
func BenchmarkStateTreeSetFlush(b *testing.B) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
if err != nil {
b.Fatal(err)
}
@@ -72,7 +75,7 @@ func BenchmarkStateTreeSetFlush(b *testing.B) {
func BenchmarkStateTree10kGetActor(b *testing.B) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
if err != nil {
b.Fatal(err)
}
@@ -114,7 +117,7 @@ func BenchmarkStateTree10kGetActor(b *testing.B) {
func TestSetCache(t *testing.T) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
if err != nil {
t.Fatal(err)
}
@@ -151,7 +154,7 @@ func TestSetCache(t *testing.T) {
func TestSnapshots(t *testing.T) {
ctx := context.Background()
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
if err != nil {
t.Fatal(err)
}
@@ -234,7 +237,8 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) {
func TestStateTreeConsistency(t *testing.T) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst)
+ // TODO: ActorUpgrade: this test exercises the pre-actors-v2 state tree
+ st, err := NewStateTree(cst, VersionForNetwork(network.Version3))
if err != nil {
t.Fatal(err)
}
diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go
index b21fb75f2..df3bfa357 100644
--- a/chain/stmgr/call.go
+++ b/chain/stmgr/call.go
@@ -2,11 +2,11 @@ package stmgr
import (
"context"
+ "errors"
"fmt"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/ipfs/go-cid"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -18,21 +18,55 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
)
-func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate cid.Cid, r vm.Rand, bheight abi.ChainEpoch) (*api.InvocResult, error) {
- ctx, span := trace.StartSpan(ctx, "statemanager.CallRaw")
+var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at epoch")
+
+func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) {
+ ctx, span := trace.StartSpan(ctx, "statemanager.Call")
defer span.End()
+ // If no tipset is provided, try to find one without a fork.
+ if ts == nil {
+ ts = sm.cs.GetHeaviestTipSet()
+
+ // Search back till we find a height with no fork, or we reach the beginning.
+ for ts.Height() > 0 && sm.hasExpensiveFork(ctx, ts.Height()-1) {
+ var err error
+ ts, err = sm.cs.GetTipSetFromKey(ts.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
+ }
+ }
+ }
+
+ bstate := ts.ParentState()
+ bheight := ts.Height()
+
+ // If we have to run an expensive migration, and we're not at genesis,
+ // return an error because the migration will take too long.
+ //
+ // We allow this at height 0 for at-genesis migrations (for testing).
+ if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) {
+ return nil, ErrExpensiveFork
+ }
+
+ // Run the (not expensive) migration.
+ bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to handle fork: %w", err)
+ }
+
vmopt := &vm.VMOpts{
StateBase: bstate,
Epoch: bheight,
- Rand: r,
+ Rand: store.NewChainRand(sm.cs, ts.Cids()),
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetCirculatingSupply,
+ NtwkVersion: sm.GetNtwkVersion,
BaseFee: types.NewInt(0),
}
- vmi, err := vm.NewVM(vmopt)
+ vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return nil, xerrors.Errorf("failed to set up vm: %w", err)
}
@@ -88,24 +122,30 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate
}
-func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) {
- if ts == nil {
- ts = sm.cs.GetHeaviestTipSet()
- }
-
- state := ts.ParentState()
-
- r := store.NewChainRand(sm.cs, ts.Cids())
-
- return sm.CallRaw(ctx, msg, state, r, ts.Height())
-}
-
func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*api.InvocResult, error) {
ctx, span := trace.StartSpan(ctx, "statemanager.CallWithGas")
defer span.End()
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
+
+ // Search back till we find a height with no fork, or we reach the beginning.
+ // We need the _previous_ height to have no fork, because we'll
+ // run the fork logic in `sm.TipSetState`. We need the _current_
+ // height to have no fork, because we'll run it inside this
+ // function before executing the given message.
+ for ts.Height() > 0 && (sm.hasExpensiveFork(ctx, ts.Height()) || sm.hasExpensiveFork(ctx, ts.Height()-1)) {
+ var err error
+ ts, err = sm.cs.GetTipSetFromKey(ts.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err)
+ }
+ }
+ }
+
+ // When we're not at the genesis block, make sure we don't have an expensive migration.
+ if ts.Height() > 0 && (sm.hasExpensiveFork(ctx, ts.Height()) || sm.hasExpensiveFork(ctx, ts.Height()-1)) {
+ return nil, ErrExpensiveFork
}
state, _, err := sm.TipSetState(ctx, ts)
@@ -113,6 +153,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
return nil, xerrors.Errorf("computing tipset state: %w", err)
}
+ state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to handle fork: %w", err)
+ }
+
r := store.NewChainRand(sm.cs, ts.Cids())
if span.IsRecordingEvents() {
@@ -130,9 +175,10 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetCirculatingSupply,
+ NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
}
- vmi, err := vm.NewVM(vmopt)
+ vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return nil, xerrors.Errorf("failed to set up vm: %w", err)
}
diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go
index c7c7526b3..a61f70b44 100644
--- a/chain/stmgr/forks.go
+++ b/chain/stmgr/forks.go
@@ -1,22 +1,866 @@
package stmgr
import (
+ "bytes"
"context"
+ "encoding/binary"
+ "math"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/ipfs/go-cid"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ "golang.org/x/xerrors"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ "github.com/filecoin-project/specs-actors/actors/migration/nv3"
+ m2 "github.com/filecoin-project/specs-actors/v2/actors/migration"
+ states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/lotus/chain/vm"
+ bstore "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/lib/bufbstore"
)
-var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree) error{}
+// UpgradeFunc is a migration function run at every upgrade.
+//
+// - The oldState is the state produced by the upgrade epoch.
+// - The returned newState is the new state that will be used by the next epoch.
+// - The height is the upgrade epoch height (already executed).
+// - The tipset is the tipset for the last non-null block before the upgrade. Do
+// not assume that ts.Height() is the upgrade height.
+type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error)
-func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch) (err error) {
- f, ok := ForksAtHeight[height]
+type Upgrade struct {
+ Height abi.ChainEpoch
+ Network network.Version
+ Expensive bool
+ Migration UpgradeFunc
+}
+
+type UpgradeSchedule []Upgrade
+
+func DefaultUpgradeSchedule() UpgradeSchedule {
+ var us UpgradeSchedule
+
+ updates := []Upgrade{{
+ Height: build.UpgradeBreezeHeight,
+ Network: network.Version1,
+ Migration: UpgradeFaucetBurnRecovery,
+ }, {
+ Height: build.UpgradeSmokeHeight,
+ Network: network.Version2,
+ Migration: nil,
+ }, {
+ Height: build.UpgradeIgnitionHeight,
+ Network: network.Version3,
+ Migration: UpgradeIgnition,
+ }, {
+ Height: build.UpgradeRefuelHeight,
+ Network: network.Version3,
+ Migration: UpgradeRefuel,
+ }, {
+ Height: build.UpgradeActorsV2Height,
+ Network: network.Version4,
+ Expensive: true,
+ Migration: UpgradeActorsV2,
+ }, {
+ Height: build.UpgradeLiftoffHeight,
+ Network: network.Version4,
+ Migration: UpgradeLiftoff,
+ }}
+
+ if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
+ updates = []Upgrade{{
+ Height: build.UpgradeBreezeHeight,
+ Network: network.Version1,
+ Migration: UpgradeFaucetBurnRecovery,
+ }, {
+ Height: build.UpgradeSmokeHeight,
+ Network: network.Version2,
+ Migration: nil,
+ }, {
+ Height: build.UpgradeIgnitionHeight,
+ Network: network.Version3,
+ Migration: UpgradeIgnition,
+ }, {
+ Height: build.UpgradeRefuelHeight,
+ Network: network.Version3,
+ Migration: UpgradeRefuel,
+ }, {
+ Height: build.UpgradeLiftoffHeight,
+ Network: network.Version3,
+ Migration: UpgradeLiftoff,
+ }}
+ }
+
+ for _, u := range updates {
+ if u.Height < 0 {
+ // upgrade disabled
+ continue
+ }
+ us = append(us, u)
+ }
+ return us
+}
+
+func (us UpgradeSchedule) Validate() error {
+ // Make sure we're not trying to upgrade to version 0.
+ for _, u := range us {
+ if u.Network <= 0 {
+ return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network)
+ }
+ }
+
+ // Make sure all the upgrades make sense.
+ for i := 1; i < len(us); i++ {
+ prev := &us[i-1]
+ curr := &us[i]
+ if !(prev.Network <= curr.Network) {
+ return xerrors.Errorf("cannot downgrade from version %d to version %d", prev.Network, curr.Network)
+ }
+ // Make sure the heights make sense.
+ if prev.Height < 0 {
+ // Previous upgrade was disabled.
+ continue
+ }
+ if !(prev.Height < curr.Height) {
+ return xerrors.Errorf("upgrade heights must be strictly increasing: upgrade %d was at height %d, followed by upgrade %d at height %d", i-1, prev.Height, i, curr.Height)
+ }
+ }
+ return nil
+}
+
+func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
+ retCid := root
+ var err error
+ f, ok := sm.stateMigrations[height]
if ok {
- err := f(ctx, sm, st)
+ retCid, err = f(ctx, sm, cb, root, height, ts)
if err != nil {
- return err
+ return cid.Undef, err
+ }
+ }
+
+ return retCid, nil
+}
+
+func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEpoch) bool {
+ _, ok := sm.expensiveUpgrades[height]
+ return ok
+}
+
+func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error {
+ fromAct, err := tree.GetActor(from)
+ if err != nil {
+ return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err)
+ }
+
+ fromAct.Balance = types.BigSub(fromAct.Balance, amt)
+ if fromAct.Balance.Sign() < 0 {
+ return xerrors.Errorf("(sanity) deducted more funds from target account than it had (%s, %s)", from, types.FIL(amt))
+ }
+
+ if err := tree.SetActor(from, fromAct); err != nil {
+ return xerrors.Errorf("failed to persist from actor: %w", err)
+ }
+
+ toAct, err := tree.GetActor(to)
+ if err != nil {
+ return xerrors.Errorf("failed to get 'to' actor for transfer: %w", err)
+ }
+
+ toAct.Balance = types.BigAdd(toAct.Balance, amt)
+
+ if err := tree.SetActor(to, toAct); err != nil {
+ return xerrors.Errorf("failed to persist to actor: %w", err)
+ }
+
+ if cb != nil {
+ // record the transfer in execution traces
+
+ fakeMsg := &types.Message{
+ From: from,
+ To: to,
+ Value: amt,
+ Nonce: math.MaxUint64,
+ }
+ fakeRct := &types.MessageReceipt{
+ ExitCode: 0,
+ Return: nil,
+ GasUsed: 0,
+ }
+
+ if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+ MessageReceipt: *fakeRct,
+ ActorErr: nil,
+ ExecutionTrace: types.ExecutionTrace{
+ Msg: fakeMsg,
+ MsgRct: fakeRct,
+ Error: "",
+ Duration: 0,
+ GasCharges: nil,
+ Subcalls: nil,
+ },
+ Duration: 0,
+ GasCosts: vm.ZeroGasOutputs(),
+ }); err != nil {
+ return xerrors.Errorf("recording transfer: %w", err)
}
}
return nil
}
+
+func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ // Some initial parameters
+ FundsForMiners := types.FromFil(1_000_000)
+ LookbackEpoch := abi.ChainEpoch(32000)
+ AccountCap := types.FromFil(0)
+ BaseMinerBalance := types.FromFil(20)
+ DesiredReimbursementBalance := types.FromFil(5_000_000)
+
+ isSystemAccount := func(addr address.Address) (bool, error) {
+ id, err := address.IDFromAddress(addr)
+ if err != nil {
+ return false, xerrors.Errorf("id address: %w", err)
+ }
+
+ if id < 1000 {
+ return true, nil
+ }
+ return false, nil
+ }
+
+ minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount {
+ return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow)
+ }
+
+ // Grab lookback state for account checks
+ lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err)
+ }
+
+ lbtree, err := sm.ParentState(lbts)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err)
+ }
+
+ ReserveAddress, err := address.NewFromString("t090")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to parse reserve address: %w", err)
+ }
+
+ tree, err := sm.StateTree(root)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
+
+ type transfer struct {
+ From address.Address
+ To address.Address
+ Amt abi.TokenAmount
+ }
+
+ var transfers []transfer
+
+ // Take all excess funds away, put them into the reserve account
+ err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+ switch act.Code {
+ case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
+ sysAcc, err := isSystemAccount(addr)
+ if err != nil {
+ return xerrors.Errorf("checking system account: %w", err)
+ }
+
+ if !sysAcc {
+ transfers = append(transfers, transfer{
+ From: addr,
+ To: ReserveAddress,
+ Amt: act.Balance,
+ })
+ }
+ case builtin0.StorageMinerActorCodeID:
+ var st miner0.State
+ if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil {
+ return xerrors.Errorf("failed to load miner state: %w", err)
+ }
+
+ var available abi.TokenAmount
+ {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err)
+ }
+ available = abi.NewTokenAmount(0)
+ }()
+ // this panics if the miner doesn't have enough funds to cover its locked pledge
+ available = st.GetAvailableBalance(act.Balance)
+ }
+
+ transfers = append(transfers, transfer{
+ From: addr,
+ To: ReserveAddress,
+ Amt: available,
+ })
+ }
+ return nil
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
+ }
+
+ // Execute transfers from previous step
+ for _, t := range transfers {
+ if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil {
+ return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
+ }
+ }
+
+ // pull up power table to give miners back some funds proportional to their power
+ var ps power0.State
+ powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err)
+ }
+
+ cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
+ if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err)
+ }
+
+ totalPower := ps.TotalBytesCommitted
+
+ var transfersBack []transfer
+ // Now, we return some funds to places where they are needed
+ err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+ lbact, err := lbtree.GetActor(addr)
+ if err != nil {
+ if !xerrors.Is(err, types.ErrActorNotFound) {
+ return xerrors.Errorf("failed to get actor in lookback state")
+ }
+ }
+
+ prevBalance := abi.NewTokenAmount(0)
+ if lbact != nil {
+ prevBalance = lbact.Balance
+ }
+
+ switch act.Code {
+ case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
+ nbalance := big.Min(prevBalance, AccountCap)
+ if nbalance.Sign() != 0 {
+ transfersBack = append(transfersBack, transfer{
+ From: ReserveAddress,
+ To: addr,
+ Amt: nbalance,
+ })
+ }
+ case builtin0.StorageMinerActorCodeID:
+ var st miner0.State
+ if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil {
+ return xerrors.Errorf("failed to load miner state: %w", err)
+ }
+
+ var minfo miner0.MinerInfo
+ if err := cst.Get(ctx, st.Info, &minfo); err != nil {
+ return xerrors.Errorf("failed to get miner info: %w", err)
+ }
+
+ sectorsArr, err := adt0.AsArray(sm.ChainStore().Store(ctx), st.Sectors)
+ if err != nil {
+ return xerrors.Errorf("failed to load sectors array: %w", err)
+ }
+
+ slen := sectorsArr.Length()
+
+ power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize)))
+
+ mfunds := minerFundsAlloc(power, totalPower)
+ transfersBack = append(transfersBack, transfer{
+ From: ReserveAddress,
+ To: minfo.Worker,
+ Amt: mfunds,
+ })
+
+ // Now make sure to give each miner who had power at the lookback some FIL
+ lbact, err := lbtree.GetActor(addr)
+ if err == nil {
+ var lbst miner0.State
+ if err := sm.ChainStore().Store(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
+ return xerrors.Errorf("failed to load miner state: %w", err)
+ }
+
+ lbsectors, err := adt0.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors)
+ if err != nil {
+ return xerrors.Errorf("failed to load lb sectors array: %w", err)
+ }
+
+ if lbsectors.Length() > 0 {
+ transfersBack = append(transfersBack, transfer{
+ From: ReserveAddress,
+ To: minfo.Worker,
+ Amt: BaseMinerBalance,
+ })
+ }
+
+ } else {
+ log.Warnf("failed to get miner in lookback state: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
+ }
+
+ for _, t := range transfersBack {
+ if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil {
+ return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
+ }
+ }
+
+ // transfer all burnt funds back to the reserve account
+ burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
+ }
+ if err := doTransfer(cb, tree, builtin0.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
+ }
+
+ // Top up the reimbursement service
+ reimbAddr, err := address.NewFromString("t0111")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address")
+ }
+
+ reimb, err := tree.GetActor(reimbAddr)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err)
+ }
+
+ difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
+ if err := doTransfer(cb, tree, ReserveAddress, reimbAddr, difference); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
+ }
+
+ // Now, a final sanity check to make sure the balances all check out
+ total := abi.NewTokenAmount(0)
+ err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+ total = types.BigAdd(total, act.Balance)
+ return nil
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err)
+ }
+
+ exp := types.FromFil(build.FilBase)
+ if !exp.Equals(total) {
+ return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
+ }
+
+ return tree.Flush(ctx)
+}
+
+func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ store := sm.cs.Store(ctx)
+
+ if build.UpgradeLiftoffHeight <= epoch {
+ return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
+ }
+
+ nst, err := nv3.MigrateStateTree(ctx, store, root, epoch)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("migrating actors state: %w", err)
+ }
+
+ tree, err := sm.StateTree(nst)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
+
+ err = setNetworkName(ctx, store, tree, "ignition")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("setting network name: %w", err)
+ }
+
+ split1, err := address.NewFromString("t0115")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("first split address: %w", err)
+ }
+
+ split2, err := address.NewFromString("t0116")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("second split address: %w", err)
+ }
+
+ err = resetGenesisMsigs(ctx, sm, store, tree, build.UpgradeLiftoffHeight)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
+ }
+
+ err = splitGenesisMultisig(ctx, cb, split1, store, tree, 50)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
+ }
+
+ err = splitGenesisMultisig(ctx, cb, split2, store, tree, 50)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
+ }
+
+ err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err)
+ }
+
+ return tree.Flush(ctx)
+}
+
+func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+
+ store := sm.cs.Store(ctx)
+ tree, err := sm.StateTree(root)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
+
+ addr, err := address.NewFromString("t0122")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting address: %w", err)
+ }
+
+ err = resetMultisigVesting(ctx, store, tree, addr, 0, 0, big.Zero())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
+ }
+
+ err = resetMultisigVesting(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
+ }
+
+ err = resetMultisigVesting(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
+ }
+
+ return tree.Flush(ctx)
+}
+
+func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
+ store := store.ActorStore(ctx, buf)
+
+ info, err := store.Put(ctx, new(types.StateInfo0))
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
+ }
+
+ newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
+ }
+
+ newStateTree, err := states2.LoadTree(store, newHamtRoot)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load new state tree: %w", err)
+ }
+
+ // Check all state-tree invariants.
+ if msgs, err := states2.CheckStateInvariants(newStateTree, types.TotalFilecoinInt); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to check new state tree: %w", err)
+ } else if !msgs.IsEmpty() {
+ // This error is going to be really nasty.
+ return cid.Undef, xerrors.Errorf("network upgrade failed: %v", msgs.Messages())
+ }
+
+ newRoot, err := store.Put(ctx, &types.StateRoot{
+ Version: types.StateTreeVersion1,
+ Actors: newHamtRoot,
+ Info: info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // perform some basic sanity checks to make sure everything still works.
+ if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
+ } else if newRoot2, err := newSm.Flush(ctx); err != nil {
+ return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
+ } else if newRoot2 != newRoot {
+ return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
+ } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
+ }
+
+ {
+ from := buf
+ to := buf.Read()
+
+ if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+ }
+ }
+
+ return newRoot, nil
+}
+
+func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ tree, err := sm.StateTree(root)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
+
+ err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet")
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("setting network name: %w", err)
+ }
+
+ return tree.Flush(ctx)
+}
+
+func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
+ ia, err := tree.GetActor(builtin0.InitActorAddr)
+ if err != nil {
+ return xerrors.Errorf("getting init actor: %w", err)
+ }
+
+ initState, err := init_.Load(store, ia)
+ if err != nil {
+ return xerrors.Errorf("reading init state: %w", err)
+ }
+
+ if err := initState.SetNetworkName(name); err != nil {
+ return xerrors.Errorf("setting network name: %w", err)
+ }
+
+ ia.Head, err = store.Put(ctx, initState)
+ if err != nil {
+ return xerrors.Errorf("writing new init state: %w", err)
+ }
+
+ if err := tree.SetActor(builtin0.InitActorAddr, ia); err != nil {
+ return xerrors.Errorf("setting init actor: %w", err)
+ }
+
+ return nil
+}
+
+func splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64) error {
+ if portions < 1 {
+ return xerrors.Errorf("cannot split into 0 portions")
+ }
+
+ mact, err := tree.GetActor(addr)
+ if err != nil {
+ return xerrors.Errorf("getting msig actor: %w", err)
+ }
+
+ mst, err := multisig.Load(store, mact)
+ if err != nil {
+ return xerrors.Errorf("getting msig state: %w", err)
+ }
+
+ signers, err := mst.Signers()
+ if err != nil {
+ return xerrors.Errorf("getting msig signers: %w", err)
+ }
+
+ thresh, err := mst.Threshold()
+ if err != nil {
+ return xerrors.Errorf("getting msig threshold: %w", err)
+ }
+
+ ibal, err := mst.InitialBalance()
+ if err != nil {
+ return xerrors.Errorf("getting msig initial balance: %w", err)
+ }
+
+ se, err := mst.StartEpoch()
+ if err != nil {
+ return xerrors.Errorf("getting msig start epoch: %w", err)
+ }
+
+ ud, err := mst.UnlockDuration()
+ if err != nil {
+ return xerrors.Errorf("getting msig unlock duration: %w", err)
+ }
+
+ pending, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return xerrors.Errorf("failed to create empty map: %w", err)
+ }
+
+ newIbal := big.Div(ibal, types.NewInt(portions))
+ newState := &multisig0.State{
+ Signers: signers,
+ NumApprovalsThreshold: thresh,
+ NextTxnID: 0,
+ InitialBalance: newIbal,
+ StartEpoch: se,
+ UnlockDuration: ud,
+ PendingTxns: pending,
+ }
+
+ scid, err := store.Put(ctx, newState)
+ if err != nil {
+ return xerrors.Errorf("storing new state: %w", err)
+ }
+
+ newActor := types.Actor{
+ Code: builtin0.MultisigActorCodeID,
+ Head: scid,
+ Nonce: 0,
+ Balance: big.Zero(),
+ }
+
+ i := uint64(0)
+ for i < portions {
+ keyAddr, err := makeKeyAddr(addr, i)
+ if err != nil {
+ return xerrors.Errorf("creating key address: %w", err)
+ }
+
+ idAddr, err := tree.RegisterNewAddress(keyAddr)
+ if err != nil {
+ return xerrors.Errorf("registering new address: %w", err)
+ }
+
+ err = tree.SetActor(idAddr, &newActor)
+ if err != nil {
+ return xerrors.Errorf("setting new msig actor state: %w", err)
+ }
+
+ if err := doTransfer(cb, tree, addr, idAddr, newIbal); err != nil {
+ return xerrors.Errorf("transferring split msig balance: %w", err)
+ }
+
+ i++
+ }
+
+ return nil
+}
+
+func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
+ var b bytes.Buffer
+ if err := splitAddr.MarshalCBOR(&b); err != nil {
+ return address.Undef, xerrors.Errorf("marshalling split address: %w", err)
+ }
+
+ if err := binary.Write(&b, binary.BigEndian, count); err != nil {
+ return address.Undef, xerrors.Errorf("writing count into a buffer: %w", err)
+ }
+
+ if err := binary.Write(&b, binary.BigEndian, []byte("Ignition upgrade")); err != nil {
+ return address.Undef, xerrors.Errorf("writing fork name into a buffer: %w", err)
+ }
+
+ addr, err := address.NewActorAddress(b.Bytes())
+ if err != nil {
+ return address.Undef, xerrors.Errorf("create actor address: %w", err)
+ }
+
+ return addr, nil
+}
+
+// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
+func resetGenesisMsigs(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
+ gb, err := sm.cs.GetGenesis()
+ if err != nil {
+ return xerrors.Errorf("getting genesis block: %w", err)
+ }
+
+ gts, err := types.NewTipSet([]*types.BlockHeader{gb})
+ if err != nil {
+ return xerrors.Errorf("getting genesis tipset: %w", err)
+ }
+
+ cst := cbor.NewCborStore(sm.cs.Blockstore())
+ genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
+ if err != nil {
+ return xerrors.Errorf("loading state tree: %w", err)
+ }
+
+ err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error {
+ if genesisActor.Code == builtin0.MultisigActorCodeID {
+ currActor, err := tree.GetActor(addr)
+ if err != nil {
+ return xerrors.Errorf("loading actor: %w", err)
+ }
+
+ var currState multisig0.State
+ if err := store.Get(ctx, currActor.Head, &currState); err != nil {
+ return xerrors.Errorf("reading multisig state: %w", err)
+ }
+
+ currState.StartEpoch = startEpoch
+
+ currActor.Head, err = store.Put(ctx, &currState)
+ if err != nil {
+ return xerrors.Errorf("writing new multisig state: %w", err)
+ }
+
+ if err := tree.SetActor(addr, currActor); err != nil {
+ return xerrors.Errorf("setting multisig actor: %w", err)
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ return xerrors.Errorf("iterating over genesis actors: %w", err)
+ }
+
+ return nil
+}
+
+func resetMultisigVesting(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error {
+ act, err := tree.GetActor(addr)
+ if err != nil {
+ return xerrors.Errorf("getting actor: %w", err)
+ }
+
+ if !builtin.IsMultisigActor(act.Code) {
+ return xerrors.Errorf("actor wasn't msig: %w", err)
+ }
+
+ var msigState multisig0.State
+ if err := store.Get(ctx, act.Head, &msigState); err != nil {
+ return xerrors.Errorf("reading multisig state: %w", err)
+ }
+
+ msigState.StartEpoch = startEpoch
+ msigState.UnlockDuration = duration
+ msigState.InitialBalance = balance
+
+ act.Head, err = store.Put(ctx, &msigState)
+ if err != nil {
+ return xerrors.Errorf("writing new multisig state: %w", err)
+ }
+
+ if err := tree.SetActor(addr, act); err != nil {
+ return xerrors.Errorf("setting multisig actor: %w", err)
+ }
+
+ return nil
+}
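
A sketch of assembling a custom schedule against the new types; the heights and versions are invented for illustration, and Validate is what enforces that network versions never decrease and that enabled upgrade heights strictly increase.

package example

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// customSchedule builds a two-step schedule: a cheap version bump followed by
// an expensive actors-v2 migration, then validates the ordering.
func customSchedule() (stmgr.UpgradeSchedule, error) {
	us := stmgr.UpgradeSchedule{{
		Height:    abi.ChainEpoch(100),
		Network:   network.Version1,
		Migration: nil, // a nil Migration is a pure network-version bump
	}, {
		Height:    abi.ChainEpoch(200),
		Network:   network.Version4,
		Expensive: true,
		Migration: stmgr.UpgradeActorsV2,
	}}
	if err := us.Validate(); err != nil {
		return nil, err
	}
	return us, nil
}
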
diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go
index e5874d51d..3687e6b34 100644
--- a/chain/stmgr/forks_test.go
+++ b/chain/stmgr/forks_test.go
@@ -7,38 +7,35 @@ import (
"testing"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/specs-actors/actors/builtin"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
"github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
+ lotusinit "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
- "github.com/filecoin-project/lotus/chain/stmgr"
. "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
- cbor "github.com/ipfs/go-ipld-cbor"
+ "github.com/ipfs/go-cid"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log"
cbg "github.com/whyrusleeping/cbor-gen"
)
func init() {
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- power.ConsensusMinerMinPower = big.NewInt(2048)
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
const testForkHeight = 40
@@ -46,6 +43,10 @@ const testForkHeight = 40
type testActor struct {
}
+// must use the code CID of an existing actor type that accounts are allowed to exec.
+func (testActor) Code() cid.Cid { return builtin.PaymentChannelActorCodeID }
+func (testActor) State() cbor.Er { return new(testActorState) }
+
type testActorState struct {
HasUpgraded uint64
}
@@ -66,25 +67,25 @@ func (tas *testActorState) UnmarshalCBOR(r io.Reader) error {
return nil
}
-func (ta *testActor) Exports() []interface{} {
+func (ta testActor) Exports() []interface{} {
return []interface{}{
1: ta.Constructor,
2: ta.TestMethod,
}
}
-func (ta *testActor) Constructor(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue {
+func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
rt.ValidateImmediateCallerAcceptAny()
- rt.State().Create(&testActorState{11})
- fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Message().Receiver())
+ rt.StateCreate(&testActorState{11})
+ //fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Receiver())
- return adt.Empty
+ return abi.Empty
}
-func (ta *testActor) TestMethod(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue {
+func (ta *testActor) TestMethod(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
rt.ValidateImmediateCallerAcceptAny()
var st testActorState
- rt.State().Readonly(&st)
+ rt.StateReadonly(&st)
if rt.CurrEpoch() > testForkHeight {
if st.HasUpgraded != 55 {
@@ -96,7 +97,7 @@ func (ta *testActor) TestMethod(rt runtime.Runtime, params *adt.EmptyValue) *adt
}
}
- return adt.Empty
+ return abi.Empty
}
func TestForkHeightTriggers(t *testing.T) {
@@ -109,48 +110,59 @@ func TestForkHeightTriggers(t *testing.T) {
t.Fatal(err)
}
- sm := NewStateManager(cg.ChainStore())
-
- inv := vm.NewInvoker()
-
// predicting the address here... may break if other assumptions change
taddr, err := address.NewIDAddress(1002)
if err != nil {
t.Fatal(err)
}
- stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree) error {
- cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
+ sm, err := NewStateManagerWithUpgradeSchedule(
+ cg.ChainStore(), UpgradeSchedule{{
+ Network: 1,
+ Height: testForkHeight,
+ Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
+ root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore())
- act, err := st.GetActor(taddr)
- if err != nil {
- return err
- }
+ st, err := sm.StateTree(root)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
- var tas testActorState
- if err := cst.Get(ctx, act.Head, &tas); err != nil {
- return xerrors.Errorf("in fork handler, failed to run get: %w", err)
- }
+ act, err := st.GetActor(taddr)
+ if err != nil {
+ return cid.Undef, err
+ }
- tas.HasUpgraded = 55
+ var tas testActorState
+ if err := cst.Get(ctx, act.Head, &tas); err != nil {
+ return cid.Undef, xerrors.Errorf("in fork handler, failed to run get: %w", err)
+ }
- ns, err := cst.Put(ctx, &tas)
- if err != nil {
- return err
- }
+ tas.HasUpgraded = 55
- act.Head = ns
+ ns, err := cst.Put(ctx, &tas)
+ if err != nil {
+ return cid.Undef, err
+ }
- if err := st.SetActor(taddr, act); err != nil {
- return err
- }
+ act.Head = ns
- return nil
+ if err := st.SetActor(taddr, act); err != nil {
+ return cid.Undef, err
+ }
+
+ return st.Flush(ctx)
+ }}})
+ if err != nil {
+ t.Fatal(err)
}
- inv.Register(builtin.PaymentChannelActorCodeID, &testActor{}, &testActorState{})
- sm.SetVMConstructor(func(vmopt *vm.VMOpts) (*vm.VM, error) {
- nvm, err := vm.NewVM(vmopt)
+ inv := vm.NewActorRegistry()
+ inv.Register(nil, testActor{})
+
+ sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
+ nvm, err := vm.NewVM(ctx, vmopt)
if err != nil {
return nil, err
}
@@ -162,14 +174,14 @@ func TestForkHeightTriggers(t *testing.T) {
var msgs []*types.SignedMessage
- enc, err := actors.SerializeParams(&init_.ExecParams{CodeCID: builtin.PaymentChannelActorCodeID})
+ enc, err := actors.SerializeParams(&init0.ExecParams{CodeCID: (testActor{}).Code()})
if err != nil {
t.Fatal(err)
}
m := &types.Message{
From: cg.Banker(),
- To: builtin.InitActorAddr,
+ To: lotusinit.Address,
Method: builtin.MethodsInit.Exec,
Params: enc,
GasLimit: types.TestGasLimit,
@@ -222,3 +234,84 @@ func TestForkHeightTriggers(t *testing.T) {
}
}
}
+
+func TestForkRefuseCall(t *testing.T) {
+ logging.SetAllLoggers(logging.LevelInfo)
+
+ ctx := context.TODO()
+
+ cg, err := gen.NewGenerator()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sm, err := NewStateManagerWithUpgradeSchedule(
+ cg.ChainStore(), UpgradeSchedule{{
+ Network: 1,
+ Expensive: true,
+ Height: testForkHeight,
+ Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
+ root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ return root, nil
+ }}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ inv := vm.NewActorRegistry()
+ inv.Register(nil, testActor{})
+
+ sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
+ nvm, err := vm.NewVM(ctx, vmopt)
+ if err != nil {
+ return nil, err
+ }
+ nvm.SetInvoker(inv)
+ return nvm, nil
+ })
+
+ cg.SetStateManager(sm)
+
+ enc, err := actors.SerializeParams(&init0.ExecParams{CodeCID: (testActor{}).Code()})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := &types.Message{
+ From: cg.Banker(),
+ To: lotusinit.Address,
+ Method: builtin.MethodsInit.Exec,
+ Params: enc,
+ GasLimit: types.TestGasLimit,
+ Value: types.NewInt(0),
+ GasPremium: types.NewInt(0),
+ GasFeeCap: types.NewInt(0),
+ }
+
+ for i := 0; i < 50; i++ {
+ ts, err := cg.NextTipSet()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet())
+ switch ts.TipSet.TipSet().Height() {
+ case testForkHeight, testForkHeight + 1:
+			// If we just had a fork, or will have one at the next epoch, the call should fail.
+ require.Equal(t, ErrExpensiveFork, err)
+ default:
+ require.NoError(t, err)
+ require.True(t, ret.MsgRct.ExitCode.IsSuccess())
+ }
+ // Call just runs on the parent state for a tipset, so we only
+ // expect an error at the fork height.
+ ret, err = sm.Call(ctx, m, ts.TipSet.TipSet())
+ switch ts.TipSet.TipSet().Height() {
+ case testForkHeight + 1:
+ require.Equal(t, ErrExpensiveFork, err)
+ default:
+ require.NoError(t, err)
+ require.True(t, ret.MsgRct.ExitCode.IsSuccess())
+ }
+ }
+}
diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go
index c707b5195..9a9b80265 100644
--- a/chain/stmgr/read.go
+++ b/chain/stmgr/read.go
@@ -2,7 +2,6 @@ package stmgr
import (
"context"
- "reflect"
"golang.org/x/xerrors"
@@ -12,144 +11,56 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
)
-type StateTreeCB func(state *state.StateTree) error
-
-func (sm *StateManager) WithParentStateTsk(tsk types.TipSetKey, cb StateTreeCB) error {
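+// ParentStateTsk loads the parent state tree of the tipset identified by tsk.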
+func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, error) {
ts, err := sm.cs.GetTipSetFromKey(tsk)
if err != nil {
- return xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
+ return sm.ParentState(ts)
+}
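+// ParentState loads the state tree that the given tipset was built on (its parent state root).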
+func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
- return xerrors.Errorf("load state tree: %w", err)
+ return nil, xerrors.Errorf("load state tree: %w", err)
}
- return cb(state)
+ return state, nil
}
-func (sm *StateManager) WithParentState(ts *types.TipSet, cb StateTreeCB) error {
- cst := cbor.NewCborStore(sm.cs.Blockstore())
- state, err := state.LoadStateTree(cst, sm.parentState(ts))
- if err != nil {
- return xerrors.Errorf("load state tree: %w", err)
- }
-
- return cb(state)
-}
-
-func (sm *StateManager) WithStateTree(st cid.Cid, cb StateTreeCB) error {
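+// StateTree loads the state tree rooted at the given CID.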
+func (sm *StateManager) StateTree(st cid.Cid) (*state.StateTree, error) {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, st)
if err != nil {
- return xerrors.Errorf("load state tree: %w", err)
+ return nil, xerrors.Errorf("load state tree: %w", err)
}
- return cb(state)
+ return state, nil
}
-type ActorCB func(act *types.Actor) error
-
-func GetActor(out *types.Actor) ActorCB {
- return func(act *types.Actor) error {
- *out = *act
- return nil
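+// LoadActor returns the actor at the given address, looked up in the parent state of ts.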
+func (sm *StateManager) LoadActor(_ context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, error) {
+ state, err := sm.ParentState(ts)
+ if err != nil {
+ return nil, err
}
+ return state.GetActor(addr)
}
-func (sm *StateManager) WithActor(addr address.Address, cb ActorCB) StateTreeCB {
- return func(state *state.StateTree) error {
- act, err := state.GetActor(addr)
- if err != nil {
- return xerrors.Errorf("get actor: %w", err)
- }
-
- return cb(act)
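+// LoadActorTsk is like LoadActor, but resolves the tipset from its key first.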
+func (sm *StateManager) LoadActorTsk(_ context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) {
+ state, err := sm.ParentStateTsk(tsk)
+ if err != nil {
+ return nil, err
}
+ return state.GetActor(addr)
}
-// WithActorState usage:
-// Option 1: WithActorState(ctx, idAddr, func(store adt.Store, st *ActorStateType) error {...})
-// Option 2: WithActorState(ctx, idAddr, actorStatePtr)
-func (sm *StateManager) WithActorState(ctx context.Context, out interface{}) ActorCB {
- return func(act *types.Actor) error {
- store := sm.cs.Store(ctx)
-
- outCallback := reflect.TypeOf(out).Kind() == reflect.Func
-
- var st reflect.Value
- if outCallback {
- st = reflect.New(reflect.TypeOf(out).In(1).Elem())
- } else {
- st = reflect.ValueOf(out)
- }
- if err := store.Get(ctx, act.Head, st.Interface()); err != nil {
- return xerrors.Errorf("read actor head: %w", err)
- }
-
- if outCallback {
- out := reflect.ValueOf(out).Call([]reflect.Value{reflect.ValueOf(store), st})
- if !out[0].IsNil() && out[0].Interface().(error) != nil {
- return out[0].Interface().(error)
- }
- }
-
- return nil
- }
-}
-
-type DeadlinesCB func(store adt.Store, deadlines *miner.Deadlines) error
-
-func (sm *StateManager) WithDeadlines(cb DeadlinesCB) func(store adt.Store, mas *miner.State) error {
- return func(store adt.Store, mas *miner.State) error {
- deadlines, err := mas.LoadDeadlines(store)
- if err != nil {
- return err
- }
-
- return cb(store, deadlines)
- }
-}
-
-type DeadlineCB func(store adt.Store, idx uint64, deadline *miner.Deadline) error
-
-func (sm *StateManager) WithDeadline(idx uint64, cb DeadlineCB) DeadlinesCB {
- return func(store adt.Store, deadlines *miner.Deadlines) error {
- d, err := deadlines.LoadDeadline(store, idx)
- if err != nil {
- return err
- }
-
- return cb(store, idx, d)
- }
-}
-
-func (sm *StateManager) WithEachDeadline(cb DeadlineCB) DeadlinesCB {
- return func(store adt.Store, deadlines *miner.Deadlines) error {
- return deadlines.ForEach(store, func(dlIdx uint64, dl *miner.Deadline) error {
- return cb(store, dlIdx, dl)
- })
- }
-}
-
-type PartitionCB func(store adt.Store, idx uint64, partition *miner.Partition) error
-
-func (sm *StateManager) WithEachPartition(cb PartitionCB) DeadlineCB {
- return func(store adt.Store, idx uint64, deadline *miner.Deadline) error {
- parts, err := deadline.PartitionsArray(store)
- if err != nil {
- return err
- }
-
- var partition miner.Partition
- return parts.ForEach(&partition, func(i int64) error {
- p := partition
- return cb(store, uint64(i), &p)
- })
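+// LoadActorRaw returns the actor at the given address from the state tree rooted at st.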
+func (sm *StateManager) LoadActorRaw(_ context.Context, addr address.Address, st cid.Cid) (*types.Actor, error) {
+ state, err := sm.StateTree(st)
+ if err != nil {
+ return nil, err
}
+ return state.GetActor(addr)
}
diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go
index e041fe088..ba3dcd1d8 100644
--- a/chain/stmgr/stmgr.go
+++ b/chain/stmgr/stmgr.go
@@ -2,58 +2,121 @@ package stmgr
import (
"context"
+ "errors"
"fmt"
"sync"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/lotus/chain/state"
- "github.com/filecoin-project/lotus/chain/store"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
-
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "golang.org/x/xerrors"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/trace"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
)
var log = logging.Logger("statemgr")
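+// versionSpec ties a network version to the last epoch at which it is in effect.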
+type versionSpec struct {
+ networkVersion network.Version
+ atOrBelow abi.ChainEpoch
+}
+
type StateManager struct {
cs *store.ChainStore
- stCache map[string][]cid.Cid
- compWait map[string]chan struct{}
- stlk sync.Mutex
- genesisMsigLk sync.Mutex
- newVM func(*vm.VMOpts) (*vm.VM, error)
- genInfo *genesisInfo
+ // Determines the network version at any given epoch.
+ networkVersions []versionSpec
+ latestVersion network.Version
+
+ // Maps chain epochs to upgrade functions.
+ stateMigrations map[abi.ChainEpoch]UpgradeFunc
+	// A set of potentially expensive/time-consuming upgrades. Explicit
+	// calls (e.g. for gas estimation) made against these epochs fail with
+	// ErrExpensiveFork.
+ expensiveUpgrades map[abi.ChainEpoch]struct{}
+
+ stCache map[string][]cid.Cid
+ compWait map[string]chan struct{}
+ stlk sync.Mutex
+ genesisMsigLk sync.Mutex
+ newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
+ preIgnitionGenInfos *genesisInfo
+ postIgnitionGenInfos *genesisInfo
}
func NewStateManager(cs *store.ChainStore) *StateManager {
- return &StateManager{
- newVM: vm.NewVM,
- cs: cs,
- stCache: make(map[string][]cid.Cid),
- compWait: make(map[string]chan struct{}),
+ sm, err := NewStateManagerWithUpgradeSchedule(cs, DefaultUpgradeSchedule())
+ if err != nil {
+ panic(fmt.Sprintf("default upgrade schedule is invalid: %s", err))
}
+ return sm
+}
+
+func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule) (*StateManager, error) {
+ // If we have upgrades, make sure they're in-order and make sense.
+ if err := us.Validate(); err != nil {
+ return nil, err
+ }
+
+ stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us))
+ expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us))
+ var networkVersions []versionSpec
+ lastVersion := network.Version0
+ if len(us) > 0 {
+ // If we have any upgrades, process them and create a version
+ // schedule.
+ for _, upgrade := range us {
+ if upgrade.Migration != nil {
+ stateMigrations[upgrade.Height] = upgrade.Migration
+ }
+ if upgrade.Expensive {
+ expensiveUpgrades[upgrade.Height] = struct{}{}
+ }
+ networkVersions = append(networkVersions, versionSpec{
+ networkVersion: lastVersion,
+ atOrBelow: upgrade.Height,
+ })
+ lastVersion = upgrade.Network
+ }
+ } else {
+ // Otherwise, go directly to the latest version.
+ lastVersion = build.NewestNetworkVersion
+ }
+
+ return &StateManager{
+ networkVersions: networkVersions,
+ latestVersion: lastVersion,
+ stateMigrations: stateMigrations,
+ expensiveUpgrades: expensiveUpgrades,
+ newVM: vm.NewVM,
+ cs: cs,
+ stCache: make(map[string][]cid.Cid),
+ compWait: make(map[string]chan struct{}),
+ }, nil
}
func cidsToKey(cids []cid.Cid) string {
@@ -121,9 +184,8 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil
}
-func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
- var trace []*api.InvocResult
- st, _, err := sm.computeTipSetState(ctx, ts, func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
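+// traceFunc returns an ExecCallback that appends an InvocResult for every applied message to the provided trace slice.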
+func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
+ return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
ir := &api.InvocResult{
Msg: msg,
MsgRct: &ret.MessageReceipt,
@@ -133,9 +195,14 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
if ret.ActorErr != nil {
ir.Error = ret.ActorErr.Error()
}
- trace = append(trace, ir)
+ *trace = append(*trace, ir)
return nil
- })
+ }
+}
+
+func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
+ var trace []*api.InvocResult
+ st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace))
if err != nil {
return cid.Undef, nil, err
}
@@ -145,39 +212,44 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
-func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount) (cid.Cid, cid.Cid, error) {
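+// ApplyBlocks executes the given block messages on top of pstate, running cron and any scheduled state migrations for intervening null rounds, and returns the resulting state root and receipts root.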
+func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
- vmopt := &vm.VMOpts{
- StateBase: pstate,
- Epoch: epoch,
- Rand: r,
- Bstore: sm.cs.Blockstore(),
- Syscalls: sm.cs.VMSys(),
- CircSupplyCalc: sm.GetCirculatingSupply,
- BaseFee: baseFee,
+ makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
+ vmopt := &vm.VMOpts{
+ StateBase: base,
+ Epoch: epoch,
+ Rand: r,
+ Bstore: sm.cs.Blockstore(),
+ Syscalls: sm.cs.VMSys(),
+ CircSupplyCalc: sm.GetCirculatingSupply,
+ NtwkVersion: sm.GetNtwkVersion,
+ BaseFee: baseFee,
+ }
+
+ return sm.newVM(ctx, vmopt)
}
- vmi, err := sm.newVM(vmopt)
+ vmi, err := makeVmWithBaseState(pstate)
if err != nil {
- return cid.Undef, cid.Undef, xerrors.Errorf("instantiating VM failed: %w", err)
+ return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
runCron := func() error {
// TODO: this nonce-getting is a tiny bit ugly
- ca, err := vmi.StateTree().GetActor(builtin.SystemActorAddr)
+ ca, err := vmi.StateTree().GetActor(builtin0.SystemActorAddr)
if err != nil {
return err
}
cronMsg := &types.Message{
- To: builtin.CronActorAddr,
- From: builtin.SystemActorAddr,
+ To: builtin0.CronActorAddr,
+ From: builtin0.SystemActorAddr,
Nonce: ca.Nonce,
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little
- Method: builtin.MethodsCron.EpochTick,
+ Method: builtin0.MethodsCron.EpochTick,
Params: nil,
}
ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
@@ -197,20 +269,34 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
for i := parentEpoch; i < epoch; i++ {
+ if i > parentEpoch {
+ // run cron for null rounds if any
+ if err := runCron(); err != nil {
+ return cid.Undef, cid.Undef, err
+ }
+
+ pstate, err = vmi.Flush(ctx)
+ if err != nil {
+ return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
+ }
+
// handle state forks
- err = sm.handleStateForks(ctx, vmi.StateTree(), i)
+ // XXX: The state tree
+ newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
- if i > parentEpoch {
- // run cron for null rounds if any
- if err := runCron(); err != nil {
- return cid.Cid{}, cid.Cid{}, err
+ if pstate != newState {
+ vmi, err = makeVmWithBaseState(newState)
+ if err != nil {
+ return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
}
vmi.SetBlockHeight(i + 1)
+ pstate = newState
}
var receipts []cbg.CBORMarshaler
@@ -230,8 +316,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
receipts = append(receipts, &r.MessageReceipt)
- gasReward = big.Add(gasReward, r.MinerTip)
- penalty = big.Add(penalty, r.Penalty)
+ gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
+ penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
if cb != nil {
if err := cb(cm.Cid(), m, r); err != nil {
@@ -241,7 +327,6 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
processedMsgs[m.Cid()] = true
}
- var err error
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
Miner: b.Miner,
Penalty: penalty,
@@ -252,25 +337,25 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
}
- sysAct, err := vmi.StateTree().GetActor(builtin.SystemActorAddr)
- if err != nil {
- return cid.Undef, cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
+ sysAct, actErr := vmi.StateTree().GetActor(builtin0.SystemActorAddr)
+ if actErr != nil {
+ return cid.Undef, cid.Undef, xerrors.Errorf("failed to get system actor: %w", actErr)
}
rwMsg := &types.Message{
- From: builtin.SystemActorAddr,
- To: builtin.RewardActorAddr,
+ From: builtin0.SystemActorAddr,
+ To: reward.Address,
Nonce: sysAct.Nonce,
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: 1 << 30,
- Method: builtin.MethodsReward.AwardBlockReward,
+ Method: builtin0.MethodsReward.AwardBlockReward,
Params: params,
}
- ret, err := vmi.ApplyImplicitMessage(ctx, rwMsg)
- if err != nil {
- return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, err)
+ ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
+ if actErr != nil {
+ return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
if cb != nil {
if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil {
@@ -287,7 +372,11 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return cid.Cid{}, cid.Cid{}, err
}
- rectarr := adt.MakeEmptyArray(sm.cs.Store(ctx))
+ // XXX: Is the height correct? Or should it be epoch-1?
+ rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch)))
+ if err != nil {
+ return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err)
+ }
for i, receipt := range receipts {
if err := rectarr.Set(uint64(i), receipt); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
@@ -347,7 +436,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
baseFee := blks[0].ParentBaseFee
- return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee)
+ return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
}
func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
@@ -419,16 +508,7 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T
return nil, fmt.Errorf("failed to load message: %w", err)
}
- r, _, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage())
- if err != nil {
- return nil, err
- }
-
- if r != nil {
- return r, nil
- }
-
- _, r, _, err = sm.searchBackForMsg(ctx, ts, m)
+ _, r, _, err := sm.searchBackForMsg(ctx, ts, m)
if err != nil {
return nil, fmt.Errorf("failed to look back through chain for message: %w", err)
}
@@ -586,6 +666,18 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty
func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
cur := from
+ curActor, err := sm.LoadActor(ctx, m.VMMessage().From, cur)
+ if err != nil {
+		return nil, nil, cid.Undef, xerrors.Errorf("failed to load initial tipset")
+ }
+
+ mFromId, err := sm.LookupID(ctx, m.VMMessage().From, from)
+ if err != nil {
+ return nil, nil, cid.Undef, xerrors.Errorf("looking up From id address: %w", err)
+ }
+
+ mNonce := m.VMMessage().Nonce
+
for {
if cur.Height() == 0 {
// it ain't here!
@@ -598,33 +690,37 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
default:
}
- var act types.Actor
- err := sm.WithParentState(cur, sm.WithActor(m.VMMessage().From, GetActor(&act)))
- if err != nil {
- return nil, nil, cid.Undef, err
- }
-
// we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for,
// either way, no reason to lookback, it ain't there
- if act.Nonce == 0 || act.Nonce < m.VMMessage().Nonce {
+ if curActor == nil || curActor.Nonce == 0 || curActor.Nonce < mNonce {
return nil, nil, cid.Undef, nil
}
- ts, err := sm.cs.LoadTipSet(cur.Parents())
+ pts, err := sm.cs.LoadTipSet(cur.Parents())
if err != nil {
- return nil, nil, cid.Undef, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err)
+ return nil, nil, cid.Undef, xerrors.Errorf("failed to load tipset during msg wait searchback: %w", err)
}
- r, foundMsg, err := sm.tipsetExecutedMessage(ts, m.Cid(), m.VMMessage())
- if err != nil {
- return nil, nil, cid.Undef, fmt.Errorf("checking for message execution during lookback: %w", err)
+ act, err := sm.LoadActor(ctx, mFromId, pts)
+ actorNoExist := errors.Is(err, types.ErrActorNotFound)
+ if err != nil && !actorNoExist {
+ return nil, nil, cid.Cid{}, xerrors.Errorf("failed to load the actor: %w", err)
}
- if r != nil {
- return ts, r, foundMsg, nil
+		// check whether the nonce of our message falls between the actor's nonce at the parent tipset and at cur
+ if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) {
+ r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage())
+ if err != nil {
+ return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err)
+ }
+
+ if r != nil {
+ return pts, r, foundMsg, nil
+ }
}
- cur = ts
+ cur = pts
+ curActor = act
}
}
@@ -686,17 +782,13 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]
return nil, err
}
- r, err := adt.AsMap(sm.cs.Store(ctx), st)
+ stateTree, err := sm.StateTree(st)
if err != nil {
return nil, err
}
var out []address.Address
- err = r.ForEach(nil, func(k string) error {
- addr, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return xerrors.Errorf("address in state tree was not valid: %w", err)
- }
+ err = stateTree.ForEach(func(addr address.Address, act *types.Actor) error {
out = append(out, addr)
return nil
})
@@ -708,8 +800,17 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]
}
func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (api.MarketBalance, error) {
- var state market.State
- _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts)
+ st, err := sm.ParentState(ts)
+ if err != nil {
+ return api.MarketBalance{}, err
+ }
+
+ act, err := st.GetActor(market.Address)
+ if err != nil {
+ return api.MarketBalance{}, err
+ }
+
+ mstate, err := market.Load(sm.cs.Store(ctx), act)
if err != nil {
return api.MarketBalance{}, err
}
@@ -721,7 +822,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
var out api.MarketBalance
- et, err := adt.AsBalanceTable(sm.cs.Store(ctx), state.EscrowTable)
+ et, err := mstate.EscrowTable()
if err != nil {
return api.MarketBalance{}, err
}
@@ -730,7 +831,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
return api.MarketBalance{}, xerrors.Errorf("getting escrow balance: %w", err)
}
- lt, err := adt.AsBalanceTable(sm.cs.Store(ctx), state.LockedTable)
+ lt, err := mstate.LockedTable()
if err != nil {
return api.MarketBalance{}, err
}
@@ -771,12 +872,12 @@ func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) err
return nil
}
-func (sm *StateManager) SetVMConstructor(nvm func(*vm.VMOpts) (*vm.VM, error)) {
+func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (*vm.VM, error)) {
sm.newVM = nvm
}
type genesisInfo struct {
- genesisMsigs []multisig.State
+ genesisMsigs []msig0.State
// info about the Accounts in the genesis state
genesisActors []genesisActor
genesisPledge abi.TokenAmount
@@ -824,51 +925,56 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
- r, err := adt.AsMap(sm.cs.Store(ctx), st)
- if err != nil {
- return xerrors.Errorf("getting genesis actors: %w", err)
- }
-
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
- var act types.Actor
- err = r.ForEach(&act, func(k string) error {
- if act.Code == builtin.MultisigActorCodeID {
- var s multisig.State
- err := sm.cs.Store(ctx).Get(ctx, act.Head, &s)
+ err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error {
+ if builtin.IsMultisigActor(act.Code) {
+ s, err := multisig.Load(sm.cs.Store(ctx), act)
if err != nil {
return err
}
- if s.StartEpoch != 0 {
+ se, err := s.StartEpoch()
+ if err != nil {
+ return err
+ }
+
+ if se != 0 {
return xerrors.New("genesis multisig doesn't start vesting at epoch 0!")
}
- ot, f := totalsByEpoch[s.UnlockDuration]
- if f {
- totalsByEpoch[s.UnlockDuration] = big.Add(ot, s.InitialBalance)
- } else {
- totalsByEpoch[s.UnlockDuration] = s.InitialBalance
+ ud, err := s.UnlockDuration()
+ if err != nil {
+ return err
}
- } else if act.Code == builtin.AccountActorCodeID {
+ ib, err := s.InitialBalance()
+ if err != nil {
+ return err
+ }
+
+ ot, f := totalsByEpoch[ud]
+ if f {
+ totalsByEpoch[ud] = big.Add(ot, ib)
+ } else {
+ totalsByEpoch[ud] = ib
+ }
+
+ } else if builtin.IsAccountActor(act.Code) {
// should exclude burnt funds actor and "remainder account actor"
// should only ever be "faucet" accounts in testnets
- kaddr, err := address.NewFromBytes([]byte(k))
+ if kaddr == builtin0.BurntFundsActorAddr {
+ return nil
+ }
+
+ kid, err := sTree.LookupID(kaddr)
if err != nil {
- return xerrors.Errorf("decoding address: %w", err)
+ return xerrors.Errorf("resolving address: %w", err)
}
- if kaddr != builtin.BurntFundsActorAddr {
- kid, err := sTree.LookupID(kaddr)
- if err != nil {
- return xerrors.Errorf("resolving address: %w", err)
- }
-
- gi.genesisActors = append(gi.genesisActors, genesisActor{
- addr: kid,
- initBal: act.Balance,
- })
- }
+ gi.genesisActors = append(gi.genesisActors, genesisActor{
+ addr: kid,
+ initBal: act.Balance,
+ })
}
return nil
})
@@ -877,9 +983,10 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
return xerrors.Errorf("error setting up genesis infos: %w", err)
}
- gi.genesisMsigs = make([]multisig.State, 0, len(totalsByEpoch))
+ // TODO: use network upgrade abstractions or always start at actors v0?
+ gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
- ns := multisig.State{
+ ns := msig0.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
@@ -887,7 +994,7 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
gi.genesisMsigs = append(gi.genesisMsigs, ns)
}
- sm.genInfo = &gi
+ sm.preIgnitionGenInfos = &gi
return nil
}
@@ -895,7 +1002,7 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
// sets up information about the actors in the genesis state
// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs
// We also do not consider ANY account actors (including the faucet)
-func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error {
+func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error {
gi := genesisInfo{}
@@ -933,30 +1040,30 @@ func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error {
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
// 6 months
- sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
+ sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay)
totalsByEpoch[sixMonths] = big.NewInt(49_929_341)
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
// 1 year
- oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
+ oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay)
totalsByEpoch[oneYear] = big.NewInt(22_421_712)
// 2 years
- twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
+ twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay)
totalsByEpoch[twoYears] = big.NewInt(7_223_364)
// 3 years
- threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
+ threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay)
totalsByEpoch[threeYears] = big.NewInt(87_637_883)
// 6 years
- sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
+ sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay)
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
- gi.genesisMsigs = make([]multisig.State, 0, len(totalsByEpoch))
+ gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
- ns := multisig.State{
+ ns := msig0.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
@@ -964,7 +1071,87 @@ func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error {
gi.genesisMsigs = append(gi.genesisMsigs, ns)
}
- sm.genInfo = &gi
+ sm.preIgnitionGenInfos = &gi
+
+ return nil
+}
+
+// sets up information about the actors in the genesis state, post the ignition fork
+func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error {
+
+ gi := genesisInfo{}
+
+ gb, err := sm.cs.GetGenesis()
+ if err != nil {
+ return xerrors.Errorf("getting genesis block: %w", err)
+ }
+
+ gts, err := types.NewTipSet([]*types.BlockHeader{gb})
+ if err != nil {
+ return xerrors.Errorf("getting genesis tipset: %w", err)
+ }
+
+ st, _, err := sm.TipSetState(ctx, gts)
+ if err != nil {
+ return xerrors.Errorf("getting genesis tipset state: %w", err)
+ }
+
+ cst := cbor.NewCborStore(sm.cs.Blockstore())
+ sTree, err := state.LoadStateTree(cst, st)
+ if err != nil {
+ return xerrors.Errorf("loading state tree: %w", err)
+ }
+
+ // Unnecessary, should be removed
+ gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
+ if err != nil {
+ return xerrors.Errorf("setting up genesis market funds: %w", err)
+ }
+
+ // Unnecessary, should be removed
+ gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
+ if err != nil {
+ return xerrors.Errorf("setting up genesis pledge: %w", err)
+ }
+
+ totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
+
+ // 6 months
+ sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay)
+ totalsByEpoch[sixMonths] = big.NewInt(49_929_341)
+ totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
+
+ // 1 year
+ oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay)
+ totalsByEpoch[oneYear] = big.NewInt(22_421_712)
+
+ // 2 years
+ twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay)
+ totalsByEpoch[twoYears] = big.NewInt(7_223_364)
+
+ // 3 years
+ threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay)
+ totalsByEpoch[threeYears] = big.NewInt(87_637_883)
+
+ // 6 years
+ sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay)
+ totalsByEpoch[sixYears] = big.NewInt(100_000_000)
+ totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
+
+ gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+ for k, v := range totalsByEpoch {
+ ns := msig0.State{
+ // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error
+ InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))),
+ UnlockDuration: k,
+ PendingTxns: cid.Undef,
+ // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself.
+ StartEpoch: build.UpgradeLiftoffHeight,
+ }
+ gi.genesisMsigs = append(gi.genesisMsigs, ns)
+ }
+
+ sm.postIgnitionGenInfos = &gi
return nil
}
@@ -974,13 +1161,23 @@ func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error {
// - For Accounts, it counts max(currentBalance - genesisBalance, 0).
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
vf := big.Zero()
- for _, v := range sm.genInfo.genesisMsigs {
- au := big.Sub(v.InitialBalance, v.AmountLocked(height))
- vf = big.Add(vf, au)
+ if height <= build.UpgradeIgnitionHeight {
+ for _, v := range sm.preIgnitionGenInfos.genesisMsigs {
+ au := big.Sub(v.InitialBalance, v.AmountLocked(height))
+ vf = big.Add(vf, au)
+ }
+ } else {
+ for _, v := range sm.postIgnitionGenInfos.genesisMsigs {
+ // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
+ // The start epoch changed in the Ignition upgrade.
+ au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
+ vf = big.Add(vf, au)
+ }
}
// there should not be any such accounts in testnet (and also none in mainnet?)
- for _, v := range sm.genInfo.genesisActors {
+ // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
+ for _, v := range sm.preIgnitionGenInfos.genesisActors {
act, err := st.GetActor(v.addr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get actor: %w", err)
@@ -992,53 +1189,67 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch,
}
}
- vf = big.Add(vf, sm.genInfo.genesisPledge)
- vf = big.Add(vf, sm.genInfo.genesisMarketFunds)
+ // After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed
+ if height <= build.UpgradeActorsV2Height {
+ // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
+ vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge)
+ // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
+ vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds)
+ }
return vf, nil
}
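+// GetFilReserveDisbursed returns the amount of FIL disbursed from the reserve actor so far, computed as the initial reserve minus the actor's current balance.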
+func GetFilReserveDisbursed(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
+ ract, err := st.GetActor(builtin.ReserveAddress)
+ if err != nil {
+ return big.Zero(), xerrors.Errorf("failed to get reserve actor: %w", err)
+ }
+
+ // If money enters the reserve actor, this could lead to a negative term
+ return big.Sub(big.NewFromGo(build.InitialFilReserved), ract.Balance), nil
+}
+
func GetFilMined(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
- ractor, err := st.GetActor(builtin.RewardActorAddr)
+ ractor, err := st.GetActor(reward.Address)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load reward actor state: %w", err)
}
- var rst reward.State
- if err := st.Store.Get(ctx, ractor.Head, &rst); err != nil {
- return big.Zero(), xerrors.Errorf("failed to load reward state: %w", err)
+ rst, err := reward.Load(adt.WrapStore(ctx, st.Store), ractor)
+ if err != nil {
+ return big.Zero(), err
}
- return rst.TotalMined, nil
+ return rst.TotalStoragePowerReward()
}
func getFilMarketLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
- mactor, err := st.GetActor(builtin.StorageMarketActorAddr)
+ act, err := st.GetActor(market.Address)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load market actor: %w", err)
}
- var mst market.State
- if err := st.Store.Get(ctx, mactor.Head, &mst); err != nil {
+ mst, err := market.Load(adt.WrapStore(ctx, st.Store), act)
+ if err != nil {
return big.Zero(), xerrors.Errorf("failed to load market state: %w", err)
}
- fml := types.BigAdd(mst.TotalClientLockedCollateral, mst.TotalProviderLockedCollateral)
- fml = types.BigAdd(fml, mst.TotalClientStorageFee)
- return fml, nil
+ return mst.TotalLocked()
}
func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
- pactor, err := st.GetActor(builtin.StoragePowerActorAddr)
+ pactor, err := st.GetActor(power.Address)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load power actor: %w", err)
}
- var pst power.State
- if err := st.Store.Get(ctx, pactor.Head, &pst); err != nil {
+ pst, err := power.Load(adt.WrapStore(ctx, st.Store), pactor)
+ if err != nil {
return big.Zero(), xerrors.Errorf("failed to load power state: %w", err)
}
- return pst.TotalPledgeCollateral, nil
+
+ return pst.TotalLocked()
}
func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
@@ -1057,7 +1268,7 @@ func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (
}
func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
- burnt, err := st.GetActor(builtin.BurntFundsActorAddr)
+ burnt, err := st.GetActor(builtin0.BurntFundsActorAddr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load burnt actor: %w", err)
}
@@ -1068,10 +1279,16 @@ func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, err
func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
- if sm.genInfo == nil {
- err := sm.setupGenesisActorsTestnet(ctx)
+ if sm.preIgnitionGenInfos == nil {
+ err := sm.setupPreIgnitionGenesisActorsTestnet(ctx)
if err != nil {
- return api.CirculatingSupply{}, xerrors.Errorf("failed to setup genesis information: %w", err)
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err)
+ }
+ }
+ if sm.postIgnitionGenInfos == nil {
+ err := sm.setupPostIgnitionGenesisActors(ctx)
+ if err != nil {
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err)
}
}
@@ -1080,6 +1297,14 @@ func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err)
}
+ filReserveDisbursed := big.Zero()
+ if height > build.UpgradeActorsV2Height {
+ filReserveDisbursed, err = GetFilReserveDisbursed(ctx, st)
+ if err != nil {
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filReserveDisbursed: %w", err)
+ }
+ }
+
filMined, err := GetFilMined(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filMined: %w", err)
@@ -1096,6 +1321,7 @@ func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height
}
ret := types.BigAdd(filVested, filMined)
+ ret = types.BigAdd(ret, filReserveDisbursed)
ret = types.BigSub(ret, filBurnt)
ret = types.BigSub(ret, filLocked)
@@ -1120,3 +1346,50 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
return csi.FilCirculating, nil
}
+
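+// GetNtwkVersion returns the network version in effect at the given epoch.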
+func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version {
+ // The epochs here are the _last_ epoch for every version, or -1 if the
+ // version is disabled.
+ for _, spec := range sm.networkVersions {
+ if height <= spec.atOrBelow {
+ return spec.networkVersion
+ }
+ }
+ return sm.latestVersion
+}
+
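+// GetPaychState loads the payment channel actor at the given address, together with its state, from the parent state of ts.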
+func (sm *StateManager) GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) {
+ st, err := sm.ParentState(ts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ act, err := st.GetActor(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ actState, err := paych.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return nil, nil, err
+ }
+ return act, actState, nil
+}
+
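+// GetMarketState loads the storage market actor state from the parent state of ts.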
+func (sm *StateManager) GetMarketState(ctx context.Context, ts *types.TipSet) (market.State, error) {
+ st, err := sm.ParentState(ts)
+ if err != nil {
+ return nil, err
+ }
+
+ act, err := st.GetActor(market.Address)
+ if err != nil {
+ return nil, err
+ }
+
+ actState, err := market.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return nil, err
+ }
+ return actState, nil
+}
diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go
index db6157b09..c0f0c4d2f 100644
--- a/chain/stmgr/utils.go
+++ b/chain/stmgr/utils.go
@@ -3,222 +3,188 @@ package stmgr
import (
"bytes"
"context"
+ "fmt"
"os"
"reflect"
+ "runtime"
+ "strings"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
cid "github.com/ipfs/go-cid"
- cbor "github.com/ipfs/go-ipld-cbor"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/builtin/cron"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/rt"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) {
- var state init_.State
- err := sm.WithStateTree(st, sm.WithActor(builtin.InitActorAddr, sm.WithActorState(ctx, &state)))
+ act, err := sm.LoadActorRaw(ctx, init_.Address, st)
+ if err != nil {
+ return "", err
+ }
+ ias, err := init_.Load(sm.cs.Store(ctx), act)
if err != nil {
return "", err
}
- return dtypes.NetworkName(state.NetworkName), nil
-}
-
-func (sm *StateManager) LoadActorState(ctx context.Context, addr address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) {
- var a *types.Actor
- if err := sm.WithParentState(ts, sm.WithActor(addr, func(act *types.Actor) error {
- a = act
- return sm.WithActorState(ctx, out)(act)
- })); err != nil {
- return nil, err
- }
-
- return a, nil
-}
-
-func (sm *StateManager) LoadActorStateRaw(ctx context.Context, addr address.Address, out interface{}, st cid.Cid) (*types.Actor, error) {
- var a *types.Actor
- if err := sm.WithStateTree(st, sm.WithActor(addr, func(act *types.Actor) error {
- a = act
- return sm.WithActorState(ctx, out)(act)
- })); err != nil {
- return nil, err
- }
-
- return a, nil
+ return ias.NetworkName()
}
func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) {
- var mas miner.State
- _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, st)
+ state, err := sm.StateTree(st)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("(get sset) failed to load state tree: %w", err)
+ }
+ act, err := state.GetActor(maddr)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
+ }
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
- cst := cbor.NewCborStore(sm.cs.Blockstore())
- state, err := state.LoadStateTree(cst, st)
+ info, err := mas.Info()
if err != nil {
- return address.Undef, xerrors.Errorf("load state tree: %w", err)
+ return address.Undef, xerrors.Errorf("failed to load actor info: %w", err)
}
- info, err := mas.GetInfo(sm.cs.Store(ctx))
- if err != nil {
- return address.Address{}, err
- }
-
- return vm.ResolveToKeyAddr(state, cst, info.Worker)
+ return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker)
}
-func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, error) {
+func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) {
return GetPowerRaw(ctx, sm, ts.ParentState(), maddr)
}
-func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, error) {
- var ps power.State
- _, err := sm.LoadActorStateRaw(ctx, builtin.StoragePowerActorAddr, &ps, st)
+func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, bool, error) {
+ act, err := sm.LoadActorRaw(ctx, power.Address, st)
if err != nil {
- return power.Claim{}, power.Claim{}, xerrors.Errorf("(get sset) failed to load power actor state: %w", err)
+ return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err)
+ }
+
+ pas, err := power.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return power.Claim{}, power.Claim{}, false, err
+ }
+
+ tpow, err := pas.TotalPower()
+ if err != nil {
+ return power.Claim{}, power.Claim{}, false, err
}
var mpow power.Claim
+ var minpow bool
if maddr != address.Undef {
- cm, err := adt.AsMap(sm.cs.Store(ctx), ps.Claims)
+ var found bool
+ mpow, found, err = pas.MinerPower(maddr)
+ if err != nil || !found {
+ // TODO: return an error when not found?
+ return power.Claim{}, power.Claim{}, false, err
+ }
+
+ minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr)
if err != nil {
- return power.Claim{}, power.Claim{}, err
+ return power.Claim{}, power.Claim{}, false, err
}
-
- var claim power.Claim
- if _, err := cm.Get(adt.AddrKey(maddr), &claim); err != nil {
- return power.Claim{}, power.Claim{}, err
- }
-
- mpow = claim
}
- return mpow, power.Claim{
- RawBytePower: ps.TotalRawBytePower,
- QualityAdjPower: ps.TotalQualityAdjPower,
- }, nil
+ return mpow, tpow, minpow, nil
}
-func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (miner.SectorPreCommitOnChainInfo, error) {
- var mas miner.State
- _, err := sm.LoadActorState(ctx, maddr, &mas, ts)
+func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorPreCommitOnChainInfo, error) {
+ act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
- return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
+ return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
- i, ok, err := mas.GetPrecommittedSector(sm.cs.Store(ctx), sid)
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
- return miner.SectorPreCommitOnChainInfo{}, err
- }
- if !ok {
- return miner.SectorPreCommitOnChainInfo{}, xerrors.New("precommit not found")
+ return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
- return *i, nil
+ return mas.GetPrecommittedSector(sid)
}
func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) {
- var mas miner.State
- _, err := sm.LoadActorState(ctx, maddr, &mas, ts)
+ act, err := sm.LoadActor(ctx, maddr, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
- sectorInfo, ok, err := mas.GetSector(sm.cs.Store(ctx), sid)
- if err != nil {
- return nil, err
- }
- if !ok {
- return nil, nil
- }
-
- return sectorInfo, nil
+ return mas.GetSector(sid)
}
-func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
- var mas miner.State
- _, err := sm.LoadActorState(ctx, maddr, &mas, ts)
+func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, snos *bitfield.BitField) ([]*miner.SectorOnChainInfo, error) {
+ act, err := sm.LoadActor(ctx, maddr, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
- return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.Sectors, filter, filterOut)
+ return mas.LoadSectors(snos)
}
-func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) {
- var partsProving []abi.BitField
- var mas *miner.State
- var info *miner.MinerInfo
-
- err := sm.WithStateTree(st, sm.WithActor(maddr, sm.WithActorState(ctx, func(store adt.Store, mst *miner.State) error {
- var err error
-
- mas = mst
-
- info, err = mas.GetInfo(store)
- if err != nil {
- return xerrors.Errorf("getting miner info: %w", err)
- }
-
- deadlines, err := mas.LoadDeadlines(store)
- if err != nil {
- return xerrors.Errorf("loading deadlines: %w", err)
- }
-
- return deadlines.ForEach(store, func(dlIdx uint64, deadline *miner.Deadline) error {
- partitions, err := deadline.PartitionsArray(store)
- if err != nil {
- return xerrors.Errorf("getting partition array: %w", err)
- }
-
- var partition miner.Partition
- return partitions.ForEach(&partition, func(partIdx int64) error {
- p, err := bitfield.SubtractBitField(partition.Sectors, partition.Faults)
- if err != nil {
- return xerrors.Errorf("subtract faults from partition sectors: %w", err)
- }
-
- partsProving = append(partsProving, p)
-
- return nil
- })
- })
- })))
+func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]proof0.SectorInfo, error) {
+ act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- provingSectors, err := bitfield.MultiMerge(partsProving...)
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
- return nil, xerrors.Errorf("merge partition proving sets: %w", err)
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ // TODO (!!): Actor Update: Make this active sectors
+
+ allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("get all sectors: %w", err)
+ }
+
+ faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
+ if err != nil {
+ return nil, xerrors.Errorf("get faulty sectors: %w", err)
+ }
+
+	provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faults; change to just ActiveSectors in an upgrade
+ if err != nil {
+ return nil, xerrors.Errorf("calc proving sectors: %w", err)
}
numProvSect, err := provingSectors.Count()
@@ -231,6 +197,11 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, nil
}
+ info, err := mas.Info()
+ if err != nil {
+ return nil, xerrors.Errorf("getting miner info: %w", err)
+ }
+
spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
if err != nil {
return nil, xerrors.Errorf("getting seal proof type: %w", err)
@@ -251,28 +222,31 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
}
- sectors, err := provingSectors.All(miner.SectorsMax)
+ iter, err := provingSectors.BitIterator()
if err != nil {
- return nil, xerrors.Errorf("failed to enumerate all sector IDs: %w", err)
+ return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
- sectorAmt, err := adt.AsArray(sm.cs.Store(ctx), mas.Sectors)
- if err != nil {
- return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
- }
-
- out := make([]abi.SectorInfo, len(ids))
- for i, n := range ids {
- sid := sectors[n]
-
- var sinfo miner.SectorOnChainInfo
- if found, err := sectorAmt.Get(sid, &sinfo); err != nil {
- return nil, xerrors.Errorf("failed to get sector %d: %w", sid, err)
- } else if !found {
- return nil, xerrors.Errorf("failed to find sector %d", sid)
+ // Select winning sectors by _index_ in the all-sectors bitfield.
+ selectedSectors := bitfield.New()
+ prev := uint64(0)
+ for _, n := range ids {
+ sno, err := iter.Nth(n - prev)
+ if err != nil {
+ return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
+ selectedSectors.Set(sno)
+ prev = n
+ }
- out[i] = abi.SectorInfo{
+ sectors, err := mas.LoadSectors(&selectedSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("loading proving sectors: %w", err)
+ }
+
+ out := make([]proof0.SectorInfo, len(sectors))
+ for i, sinfo := range sectors {
+ out[i] = proof0.SectorInfo{
SealProof: spt,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
@@ -283,33 +257,40 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
}
func StateMinerInfo(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.MinerInfo, error) {
- var mas miner.State
- _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, ts.ParentState())
+ act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
- return nil, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err)
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- return mas.GetInfo(sm.cs.Store(ctx))
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ mi, err := mas.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ return &mi, err
}
func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) {
- var spas power.State
- _, err := sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spas, ts)
+ act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
- return false, xerrors.Errorf("(get miner slashed) failed to load power actor state")
+ return false, xerrors.Errorf("failed to load power actor: %w", err)
}
- store := sm.cs.Store(ctx)
-
- claims, err := adt.AsMap(store, spas.Claims)
+ spas, err := power.Load(sm.cs.Store(ctx), act)
if err != nil {
- return false, err
+ return false, xerrors.Errorf("failed to load power actor state: %w", err)
}
- ok, err := claims.Get(power.AddrKey(maddr), nil)
+ _, ok, err := spas.MinerPower(maddr)
if err != nil {
- return false, err
+ return false, xerrors.Errorf("getting miner power: %w", err)
}
+
if !ok {
return true, nil
}
@@ -318,108 +299,61 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
}
func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) {
- var state market.State
- if _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil {
- return nil, err
+ act, err := sm.LoadActor(ctx, market.Address, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load market actor: %w", err)
}
- store := sm.ChainStore().Store(ctx)
- da, err := adt.AsArray(store, state.Proposals)
+ state, err := market.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load market actor state: %w", err)
+ }
+
+ proposals, err := state.Proposals()
if err != nil {
return nil, err
}
- var dp market.DealProposal
- if found, err := da.Get(uint64(dealID), &dp); err != nil {
+ proposal, found, err := proposals.Get(dealID)
+
+ if err != nil {
return nil, err
} else if !found {
return nil, xerrors.Errorf("deal %d not found", dealID)
}
- sa, err := market.AsDealStateArray(store, state.States)
+ states, err := state.States()
if err != nil {
return nil, err
}
- st, found, err := sa.Get(dealID)
+ st, found, err := states.Get(dealID)
if err != nil {
return nil, err
}
if !found {
- st = &market.DealState{
- SectorStartEpoch: -1,
- LastUpdatedEpoch: -1,
- SlashEpoch: -1,
- }
+ st = market.EmptyDealState()
}
return &api.MarketDeal{
- Proposal: dp,
+ Proposal: *proposal,
State: *st,
}, nil
}
func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([]address.Address, error) {
- var state power.State
- if _, err := sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &state, ts); err != nil {
- return nil, err
- }
-
- m, err := adt.AsMap(sm.cs.Store(ctx), state.Claims)
+ act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load power actor: %w", err)
}
- var miners []address.Address
- err = m.ForEach(nil, func(k string) error {
- a, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return err
- }
- miners = append(miners, a)
- return nil
- })
+ powState, err := power.Load(sm.cs.Store(ctx), act)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load power actor state: %w", err)
}
- return miners, nil
-}
-
-func LoadSectorsFromSet(ctx context.Context, bs blockstore.Blockstore, ssc cid.Cid, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
- a, err := adt.AsArray(store.ActorStore(ctx, bs), ssc)
- if err != nil {
- return nil, err
- }
-
- var sset []*api.ChainSectorInfo
- var v cbg.Deferred
- if err := a.ForEach(&v, func(i int64) error {
- if filter != nil {
- set, err := filter.IsSet(uint64(i))
- if err != nil {
- return xerrors.Errorf("filter check error: %w", err)
- }
- if set == filterOut {
- return nil
- }
- }
-
- var oci miner.SectorOnChainInfo
- if err := cbor.DecodeInto(v.Raw, &oci); err != nil {
- return err
- }
- sset = append(sset, &api.ChainSectorInfo{
- Info: oci,
- ID: abi.SectorNumber(i),
- })
- return nil
- }); err != nil {
- return nil, err
- }
-
- return sset, nil
+ return powState.ListAllMiners()
}
func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, msgs []*types.Message, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
@@ -432,6 +366,16 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
return cid.Undef, nil, err
}
+ for i := ts.Height(); i < height; i++ {
+ // handle state forks
+ base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
+ if err != nil {
+ return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
+ }
+
+ // TODO: should we also run cron here?
+ }
+
r := store.NewChainRand(sm.cs, ts.Cids())
vmopt := &vm.VMOpts{
StateBase: base,
@@ -440,23 +384,14 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetCirculatingSupply,
+ NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
}
- vmi, err := vm.NewVM(vmopt)
+ vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return cid.Undef, nil, err
}
- for i := ts.Height(); i < height; i++ {
- // handle state forks
- err = sm.handleStateForks(ctx, vmi.StateTree(), i)
- if err != nil {
- return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
- }
-
- // TODO: should we also run cron here?
- }
-
for i, msg := range msgs {
// TODO: Use the signed message length for secp messages
ret, err := vmi.ApplyMessage(ctx, msg)
@@ -478,8 +413,9 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) {
var lbr abi.ChainEpoch
- if round > build.WinningPoStSectorSetLookback {
- lbr = round - build.WinningPoStSectorSetLookback
+ lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round))
+ if round > lb {
+ lbr = round - lb
}
// more null blocks than our lookback
@@ -495,7 +431,7 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.
return lbts, nil
}
-func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBeacon, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
+func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
ts, err := sm.ChainStore().LoadTipSet(tsk)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err)
@@ -510,7 +446,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
prev = &types.BeaconEntry{}
}
- entries, err := beacon.BeaconEntriesForBlock(ctx, bcn, round, *prev)
+ entries, err := beacon.BeaconEntriesForBlock(ctx, bcs, round, ts.Height(), *prev)
if err != nil {
return nil, err
}
@@ -530,9 +466,14 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
return nil, err
}
- var mas miner.State
- if _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, lbst); err != nil {
- return nil, err
+ act, err := sm.LoadActorRaw(ctx, maddr, lbst)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(sm.cs.Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
buf := new(bytes.Buffer)
@@ -547,19 +488,19 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
if err != nil {
- return nil, xerrors.Errorf("getting wpost proving set: %w", err)
+ return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}
if len(sectors) == 0 {
return nil, nil
}
- mpow, tpow, err := GetPowerRaw(ctx, sm, lbst, maddr)
+ mpow, tpow, _, err := GetPowerRaw(ctx, sm, lbst, maddr)
if err != nil {
return nil, xerrors.Errorf("failed to get power: %w", err)
}
- info, err := mas.GetInfo(sm.cs.Store(ctx))
+ info, err := mas.Info()
if err != nil {
return nil, err
}
@@ -569,88 +510,178 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
return nil, xerrors.Errorf("resolving worker address: %w", err)
}
- hmp, err := MinerHasMinPower(ctx, sm, maddr, lbts)
+ // TODO: Not ideal performance... This method reloads miner and power state (already looked up here and in GetPowerRaw)
+ eligible, err := MinerEligibleToMine(ctx, sm, maddr, ts, lbts)
if err != nil {
- return nil, xerrors.Errorf("determining if miner has min power failed: %w", err)
+ return nil, xerrors.Errorf("determining miner eligibility: %w", err)
}
return &api.MiningBaseInfo{
- MinerPower: mpow.QualityAdjPower,
- NetworkPower: tpow.QualityAdjPower,
- Sectors: sectors,
- WorkerKey: worker,
- SectorSize: info.SectorSize,
- PrevBeaconEntry: *prev,
- BeaconEntries: entries,
- HasMinPower: hmp,
+ MinerPower: mpow.QualityAdjPower,
+ NetworkPower: tpow.QualityAdjPower,
+ Sectors: sectors,
+ WorkerKey: worker,
+ SectorSize: info.SectorSize,
+ PrevBeaconEntry: *prev,
+ BeaconEntries: entries,
+ EligibleForMining: eligible,
}, nil
}
-type methodMeta struct {
+type MethodMeta struct {
Name string
Params reflect.Type
Ret reflect.Type
}
-var MethodsMap = map[cid.Cid][]methodMeta{}
+var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
func init() {
- cidToMethods := map[cid.Cid][2]interface{}{
- // builtin.SystemActorCodeID: {builtin.MethodsSystem, system.Actor{} }- apparently it doesn't have methods
- builtin.InitActorCodeID: {builtin.MethodsInit, init_.Actor{}},
- builtin.CronActorCodeID: {builtin.MethodsCron, cron.Actor{}},
- builtin.AccountActorCodeID: {builtin.MethodsAccount, account.Actor{}},
- builtin.StoragePowerActorCodeID: {builtin.MethodsPower, power.Actor{}},
- builtin.StorageMinerActorCodeID: {builtin.MethodsMiner, miner.Actor{}},
- builtin.StorageMarketActorCodeID: {builtin.MethodsMarket, market.Actor{}},
- builtin.PaymentChannelActorCodeID: {builtin.MethodsPaych, paych.Actor{}},
- builtin.MultisigActorCodeID: {builtin.MethodsMultisig, multisig.Actor{}},
- builtin.RewardActorCodeID: {builtin.MethodsReward, reward.Actor{}},
- builtin.VerifiedRegistryActorCodeID: {builtin.MethodsVerifiedRegistry, verifreg.Actor{}},
- }
+ // TODO: combine with the runtime actor registry.
+ var actors []rt.VMActor
+ actors = append(actors, exported0.BuiltinActors()...)
+ actors = append(actors, exported2.BuiltinActors()...)
- for c, m := range cidToMethods {
- rt := reflect.TypeOf(m[0])
- nf := rt.NumField()
+ for _, actor := range actors {
+ exports := actor.Exports()
+ methods := make(map[abi.MethodNum]MethodMeta, len(exports))
- MethodsMap[c] = append(MethodsMap[c], methodMeta{
+ // Explicitly add send, it's special.
+ // Note that builtin2.MethodSend = builtin0.MethodSend = 0.
+ methods[builtin0.MethodSend] = MethodMeta{
Name: "Send",
- Params: reflect.TypeOf(new(adt.EmptyValue)),
- Ret: reflect.TypeOf(new(adt.EmptyValue)),
- })
-
- exports := m[1].(abi.Invokee).Exports()
- for i := 0; i < nf; i++ {
- export := reflect.TypeOf(exports[i+1])
-
- MethodsMap[c] = append(MethodsMap[c], methodMeta{
- Name: rt.Field(i).Name,
- Params: export.In(1),
- Ret: export.Out(0),
- })
+ Params: reflect.TypeOf(new(abi.EmptyValue)),
+ Ret: reflect.TypeOf(new(abi.EmptyValue)),
}
+
+ // Iterate over exported methods. Some of these _may_ be nil and
+ // must be skipped.
+ for number, export := range exports {
+ if export == nil {
+ continue
+ }
+
+ ev := reflect.ValueOf(export)
+ et := ev.Type()
+
+ // Extract the method names using reflection. These
+ // method names always match the field names in the
+ // `builtin.Method*` structs (tested in the specs-actors
+ // tests).
+ fnName := runtime.FuncForPC(ev.Pointer()).Name()
+ fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
+
+ switch abi.MethodNum(number) {
+ case builtin0.MethodSend:
+ // Note that builtin2.MethodSend = builtin0.MethodSend = 0.
+ panic("method 0 is reserved for Send")
+ case builtin0.MethodConstructor:
+ // Note that builtin2.MethodConstructor = builtin0.MethodConstructor = 1.
+ if fnName != "Constructor" {
+ panic("method 1 is reserved for Constructor")
+ }
+ }
+
+ methods[abi.MethodNum(number)] = MethodMeta{
+ Name: fnName,
+ Params: et.In(1),
+ Ret: et.Out(0),
+ }
+ }
+ MethodsMap[actor.Code()] = methods
}
}
func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
- var act types.Actor
- if err := sm.WithParentState(ts, sm.WithActor(to, GetActor(&act))); err != nil {
- return nil, xerrors.Errorf("getting actor: %w", err)
+ act, err := sm.LoadActor(ctx, to, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
- m := MethodsMap[act.Code][method]
+ m, found := MethodsMap[act.Code][method]
+ if !found {
+ return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
+ }
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
-func MinerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) {
- var ps power.State
- _, err := sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &ps, ts)
+func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) {
+ pact, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
- return ps.MinerNominalPowerMeetsConsensusMinimum(sm.ChainStore().Store(ctx), addr)
+ ps, err := power.Load(sm.cs.Store(ctx), pact)
+ if err != nil {
+ return false, err
+ }
+
+ return ps.MinerNominalPowerMeetsConsensusMinimum(addr)
+}
+
+func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Address, baseTs *types.TipSet, lookbackTs *types.TipSet) (bool, error) {
+ hmp, err := minerHasMinPower(ctx, sm, addr, lookbackTs)
+
+ // TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable?
+ if sm.GetNtwkVersion(ctx, baseTs.Height()) <= network.Version3 {
+ return hmp, err
+ }
+
+ if err != nil {
+ return false, err
+ }
+
+ if !hmp {
+ return false, nil
+ }
+
+ // Post actors v2, also check MinerEligibleForElection with base ts
+
+ pact, err := sm.LoadActor(ctx, power.Address, baseTs)
+ if err != nil {
+ return false, xerrors.Errorf("loading power actor state: %w", err)
+ }
+
+ pstate, err := power.Load(sm.cs.Store(ctx), pact)
+ if err != nil {
+ return false, err
+ }
+
+ mact, err := sm.LoadActor(ctx, addr, baseTs)
+ if err != nil {
+ return false, xerrors.Errorf("loading miner actor state: %w", err)
+ }
+
+ mstate, err := miner.Load(sm.cs.Store(ctx), mact)
+ if err != nil {
+ return false, err
+ }
+
+ // Non-empty power claim.
+ if claim, found, err := pstate.MinerPower(addr); err != nil {
+ return false, err
+ } else if !found {
+ return false, nil
+ } else if claim.QualityAdjPower.LessThanEqual(big.Zero()) {
+ return false, nil
+ }
+
+ // No fee debt.
+ if debt, err := mstate.FeeDebt(); err != nil {
+ return false, err
+ } else if !debt.IsZero() {
+ return false, nil
+ }
+
+ // No active consensus faults.
+ if mInfo, err := mstate.Info(); err != nil {
+ return false, err
+ } else if baseTs.Height() <= mInfo.ConsensusFaultElapsed {
+ return false, nil
+ }
+
+ return true, nil
}
func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) {
diff --git a/chain/store/basefee.go b/chain/store/basefee.go
index de3f90a8f..33367abcc 100644
--- a/chain/store/basefee.go
+++ b/chain/store/basefee.go
@@ -3,22 +3,28 @@ package store
import (
"context"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
-func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int) types.BigInt {
- // deta := 1/PackingEfficiency * gasLimitUsed/noOfBlocks - build.BlockGasTarget
- // change := baseFee * deta / BlockGasTarget / BaseFeeMaxChangeDenom
+func ComputeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int, epoch abi.ChainEpoch) types.BigInt {
+ // delta := gasLimitUsed/noOfBlocks - build.BlockGasTarget
+ // change := baseFee * delta / BlockGasTarget / BaseFeeMaxChangeDenom
// nextBaseFee = baseFee + change
// nextBaseFee = max(nextBaseFee, build.MinimumBaseFee)
- delta := build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum)
- delta -= build.BlockGasTarget
+ var delta int64
+ if epoch > build.UpgradeSmokeHeight {
+ delta = gasLimitUsed / int64(noOfBlocks)
+ delta -= build.BlockGasTarget
+ } else {
+ delta = build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum)
+ delta -= build.BlockGasTarget
+ }
// cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta
if delta > build.BlockGasTarget {
@@ -40,6 +46,10 @@ func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int
}
func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) {
+ if build.UpgradeBreezeHeight >= 0 && ts.Height() > build.UpgradeBreezeHeight && ts.Height() < build.UpgradeBreezeHeight+build.BreezeGasTampingDuration {
+ return abi.NewTokenAmount(100), nil
+ }
+
zero := abi.NewTokenAmount(0)
// totalLimit is sum of GasLimits of unique messages in a tipset
@@ -69,5 +79,5 @@ func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi
}
parentBaseFee := ts.Blocks()[0].ParentBaseFee
- return computeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks())), nil
+ return ComputeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks()), ts.Height()), nil
}
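// A minimal arithmetic sketch of the post-Smoke path above, using int64 in place
// of types.BigInt. The constants mirror the mainnet parameters (BlockGasTarget = 5e9,
// BaseFeeMaxChangeDenom = 8, MinimumBaseFee = 100) and are spelled out here only
// for illustration.
func nextBaseFeeSketch(baseFee, gasLimitUsed, noOfBlocks int64) int64 {
	const (
		blockGasTarget = int64(5_000_000_000)
		maxChangeDenom = int64(8)
		minimumBaseFee = int64(100)
	)

	delta := gasLimitUsed/noOfBlocks - blockGasTarget
	// Cap the change at +/-12.5% by capping delta at one gas target.
	if delta > blockGasTarget {
		delta = blockGasTarget
	}
	if delta < -blockGasTarget {
		delta = -blockGasTarget
	}

	next := baseFee + baseFee*delta/blockGasTarget/maxChangeDenom
	if next < minimumBaseFee {
		next = minimumBaseFee
	}
	return next
}

// nextBaseFeeSketch(100_000_000, 0, 1)              == 87_500_000  (empty block: -12.5%)
// nextBaseFeeSketch(100_000_000, 20_000_000_000, 2) == 112_500_000 (full blocks: +12.5%)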
diff --git a/chain/store/basefee_test.go b/chain/store/basefee_test.go
index 7a7cae911..b3d414cf5 100644
--- a/chain/store/basefee_test.go
+++ b/chain/store/basefee_test.go
@@ -11,24 +11,27 @@ import (
func TestBaseFee(t *testing.T) {
tests := []struct {
- basefee uint64
- limitUsed int64
- noOfBlocks int
- output uint64
+ basefee uint64
+ limitUsed int64
+ noOfBlocks int
+ preSmoke, postSmoke uint64
}{
- {100e6, 0, 1, 87.5e6},
- {100e6, 0, 5, 87.5e6},
- {100e6, build.BlockGasTarget, 1, 103.125e6},
- {100e6, build.BlockGasTarget * 2, 2, 103.125e6},
- {100e6, build.BlockGasLimit * 2, 2, 112.5e6},
- {100e6, build.BlockGasLimit * 1.5, 2, 110937500},
+ {100e6, 0, 1, 87.5e6, 87.5e6},
+ {100e6, 0, 5, 87.5e6, 87.5e6},
+ {100e6, build.BlockGasTarget, 1, 103.125e6, 100e6},
+ {100e6, build.BlockGasTarget * 2, 2, 103.125e6, 100e6},
+ {100e6, build.BlockGasLimit * 2, 2, 112.5e6, 112.5e6},
+ {100e6, build.BlockGasLimit * 1.5, 2, 110937500, 106.250e6},
}
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
- output := computeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks)
- assert.Equal(t, fmt.Sprintf("%d", test.output), output.String())
+ preSmoke := ComputeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks, build.UpgradeSmokeHeight-1)
+ assert.Equal(t, fmt.Sprintf("%d", test.preSmoke), preSmoke.String())
+
+ postSmoke := ComputeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks, build.UpgradeSmokeHeight+1)
+ assert.Equal(t, fmt.Sprintf("%d", test.postSmoke), postSmoke.String())
})
}
}
diff --git a/chain/store/index.go b/chain/store/index.go
index 8f3e88417..a9da994af 100644
--- a/chain/store/index.go
+++ b/chain/store/index.go
@@ -5,8 +5,8 @@ import (
"os"
"strconv"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/xerrors"
)
diff --git a/chain/store/index_test.go b/chain/store/index_test.go
index 5c49c6791..63e08070c 100644
--- a/chain/store/index_test.go
+++ b/chain/store/index_test.go
@@ -5,11 +5,11 @@ import (
"context"
"testing"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/specs-actors/actors/abi"
datastore "github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/assert"
diff --git a/chain/store/store.go b/chain/store/store.go
index 2ae7fab2c..aac28e5d3 100644
--- a/chain/store/store.go
+++ b/chain/store/store.go
@@ -10,15 +10,20 @@ import (
"strconv"
"sync"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
@@ -72,6 +77,20 @@ func init() {
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error
+// Journal event types.
+const (
+ evtTypeHeadChange = iota
+)
+
+type HeadChangeEvt struct {
+ From types.TipSetKey
+ FromHeight abi.ChainEpoch
+ To types.TipSetKey
+ ToHeight abi.ChainEpoch
+ RevertCount int
+ ApplyCount int
+}
+
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
@@ -103,6 +122,8 @@ type ChainStore struct {
tsCache *lru.ARCCache
vmcalls vm.SyscallBuilder
+
+ evtTypes [1]journal.EventType
}
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
@@ -118,6 +139,10 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB
vmcalls: vmcalls,
}
+ cs.evtTypes = [1]journal.EventType{
+ evtTypeHeadChange: journal.J.RegisterEventType("sync", "head_change"),
+ }
+
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
@@ -263,6 +288,16 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e
return nil
}
+func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
+ key := blockValidationCacheKeyPrefix.Instance(blkid.String())
+
+ if err := cs.ds.Delete(key); err != nil {
+ return xerrors.Errorf("removing from valid block cache: %w", err)
+ }
+
+ return nil
+}
+
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
@@ -344,12 +379,15 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
continue
}
- journal.Add("sync", map[string]interface{}{
- "op": "headChange",
- "from": r.old.Key(),
- "to": r.new.Key(),
- "rev": len(revert),
- "apply": len(apply),
+ journal.J.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
+ return HeadChangeEvt{
+ From: r.old.Key(),
+ FromHeight: r.old.Height(),
+ To: r.new.Key(),
+ ToHeight: r.new.Height(),
+ RevertCount: len(revert),
+ ApplyCount: len(apply),
+ }
})
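// The closure passed to RecordEvent above is a lazy payload: the HeadChangeEvt is
// only constructed if the journal has that event type enabled. A minimal sketch of
// that shape, against a hypothetical recorder interface:
type lazyRecorder interface {
	Enabled(evt string) bool
	Write(evt string, payload interface{})
}

func recordLazily(r lazyRecorder, evt string, supplier func() interface{}) {
	if !r.Enabled(evt) {
		return // disabled: the payload is never built
	}
	r.Write(evt, supplier())
}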
// reverse the apply array
@@ -443,14 +481,25 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
return v.(*types.TipSet), nil
}
- var blks []*types.BlockHeader
- for _, c := range tsk.Cids() {
- b, err := cs.GetBlock(c)
- if err != nil {
- return nil, xerrors.Errorf("get block %s: %w", c, err)
- }
+ // Fetch tipset block headers from blockstore in parallel
+ var eg errgroup.Group
+ cids := tsk.Cids()
+ blks := make([]*types.BlockHeader, len(cids))
+ for i, c := range cids {
+ i, c := i, c
+ eg.Go(func() error {
+ b, err := cs.GetBlock(c)
+ if err != nil {
+ return xerrors.Errorf("get block %s: %w", c, err)
+ }
- blks = append(blks, b)
+ blks[i] = b
+ return nil
+ })
+ }
+ err := eg.Wait()
+ if err != nil {
+ return nil, err
}
ts, err := types.NewTipSet(blks)
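// A generic sketch of the errgroup fan-out used above (golang.org/x/sync/errgroup,
// imported in this file). Rebinding the loop variables (i, c := i, c) gives each
// goroutine its own copies; in the Go versions this code targets, the closures
// would otherwise all observe the final values of the range variables.
func fetchAll(keys []string, get func(string) ([]byte, error)) ([][]byte, error) {
	out := make([][]byte, len(keys))
	var eg errgroup.Group
	for i, k := range keys {
		i, k := i, k // per-iteration copies for the closure below
		eg.Go(func() error {
			v, err := get(k)
			if err != nil {
				return err
			}
			out[i] = v
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return out, nil
}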
@@ -471,7 +520,7 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) {
cur := b
for !a.Equals(cur) && cur.Height() > a.Height() {
- next, err := cs.LoadTipSet(b.Parents())
+ next, err := cs.LoadTipSet(cur.Parents())
if err != nil {
return false, err
}
@@ -710,7 +759,8 @@ func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error)
func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
ctx := context.TODO()
- a, err := adt.AsArray(cs.Store(ctx), root)
+ // block headers use adt0, for now.
+ a, err := adt0.AsArray(cs.Store(ctx), root)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
@@ -744,32 +794,16 @@ type BlockMessages struct {
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
applied := make(map[address.Address]uint64)
- cst := cbor.NewCborStore(cs.bs)
- st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
- if err != nil {
- return nil, xerrors.Errorf("failed to load state tree")
- }
-
- preloadAddr := func(a address.Address) error {
- if _, ok := applied[a]; !ok {
- act, err := st.GetActor(a)
- if err != nil {
- return err
- }
-
- applied[a] = act.Nonce
- }
- return nil
- }
-
selectMsg := func(m *types.Message) (bool, error) {
- if err := preloadAddr(m.From); err != nil {
- return false, err
+ // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
+ if _, ok := applied[m.From]; !ok {
+ applied[m.From] = m.Nonce
}
if applied[m.From] != m.Nonce {
return false, nil
}
+
applied[m.From]++
return true, nil
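// A minimal sketch of the nonce-based selection above: across the blocks of a
// tipset, the first message seen from a sender fixes the expected nonce, and only
// a contiguous run of increasing nonces is applied; duplicates included by sibling
// blocks are skipped. The trimmed msg struct is illustrative only.
type msg struct {
	From  string
	Nonce uint64
}

func selectMsgs(all []msg) []msg {
	applied := map[string]uint64{}
	var out []msg
	for _, m := range all {
		if _, ok := applied[m.From]; !ok {
			applied[m.From] = m.Nonce // first sighting defines the expected nonce
		}
		if applied[m.From] != m.Nonce {
			continue // duplicate or out-of-order nonce: not applied
		}
		applied[m.From]++
		out = append(out, m)
	}
	return out
}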
@@ -919,7 +953,8 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message,
func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) {
ctx := context.TODO()
- a, err := adt.AsArray(cs.Store(ctx), b.ParentMessageReceipts)
+ // block headers use adt0, for now.
+ a, err := adt0.AsArray(cs.Store(ctx), b.ParentMessageReceipts)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
@@ -1159,14 +1194,7 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.
return in, rerr
}
-func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, w io.Writer) error {
- if ts == nil {
- ts = cs.GetHeaviestTipSet()
- }
-
- seen := cid.NewSet()
- walked := cid.NewSet()
-
+func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
h := &car.CarHeader{
Roots: ts.Cids(),
Version: 1,
@@ -1176,30 +1204,64 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return xerrors.Errorf("failed to write car header: %s", err)
}
+ return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, func(c cid.Cid) error {
+ blk, err := cs.bs.Get(c)
+ if err != nil {
+ return xerrors.Errorf("writing object to car, bs.Get: %w", err)
+ }
+
+ if err := carutil.LdWrite(w, c.Bytes(), blk.RawData()); err != nil {
+ return xerrors.Errorf("failed to write block to car output: %w", err)
+ }
+
+ return nil
+ })
+}
+
+func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, cb func(cid.Cid) error) error {
+ if ts == nil {
+ ts = cs.GetHeaviestTipSet()
+ }
+
+ seen := cid.NewSet()
+ walked := cid.NewSet()
+
blocksToWalk := ts.Cids()
+ currentMinHeight := ts.Height()
walkChain := func(blk cid.Cid) error {
if !seen.Visit(blk) {
return nil
}
+ if err := cb(blk); err != nil {
+ return err
+ }
+
data, err := cs.bs.Get(blk)
if err != nil {
return xerrors.Errorf("getting block: %w", err)
}
- if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil {
- return xerrors.Errorf("failed to write block to car output: %w", err)
- }
-
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
}
- cids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
- if err != nil {
- return xerrors.Errorf("recursing messages failed: %w", err)
+ if currentMinHeight > b.Height {
+ currentMinHeight = b.Height
+ if currentMinHeight%builtin.EpochsInDay == 0 {
+ log.Infow("export", "height", currentMinHeight)
+ }
+ }
+
+ var cids []cid.Cid
+ if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
+ mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
+ if err != nil {
+ return xerrors.Errorf("recursing messages failed: %w", err)
+ }
+ cids = mcids
}
if b.Height > 0 {
@@ -1227,20 +1289,20 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
if c.Prefix().Codec != cid.DagCBOR {
continue
}
- data, err := cs.bs.Get(c)
- if err != nil {
- return xerrors.Errorf("writing object to car (get %s): %w", c, err)
+
+ if err := cb(c); err != nil {
+ return err
}
- if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil {
- return xerrors.Errorf("failed to write out car object: %w", err)
- }
}
}
return nil
}
+ log.Infow("export started")
+ exportStart := build.Clock.Now()
+
for len(blocksToWalk) > 0 {
next := blocksToWalk[0]
blocksToWalk = blocksToWalk[1:]
@@ -1249,6 +1311,8 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
}
}
+ log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds())
+
return nil
}
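// A hypothetical use of the callback form introduced above: because walking and
// CAR-writing are now decoupled, the same traversal can back other tools, e.g.
// counting the objects a snapshot would contain. countSnapshotObjects is
// illustrative only.
func countSnapshotObjects(ctx context.Context, cs *ChainStore, ts *types.TipSet, recentRoots abi.ChainEpoch) (int, error) {
	count := 0
	err := cs.WalkSnapshot(ctx, ts, recentRoots, true, func(c cid.Cid) error {
		count++ // every unique object this export would have written
		return nil
	})
	return count, err
}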
@@ -1291,7 +1355,7 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry
}, nil
}
- return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset")
+ return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
type chainRand struct {
diff --git a/chain/store/store_test.go b/chain/store/store_test.go
index 42de4c19d..b7adfb595 100644
--- a/chain/store/store_test.go
+++ b/chain/store/store_test.go
@@ -7,13 +7,10 @@ import (
datastore "github.com/ipfs/go-datastore"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@@ -22,11 +19,9 @@ import (
)
func init() {
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- power.ConsensusMinerMinPower = big.NewInt(2048)
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
func BenchmarkGetRandomness(b *testing.B) {
@@ -96,7 +91,7 @@ func TestChainExportImport(t *testing.T) {
}
buf := new(bytes.Buffer)
- if err := cg.ChainStore().Export(context.TODO(), last, 0, buf); err != nil {
+ if err := cg.ChainStore().Export(context.TODO(), last, 0, false, buf); err != nil {
t.Fatal(err)
}
diff --git a/chain/store/weight.go b/chain/store/weight.go
index 2e8516f57..9100df315 100644
--- a/chain/store/weight.go
+++ b/chain/store/weight.go
@@ -4,12 +4,12 @@ import (
"context"
"math/big"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+
+ big2 "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
- big2 "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
)
@@ -34,16 +34,22 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
return types.NewInt(0), xerrors.Errorf("load state tree: %w", err)
}
- act, err := state.GetActor(builtin.StoragePowerActorAddr)
+ act, err := state.GetActor(power.Address)
if err != nil {
return types.NewInt(0), xerrors.Errorf("get power actor: %w", err)
}
- var st power.State
- if err := cst.Get(ctx, act.Head, &st); err != nil {
- return types.NewInt(0), xerrors.Errorf("get power actor head (%s, height=%d): %w", act.Head, ts.Height(), err)
+ powState, err := power.Load(cs.Store(ctx), act)
+ if err != nil {
+ return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err)
}
- tpow = st.TotalQualityAdjPower // TODO: REVIEW: Is this correct?
+
+ claim, err := powState.TotalPower()
+ if err != nil {
+ return types.NewInt(0), xerrors.Errorf("failed to get total power: %w", err)
+ }
+
+ tpow = claim.QualityAdjPower // TODO: REVIEW: Is this correct?
}
log2P := int64(0)
diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go
index 5c28aa835..d51c481d1 100644
--- a/chain/sub/incoming.go
+++ b/chain/sub/incoming.go
@@ -1,7 +1,6 @@
package sub
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -11,8 +10,6 @@ import (
"golang.org/x/xerrors"
address "github.com/filecoin-project/go-address"
- miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
lru "github.com/hashicorp/golang-lru"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
@@ -26,8 +23,11 @@ import (
"go.opencensus.io/stats"
"go.opencensus.io/tag"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -44,7 +44,11 @@ var log = logging.Logger("sub")
var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
-func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bserv bserv.BlockService, cmgr connmgr.ConnManager) {
+func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bs bserv.BlockService, cmgr connmgr.ConnManager) {
+ // Timeout after (block time + propagation delay): past that point the block
+ // is too old to be worth propagating, so give up on fetching its messages.
+ timeout := time.Duration(build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
+
for {
msg, err := bsub.Next(ctx)
if err != nil {
@@ -65,15 +69,22 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
src := msg.GetFrom()
go func() {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ // NOTE: we could also share a single session between
+ // all requests but that may have other consequences.
+ ses := bserv.NewSession(ctx, bs)
+
start := build.Clock.Now()
log.Debug("about to fetch messages for block from pubsub")
- bmsgs, err := FetchMessagesByCids(context.TODO(), bserv, blk.BlsMessages)
+ bmsgs, err := FetchMessagesByCids(ctx, ses, blk.BlsMessages)
if err != nil {
log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src)
return
}
- smsgs, err := FetchSignedMessagesByCids(context.TODO(), bserv, blk.SecpkMessages)
+ smsgs, err := FetchSignedMessagesByCids(ctx, ses, blk.SecpkMessages)
if err != nil {
log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, src)
return
@@ -98,7 +109,7 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
func FetchMessagesByCids(
ctx context.Context,
- bserv bserv.BlockService,
+ bserv bserv.BlockGetter,
cids []cid.Cid,
) ([]*types.Message, error) {
out := make([]*types.Message, len(cids))
@@ -127,7 +138,7 @@ func FetchMessagesByCids(
// FIXME: Duplicate of above.
func FetchSignedMessagesByCids(
ctx context.Context,
- bserv bserv.BlockService,
+ bserv bserv.BlockGetter,
cids []cid.Cid,
) ([]*types.SignedMessage, error) {
out := make([]*types.SignedMessage, len(cids))
@@ -157,30 +168,28 @@ func FetchSignedMessagesByCids(
// blocks we did not request.
func fetchCids(
ctx context.Context,
- bserv bserv.BlockService,
+ bserv bserv.BlockGetter,
cids []cid.Cid,
cb func(int, blocks.Block) error,
) error {
- // FIXME: Why don't we use the context here?
- fetchedBlocks := bserv.GetBlocks(context.TODO(), cids)
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
cidIndex := make(map[cid.Cid]int)
for i, c := range cids {
cidIndex[c] = i
}
+ if len(cids) != len(cidIndex) {
+ return fmt.Errorf("duplicate CIDs in fetchCids input")
+ }
+
+ fetchedBlocks := bserv.GetBlocks(ctx, cids)
for i := 0; i < len(cids); i++ {
select {
case block, ok := <-fetchedBlocks:
if !ok {
- // Closed channel, no more blocks fetched, check if we have all
- // of the CIDs requested.
- // FIXME: Review this check. We don't call the callback on the
- // last index?
- if i == len(cids)-1 {
- break
- }
-
return fmt.Errorf("failed to fetch all messages")
}
@@ -369,16 +378,16 @@ func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.Block
func (bv *BlockValidator) isChainNearSynced() bool {
ts := bv.chain.GetHeaviestTipSet()
timestamp := ts.MinTimestamp()
- now := build.Clock.Now().UnixNano()
- cutoff := uint64(now) - uint64(6*time.Hour)
- return timestamp > cutoff
+ timestampTime := time.Unix(int64(timestamp), 0)
+ return build.Clock.Since(timestampTime) < 6*time.Hour
}
func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
- store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary()))
- bmArr := adt.MakeEmptyArray(store)
- smArr := adt.MakeEmptyArray(store)
+ // block headers use adt0
+ store := adt0.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary()))
+ bmArr := adt0.MakeEmptyArray(store)
+ smArr := adt0.MakeEmptyArray(store)
for i, m := range msg.BlsMessages {
c := cbg.CborCid(m)
@@ -433,6 +442,7 @@ func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *typ
if err != nil {
return address.Undef, err
}
+
buf := bufbstore.NewBufferedBstore(bv.chain.Blockstore())
cst := cbor.NewCborStore(buf)
state, err := state.LoadStateTree(cst, st)
@@ -444,19 +454,12 @@ func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *typ
return address.Undef, err
}
- blk, err := bv.chain.Blockstore().Get(act.Head)
- if err != nil {
- return address.Undef, err
- }
- aso := blk.RawData()
-
- var mst miner.State
- err = mst.UnmarshalCBOR(bytes.NewReader(aso))
+ mst, err := miner.Load(bv.chain.Store(ctx), act)
if err != nil {
return address.Undef, err
}
- info, err := mst.GetInfo(adt.WrapStore(ctx, cst))
+ info, err := mst.Info()
if err != nil {
return address.Undef, err
}
@@ -481,14 +484,14 @@ func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *typ
return address.Undef, ErrSoftFailure
}
- hmp, err := stmgr.MinerHasMinPower(ctx, bv.stmgr, bh.Miner, lbts)
+ eligible, err := stmgr.MinerEligibleToMine(ctx, bv.stmgr, bh.Miner, baseTs, lbts)
if err != nil {
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
return address.Undef, ErrSoftFailure
}
- if !hmp {
- log.Warnf("incoming block's miner does not have minimum power")
+ if !eligible {
+ log.Warnf("incoming block's miner is ineligible")
return address.Undef, ErrInsufficientPower
}
diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go
new file mode 100644
index 000000000..215439209
--- /dev/null
+++ b/chain/sub/incoming_test.go
@@ -0,0 +1,63 @@
+package sub
+
+import (
+ "context"
+ "testing"
+
+ address "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/types"
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+)
+
+type getter struct {
+ msgs []*types.Message
+}
+
+func (g *getter) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { panic("NYI") }
+
+func (g *getter) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block {
+ ch := make(chan blocks.Block, len(g.msgs))
+ for _, m := range g.msgs {
+ by, err := m.Serialize()
+ if err != nil {
+ panic(err)
+ }
+ b, err := blocks.NewBlockWithCid(by, m.Cid())
+ if err != nil {
+ panic(err)
+ }
+ ch <- b
+ }
+ close(ch)
+ return ch
+}
+
+func TestFetchCidsWithDedup(t *testing.T) {
+ msgs := []*types.Message{}
+ for i := 0; i < 10; i++ {
+ msgs = append(msgs, &types.Message{
+ From: address.TestAddress,
+ To: address.TestAddress,
+
+ Nonce: uint64(i),
+ })
+ }
+ cids := []cid.Cid{}
+ for _, m := range msgs {
+ cids = append(cids, m.Cid())
+ }
+ g := &getter{msgs}
+
+ // the cids have a duplicate
+ res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0]))
+
+ t.Logf("err: %+v", err)
+ t.Logf("res: %+v", res)
+ if err == nil {
+ t.Errorf("there should be an error")
+ }
+ if err == nil && (res[0] == nil || res[len(res)-1] == nil) {
+ t.Fatalf("there is a nil message: first %p, last %p", res[0], res[len(res)-1])
+ }
+}
diff --git a/chain/sync.go b/chain/sync.go
index 1b1cbdde9..c280e3a40 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -7,12 +7,19 @@ import (
"fmt"
"os"
"sort"
- "strconv"
"strings"
+ "sync"
"time"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
@@ -25,18 +32,18 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
blst "github.com/supranational/blst/bindings/go"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
- "github.com/filecoin-project/lotus/chain/blocksync"
+ "github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -50,25 +57,20 @@ import (
)
// Blocks that are more than MaxHeightDrift epochs above
-//the theoretical max height based on systime are quickly rejected
+// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
-var defaultMessageFetchWindowSize = 200
+var (
+ // LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
+ // where the Syncer publishes candidate chain heads to be synced.
+ LocalIncoming = "incoming"
-func init() {
- if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
- val, err := strconv.Atoi(s)
- if err != nil {
- log.Errorf("failed to parse LOTUS_BSYNC_MSG_WINDOW: %s", err)
- return
- }
- defaultMessageFetchWindowSize = val
- }
-}
+ log = logging.Logger("chain")
-var log = logging.Logger("chain")
-
-var LocalIncoming = "incoming"
+ concurrentSyncRequests = exchange.ShufflePeersPrefix
+ syncRequestBatchSize = 8
+ syncRequestRetries = 5
+)
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
@@ -87,7 +89,7 @@ var LocalIncoming = "incoming"
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
-// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
+// (which owns the sync scheduler and sync workers), ChainExchange, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
@@ -98,7 +100,7 @@ type Syncer struct {
store *store.ChainStore
// handle to the random beacon for verification
- beacon beacon.RandomBeacon
+ beacon beacon.Schedule
// the state manager handles making state queries
sm *stmgr.StateManager
@@ -110,11 +112,11 @@ type Syncer struct {
bad *BadBlockCache
// handle to the block sync service
- Bsync *blocksync.BlockSync
+ Exchange exchange.Client
self peer.ID
- syncmgr *SyncManager
+ syncmgr SyncManager
connmgr connmgr.ConnManager
@@ -124,11 +126,19 @@ type Syncer struct {
verifier ffiwrapper.Verifier
- windowSize int
+ tickerCtxCancel context.CancelFunc
+
+ checkptLk sync.Mutex
+
+ checkpt types.TipSetKey
+
+ ds dtypes.MetadataDS
}
+type SyncManagerCtor func(syncFn SyncFunc) SyncManager
+
// NewSyncer creates a new Syncer object.
-func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
+func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
return nil, xerrors.Errorf("getting genesis block: %w", err)
@@ -139,18 +149,24 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
return nil, err
}
+ cp, err := loadCheckpoint(ds)
+ if err != nil {
+ return nil, xerrors.Errorf("error loading mpool config: %w", err)
+ }
+
s := &Syncer{
+ ds: ds,
+ checkpt: cp,
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
- Bsync: bsync,
+ Exchange: exchange,
store: sm.ChainStore(),
sm: sm,
self: self,
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
- windowSize: defaultMessageFetchWindowSize,
incoming: pubsub.New(50),
}
@@ -161,22 +177,52 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
log.Warn("*********************************************************************************************")
}
- s.syncmgr = NewSyncManager(s.Sync)
+ s.syncmgr = syncMgrCtor(s.Sync)
return s, nil
}
func (syncer *Syncer) Start() {
+ tickerCtx, tickerCtxCancel := context.WithCancel(context.Background())
syncer.syncmgr.Start()
+
+ syncer.tickerCtxCancel = tickerCtxCancel
+
+ go syncer.runMetricsTicker(tickerCtx)
+}
+
+func (syncer *Syncer) runMetricsTicker(tickerCtx context.Context) {
+ genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0)
+ ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ sinceGenesis := build.Clock.Now().Sub(genesisTime)
+ expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
+
+ stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight))
+ case <-tickerCtx.Done():
+ return
+ }
+ }
}
func (syncer *Syncer) Stop() {
syncer.syncmgr.Stop()
+ syncer.tickerCtxCancel()
}
// InformNewHead informs the syncer about a new potential tipset
// This should be called when connecting to new peers, and additionally
// when receiving new blocks from the network
func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Errorf("panic in InformNewHead: ", err)
+ }
+ }()
+
ctx := context.Background()
if fts == nil {
log.Errorf("got nil tipset in InformNewHead")
@@ -220,7 +266,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return false
}
- syncer.Bsync.AddPeer(from)
+ syncer.Exchange.AddPeer(from)
bestPweight := syncer.store.GetHeaviestTipSet().ParentWeight()
targetWeight := fts.TipSet().ParentWeight()
@@ -338,21 +384,28 @@ func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool {
return syncer.InformNewHead(from, fts)
}
-func copyBlockstore(from, to bstore.Blockstore) error {
- cids, err := from.AllKeysChan(context.TODO())
+func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error {
+ ctx, span := trace.StartSpan(ctx, "copyBlockstore")
+ defer span.End()
+
+ cids, err := from.AllKeysChan(ctx)
if err != nil {
return err
}
+ // TODO: should probably expose better methods on the blockstore for this operation
+ var blks []blocks.Block
for c := range cids {
b, err := from.Get(c)
if err != nil {
return err
}
- if err := to.Put(b); err != nil {
- return err
- }
+ blks = append(blks, b)
+ }
+
+ if err := to.PutMany(blks); err != nil {
+ return err
}
return nil
@@ -411,9 +464,10 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) {
- store := adt.WrapStore(context.TODO(), bs)
- bmArr := adt.MakeEmptyArray(store)
- smArr := adt.MakeEmptyArray(store)
+ // block headers use adt0
+ store := adt0.WrapStore(context.TODO(), bs)
+ bmArr := adt0.MakeEmptyArray(store)
+ smArr := adt0.MakeEmptyArray(store)
for i, m := range bmsgCids {
c := cbg.CborCid(m)
@@ -451,7 +505,7 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e
}
// FetchTipSet tries to load the provided tipset from the store, and falls back
-// to the network (BlockSync) by querying the supplied peer if not found
+// to the network (client) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
@@ -462,7 +516,7 @@ func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipS
}
// fall back to the network.
- return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
+ return syncer.Exchange.GetFullTipSet(ctx, p, tsk)
}
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
@@ -553,7 +607,7 @@ func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
}
-func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) error {
+func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
ctx, span := trace.StartSpan(ctx, "validateTipSet")
defer span.End()
@@ -569,7 +623,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
- if err := syncer.ValidateBlock(ctx, b); err != nil {
+ if err := syncer.ValidateBlock(ctx, b, useCache); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error()))
}
@@ -591,26 +645,25 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
}
func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
- var spast power.State
-
- _, err := syncer.sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spast, baseTs)
+ act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
- return err
+ return xerrors.Errorf("failed to load power actor: %w", err)
}
- cm, err := adt.AsMap(syncer.store.Store(ctx), spast.Claims)
+ powState, err := power.Load(syncer.store.Store(ctx), act)
if err != nil {
- return err
+ return xerrors.Errorf("failed to load power actor state: %w", err)
}
- var claim power.Claim
- exist, err := cm.Get(adt.AddrKey(maddr), &claim)
+ _, exist, err := powState.MinerPower(maddr)
if err != nil {
- return err
+ return xerrors.Errorf("failed to look up miner's claim: %w", err)
}
+
if !exist {
return xerrors.New("miner isn't valid")
}
+
return nil
}
@@ -637,7 +690,7 @@ func blockSanityChecks(h *types.BlockHeader) error {
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
-func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
+func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
@@ -646,13 +699,15 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
}
}()
- isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
- if err != nil {
- return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
- }
+ if useCache {
+ isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
+ if err != nil {
+ return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
+ }
- if isValidated {
- return nil
+ if isValidated {
+ return nil
+ }
}
validationStart := build.Clock.Now()
@@ -682,7 +737,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
lbst, _, err := syncer.sm.TipSetState(ctx, lbts)
if err != nil {
- return xerrors.Errorf("failed to compute lookback tipset state: %w", err)
+ return xerrors.Errorf("failed to compute lookback tipset state (epoch %d): %w", lbts.Height(), err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
@@ -739,31 +794,35 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
b.Header.ParentWeight, pweight)
}
- // Stuff that needs stateroot / worker address
- stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
- if err != nil {
- return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
- }
-
- if stateroot != h.ParentStateRoot {
- msgs, err := syncer.store.MessagesForTipset(baseTs)
+ stateRootCheck := async.Err(func() error {
+ stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
- log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
- } else {
- log.Warn("Messages for tipset with mismatching state:")
- for i, m := range msgs {
- mm := m.VMMessage()
- log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
- }
+ return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
- return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
- }
+ if stateroot != h.ParentStateRoot {
+ msgs, err := syncer.store.MessagesForTipset(baseTs)
+ if err != nil {
+ log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
+ } else {
+ log.Warn("Messages for tipset with mismatching state:")
+ for i, m := range msgs {
+ mm := m.VMMessage()
+ log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
+ }
+ }
- if precp != h.ParentMessageReceipts {
- return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
- }
+ return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
+ }
+ if precp != h.ParentMessageReceipts {
+ return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
+ }
+
+ return nil
+ })
+
+ // Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
@@ -774,13 +833,13 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return xerrors.Errorf("block is not claiming to be a winner")
}
- hp, err := stmgr.MinerHasMinPower(ctx, syncer.sm, h.Miner, lbts)
+ eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
- if !hp {
- return xerrors.New("block's miner does not meet minimum power threshold")
+ if !eligible {
+ return xerrors.New("block's miner is ineligible to mine")
}
rBeacon := *prevBeacon
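// A minimal, dependency-free sketch of the pattern above: the independent
// validation checks (winner, messages, base fee, and now the state root) run
// concurrently and their results are gathered before the block is accepted.
// The real code uses async.Err futures plus multierror; this is only the shape.
func runChecksConcurrently(checks ...func() error) error {
	results := make(chan error, len(checks))
	for _, check := range checks {
		check := check // per-iteration copy
		go func() { results <- check() }()
	}
	var firstErr error
	for range checks {
		if err := <-results; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}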
@@ -810,7 +869,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return xerrors.Errorf("received block was from slashed or invalid miner")
}
- mpow, tpow, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
+ mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
@@ -835,7 +894,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return nil
}
- if err := beacon.ValidateBlockValues(syncer.beacon, h, *prevBeacon); err != nil {
+ if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
@@ -847,10 +906,12 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
- beaconBase := *prevBeacon
- if len(h.BeaconEntries) == 0 {
+ if h.Height > build.UpgradeSmokeHeight {
buf.Write(baseTs.MinTicket().VRFProof)
- } else {
+ }
+
+ beaconBase := *prevBeacon
+ if len(h.BeaconEntries) != 0 {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
@@ -882,6 +943,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
winnerCheck,
msgsCheck,
baseFeeCheck,
+ stateRootCheck,
}
var merr error
@@ -909,8 +971,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return mulErr
}
- if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
- return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
+ if useCache {
+ if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
+ return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
+ }
}
return nil
@@ -940,7 +1004,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
- return xerrors.Errorf("failed to get randomness for verifying winningPost proof: %w", err)
+ return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
@@ -953,7 +1017,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
return xerrors.Errorf("getting winning post sector set: %w", err)
}
- ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, abi.WinningPoStVerifyInfo{
+ ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
@@ -1033,7 +1097,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
return xerrors.Errorf("failed to get actor: %w", err)
}
- if !act.IsAccountActor() {
+ if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
nonces[m.From] = act.Nonce
@@ -1047,9 +1111,9 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
return nil
}
- store := adt.WrapStore(ctx, cst)
+ store := adt0.WrapStore(ctx, cst)
- bmArr := adt.MakeEmptyArray(store)
+ bmArr := adt0.MakeEmptyArray(store)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
@@ -1061,7 +1125,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
}
}
- smArr := adt.MakeEmptyArray(store)
+ smArr := adt0.MakeEmptyArray(store)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
@@ -1164,7 +1228,7 @@ func extractSyncState(ctx context.Context) *SyncerState {
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards, for each tipset:
// 3a. Load it from the chainstore; if found, it move on to its parent.
-// 3b. Query our peers via BlockSync in batches, requesting up to a
+// 3b. Query our peers via client in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
@@ -1223,9 +1287,11 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet
blockSet := []*types.TipSet{incoming}
+ // Parent of the new (possibly better) tipset that we need to fetch next.
at := incoming.Parents()
- // we want to sync all the blocks until the height above the block we have
+ // we want to sync all the blocks until the height above our
+ // best tipset so far
untilHeight := known.Height() + 1
ss.SetHeight(blockSet[len(blockSet)-1].Height())
@@ -1265,7 +1331,7 @@ loop:
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap
}
- blks, err := syncer.Bsync.GetBlocks(ctx, at, window)
+ blks, err := syncer.Exchange.GetBlocks(ctx, at, window)
if err != nil {
// Most likely our peers aren't fully synced yet, but forwarded
// new block message (ideally we'd find better peers)
@@ -1283,7 +1349,7 @@ loop:
// have. Since we fetch from the head backwards our reassembled chain
// is sorted in reverse here: we have a child -> parent order, our last
// tipset then should be child of the first tipset retrieved.
- // FIXME: The reassembly logic should be part of the `BlockSync`
+ // FIXME: The reassembly logic should be part of the `client`
// service, the consumer should not be concerned with the
// `MaxRequestLength` limitation, it should just be able to request
// an segment of arbitrary length. The same burden is put on
@@ -1319,13 +1385,17 @@ loop:
}
base := blockSet[len(blockSet)-1]
- if base.Parents() == known.Parents() {
- // common case: receiving a block thats potentially part of the same tipset as our best block
+ if base.IsChildOf(known) {
+ // common case: receiving blocks that are building on top of our best tipset
return blockSet, nil
}
- if types.CidArrsEqual(base.Parents().Cids(), known.Cids()) {
- // common case: receiving blocks that are building on top of our best tipset
+ knownParent, err := syncer.store.LoadTipSet(known.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
+ }
+ if base.IsChildOf(knownParent) {
+ // common case: receiving a block that's potentially part of the same tipset as our best block
return blockSet, nil
}
@@ -1333,7 +1403,7 @@ loop:
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
fork, err := syncer.syncFork(ctx, base, known)
if err != nil {
- if xerrors.Is(err, ErrForkTooLong) {
+ if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) {
// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
log.Warn("adding forked chain to our bad tipset cache")
for _, b := range incoming.Blocks() {
@@ -1349,15 +1419,24 @@ loop:
}
var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
+var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
-// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
-// denylist. Else, we find the common ancestor, and add the missing chain
+// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
+// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
- tips, err := syncer.Bsync.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
+
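+ // If our known head is the checkpointed tipset, refuse to fork away from it.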
+ chkpt := syncer.GetCheckpoint()
+ if known.Key() == chkpt {
+ return nil, ErrForkCheckpoint
+ }
+
+ // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
+ // Would it not be better to ask in smaller chunks, given that a fork of ~ForkLengthThreshold length is very rare?
+ tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
if err != nil {
return nil, err
}
@@ -1382,12 +1461,18 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
if nts.Height() < tips[cur].Height() {
cur++
} else {
+ // We will be forking away from nts, check that it isn't checkpointed
+ if nts.Key() == chkpt {
+ return nil, ErrForkCheckpoint
+ }
+
nts, err = syncer.store.LoadTipSet(nts.Parents())
if err != nil {
return nil, xerrors.Errorf("loading next local tipset: %w", err)
}
}
}
+
return nil, ErrForkTooLong
}
@@ -1397,7 +1482,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*
return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error {
log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids()))
- if err := syncer.ValidateTipSet(ctx, fts); err != nil {
+ if err := syncer.ValidateTipSet(ctx, fts, true); err != nil {
log.Errorf("failed to validate tipset: %+v", err)
return xerrors.Errorf("message processing failed: %w", err)
}
@@ -1411,13 +1496,12 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*
// fills out each of the given tipsets with messages and calls the callback with it
func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
+ ss := extractSyncState(ctx)
ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
- windowSize := syncer.windowSize
-mainLoop:
for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil {
@@ -1431,31 +1515,18 @@ mainLoop:
continue
}
- batchSize := windowSize
+ batchSize := concurrentSyncRequests * syncRequestBatchSize
if i < batchSize {
- batchSize = i
+ batchSize = i + 1
}
- nextI := (i + 1) - batchSize // want to fetch batchSize values, 'i' points to last one we want to fetch, so its 'inclusive' of our request, thus we need to add one to our request start index
+ ss.SetStage(api.StageFetchingMessages)
+ startOffset := i + 1 - batchSize
+ bstout, batchErr := syncer.fetchMessages(ctx, headers[startOffset:startOffset+batchSize], startOffset)
+ ss.SetStage(api.StageMessages)
- var bstout []*blocksync.CompactedMessages
- for len(bstout) < batchSize {
- next := headers[nextI]
-
- nreq := batchSize - len(bstout)
- bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq))
- if err != nil {
- // TODO check errors for temporary nature
- if windowSize > 1 {
- windowSize /= 2
- log.Infof("error fetching messages: %s; reducing window size to %d and trying again", err, windowSize)
- continue mainLoop
- }
- return xerrors.Errorf("message processing failed: %w", err)
- }
-
- bstout = append(bstout, bstips...)
- nextI += len(bstips)
+ if batchErr != nil {
+ return xerrors.Errorf("failed to fetch messages: %w", err)
}
for bsi := 0; bsi < len(bstout); bsi++ {
@@ -1477,36 +1548,91 @@ mainLoop:
return err
}
- if err := persistMessages(bs, bstip); err != nil {
+ if err := persistMessages(ctx, bs, bstip); err != nil {
return err
}
- if err := copyBlockstore(bs, syncer.store.Blockstore()); err != nil {
+ if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil {
return xerrors.Errorf("message processing failed: %w", err)
}
}
- if i >= windowSize {
- newWindowSize := windowSize + 10
- if newWindowSize > int(blocksync.MaxRequestLength) {
- newWindowSize = int(blocksync.MaxRequestLength)
- }
- if newWindowSize > windowSize {
- windowSize = newWindowSize
- log.Infof("successfully fetched %d messages; increasing window size to %d", len(bstout), windowSize)
- }
- }
-
i -= batchSize
}
- // remember our window size
- syncer.windowSize = windowSize
-
return nil
}
-func persistMessages(bs bstore.Blockstore, bst *blocksync.CompactedMessages) error {
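+// fetchMessages fetches the compacted messages for the given headers from our
+// peers via the exchange protocol. The work is split into chunks of
+// syncRequestBatchSize handled by concurrent goroutines; each request is
+// retried up to syncRequestRetries times, and results are written into a
+// slice aligned with the supplied headers.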
+func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
+ batchSize := len(headers)
+ batch := make([]*exchange.CompactedMessages, batchSize)
+
+ var wg sync.WaitGroup
+ var mx sync.Mutex
+ var batchErr error
+
+ start := build.Clock.Now()
+
+ for j := 0; j < batchSize; j += syncRequestBatchSize {
+ wg.Add(1)
+ go func(j int) {
+ defer wg.Done()
+
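+ // Size this chunk; the final chunk may be smaller than syncRequestBatchSize.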
+ nreq := syncRequestBatchSize
+ if j+nreq > batchSize {
+ nreq = batchSize - j
+ }
+
+ failed := false
+ for offset := 0; !failed && offset < nreq; {
+ nextI := j + offset
+ lastI := j + nreq
+
+ var requestErr error
+ var requestResult []*exchange.CompactedMessages
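+ // Retry the exchange request up to syncRequestRetries times, accumulating errors.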
+ for retry := 0; requestResult == nil && retry < syncRequestRetries; retry++ {
+ if retry > 0 {
+ log.Infof("fetching messages at %d (retry %d)", startOffset+nextI, retry)
+ } else {
+ log.Infof("fetching messages at %d", startOffset+nextI)
+ }
+
+ result, err := syncer.Exchange.GetChainMessages(ctx, headers[nextI:lastI])
+ if err != nil {
+ requestErr = multierror.Append(requestErr, err)
+ } else {
+ requestResult = result
+ }
+ }
+
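+ // Record results under the lock: either advance the offset or note the failure.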
+ mx.Lock()
+ if requestResult != nil {
+ copy(batch[j+offset:], requestResult)
+ offset += len(requestResult)
+ } else {
+ log.Errorf("error fetching messages at %d: %s", nextI, requestErr)
+ batchErr = multierror.Append(batchErr, requestErr)
+ failed = true
+ }
+ mx.Unlock()
+ }
+ }(j)
+ }
+ wg.Wait()
+
+ if batchErr != nil {
+ return nil, batchErr
+ }
+
+ log.Infof("fetching messages for %d tipsets at %d done; took %s", batchSize, startOffset, build.Clock.Since(start))
+
+ return batch, nil
+}
+
+func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.CompactedMessages) error {
+ _, span := trace.StartSpan(ctx, "persistMessages")
+ defer span.End()
+
for _, m := range bst.Bls {
//log.Infof("putting BLS message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
@@ -1601,11 +1727,7 @@ func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []b
}
func (syncer *Syncer) State() []SyncerState {
- var out []SyncerState
- for _, ss := range syncer.syncmgr.syncStates {
- out = append(out, ss.Snapshot())
- }
- return out
+ return syncer.syncmgr.State()
}
// MarkBad manually adds a block to the "bad blocks" cache.
@@ -1613,6 +1735,11 @@ func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad"))
}
+// UnmarkBad manually removes a block from the "bad blocks" cache.
+func (syncer *Syncer) UnmarkBad(blk cid.Cid) {
+ syncer.bad.Remove(blk)
+}
+
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
bbr, ok := syncer.bad.Has(blk)
return bbr.String(), ok
@@ -1637,7 +1764,7 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)
cur = next
}
- return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset")
+ return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
diff --git a/chain/sync_manager.go b/chain/sync_manager.go
index 8c77b47c5..811092bc7 100644
--- a/chain/sync_manager.go
+++ b/chain/sync_manager.go
@@ -20,7 +20,28 @@ const (
type SyncFunc func(context.Context, *types.TipSet) error
-type SyncManager struct {
+// SyncManager manages the chain synchronization process, both at bootstrap time
+// and during ongoing operation.
+//
+// It receives candidate chain heads in the form of tipsets from peers,
+// and schedules them onto sync workers, deduplicating processing for
+// already-active syncs.
+type SyncManager interface {
+ // Start starts the SyncManager.
+ Start()
+
+ // Stop stops the SyncManager.
+ Stop()
+
+ // SetPeerHead informs the SyncManager that the supplied peer reported the
+ // supplied tipset.
+ SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet)
+
+ // State retrieves the state of the sync workers.
+ State() []SyncerState
+}
+
+type syncManager struct {
lk sync.Mutex
peerHeads map[peer.ID]*types.TipSet
@@ -48,6 +69,8 @@ type SyncManager struct {
workerChan chan *types.TipSet
}
+var _ SyncManager = (*syncManager)(nil)
+
type syncResult struct {
ts *types.TipSet
success bool
@@ -55,8 +78,8 @@ type syncResult struct {
const syncWorkerCount = 3
-func NewSyncManager(sync SyncFunc) *SyncManager {
- return &SyncManager{
+func NewSyncManager(sync SyncFunc) SyncManager {
+ return &syncManager{
bspThresh: 1,
peerHeads: make(map[peer.ID]*types.TipSet),
syncTargets: make(chan *types.TipSet),
@@ -69,18 +92,18 @@ func NewSyncManager(sync SyncFunc) *SyncManager {
}
}
-func (sm *SyncManager) Start() {
+func (sm *syncManager) Start() {
go sm.syncScheduler()
for i := 0; i < syncWorkerCount; i++ {
go sm.syncWorker(i)
}
}
-func (sm *SyncManager) Stop() {
+func (sm *syncManager) Stop() {
close(sm.stop)
}
-func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
+func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
sm.lk.Lock()
defer sm.lk.Unlock()
sm.peerHeads[p] = ts
@@ -105,6 +128,14 @@ func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.Tip
sm.incomingTipSets <- ts
}
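+// State returns a snapshot of the state of every sync worker.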
+func (sm *syncManager) State() []SyncerState {
+ ret := make([]SyncerState, 0, len(sm.syncStates))
+ for _, s := range sm.syncStates {
+ ret = append(ret, s.Snapshot())
+ }
+ return ret
+}
+
type syncBucketSet struct {
buckets []*syncTargetBucket
}
@@ -234,7 +265,7 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
return best
}
-func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
+func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
var buckets syncBucketSet
var peerHeads []*types.TipSet
@@ -258,7 +289,7 @@ func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
return buckets.Heaviest(), nil
}
-func (sm *SyncManager) syncScheduler() {
+func (sm *syncManager) syncScheduler() {
for {
select {
@@ -280,7 +311,7 @@ func (sm *SyncManager) syncScheduler() {
}
}
-func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
+func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
log.Debug("scheduling incoming tipset sync: ", ts.Cids())
if sm.getBootstrapState() == BSStateSelected {
sm.setBootstrapState(BSStateScheduled)
@@ -328,10 +359,11 @@ func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
}
}
-func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
+func (sm *syncManager) scheduleProcessResult(res *syncResult) {
if res.success && sm.getBootstrapState() != BSStateComplete {
sm.setBootstrapState(BSStateComplete)
}
+
delete(sm.activeSyncs, res.ts.Key())
relbucket := sm.activeSyncTips.PopRelated(res.ts)
if relbucket != nil {
@@ -360,7 +392,7 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
}
}
-func (sm *SyncManager) scheduleWorkSent() {
+func (sm *syncManager) scheduleWorkSent() {
hts := sm.nextSyncTarget.heaviestTipSet()
sm.activeSyncs[hts.Key()] = hts
@@ -372,7 +404,7 @@ func (sm *SyncManager) scheduleWorkSent() {
}
}
-func (sm *SyncManager) syncWorker(id int) {
+func (sm *syncManager) syncWorker(id int) {
ss := &SyncerState{}
sm.syncStates[id] = ss
for {
@@ -397,7 +429,7 @@ func (sm *SyncManager) syncWorker(id int) {
}
}
-func (sm *SyncManager) syncedPeerCount() int {
+func (sm *syncManager) syncedPeerCount() int {
var count int
for _, ts := range sm.peerHeads {
if ts.Height() > 0 {
@@ -407,19 +439,19 @@ func (sm *SyncManager) syncedPeerCount() int {
return count
}
-func (sm *SyncManager) getBootstrapState() int {
+func (sm *syncManager) getBootstrapState() int {
sm.bssLk.Lock()
defer sm.bssLk.Unlock()
return sm.bootstrapState
}
-func (sm *SyncManager) setBootstrapState(v int) {
+func (sm *syncManager) setBootstrapState(v int) {
sm.bssLk.Lock()
defer sm.bssLk.Unlock()
sm.bootstrapState = v
}
-func (sm *SyncManager) IsBootstrapped() bool {
+func (sm *syncManager) IsBootstrapped() bool {
sm.bssLk.Lock()
defer sm.bssLk.Unlock()
return sm.bootstrapState == BSStateComplete
diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go
index ca2ced856..269b3a62e 100644
--- a/chain/sync_manager_test.go
+++ b/chain/sync_manager_test.go
@@ -17,7 +17,7 @@ type syncOp struct {
done func()
}
-func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) {
+func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *syncManager, chan *syncOp)) {
syncTargets := make(chan *syncOp)
sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
ch := make(chan struct{})
@@ -27,7 +27,7 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
}
<-ch
return nil
- })
+ }).(*syncManager)
sm.bspThresh = thresh
sm.Start()
@@ -77,12 +77,12 @@ func TestSyncManager(t *testing.T) {
c3 := mock.TipSet(mock.MkBlock(b, 3, 5))
d := mock.TipSet(mock.MkBlock(c1, 4, 5))
- runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+ runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", c1)
assertGetSyncOp(t, stc, c1)
})
- runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+ runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", c1)
assertNoOp(t, stc)
@@ -90,7 +90,7 @@ func TestSyncManager(t *testing.T) {
assertGetSyncOp(t, stc, c1)
})
- runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+ runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", b)
assertGetSyncOp(t, stc, b)
@@ -101,7 +101,7 @@ func TestSyncManager(t *testing.T) {
assertGetSyncOp(t, stc, c2)
})
- runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+ runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", a)
assertGetSyncOp(t, stc, a)
@@ -122,7 +122,7 @@ func TestSyncManager(t *testing.T) {
assertGetSyncOp(t, stc, d)
})
- runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+ runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", a)
assertGetSyncOp(t, stc, a)
diff --git a/chain/sync_test.go b/chain/sync_test.go
index 9a98b3b36..2c9f2f131 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -7,6 +7,8 @@ import (
"testing"
"time"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
@@ -16,14 +18,11 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/store"
@@ -41,11 +40,9 @@ func init() {
if err != nil {
panic(err)
}
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- power.ConsensusMinerMinPower = big.NewInt(2048)
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
const source = 0
@@ -331,6 +328,36 @@ func (tu *syncTestUtil) compareSourceState(with int) {
}
}
+func (tu *syncTestUtil) assertBad(node int, ts *types.TipSet) {
+ for _, blk := range ts.Cids() {
+ rsn, err := tu.nds[node].SyncCheckBad(context.TODO(), blk)
+ require.NoError(tu.t, err)
+ require.True(tu.t, len(rsn) != 0)
+ }
+}
+
+func (tu *syncTestUtil) getHead(node int) *types.TipSet {
+ ts, err := tu.nds[node].ChainHead(context.TODO())
+ require.NoError(tu.t, err)
+ return ts
+}
+
+func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) {
+ require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk))
+}
+
+func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) {
+ for {
+ _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk)
+ if err == nil {
+ break
+ }
+ }
+
+ // Time to allow for syncing and validation
+ time.Sleep(2 * time.Second)
+}
+
func (tu *syncTestUtil) waitUntilSync(from, to int) {
target, err := tu.nds[from].ChainHead(tu.ctx)
if err != nil {
@@ -442,8 +469,8 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
return []uint64{1}, nil
}
-func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
- return []abi.PoStProof{
+func (wpp badWpp) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
+ return []proof.PoStProof{
{
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
ProofBytes: []byte("evil"),
@@ -630,6 +657,49 @@ func TestDuplicateNonce(t *testing.T) {
require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message")
}
+// This test asserts that a block that includes a message with a bad nonce can't be synced. A nonce is "bad" if it can't
+// be applied on the parent state.
+func TestBadNonce(t *testing.T) {
+ H := 10
+ tu := prepSyncTest(t, H)
+
+ base := tu.g.CurTipset
+
+ // Produce a message from the banker with a bad nonce
+ makeBadMsg := func() *types.SignedMessage {
+
+ ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
+ require.NoError(t, err)
+ msg := types.Message{
+ To: tu.g.Banker(),
+ From: tu.g.Banker(),
+
+ Nonce: ba.Nonce + 5,
+
+ Value: types.NewInt(1),
+
+ Method: 0,
+
+ GasLimit: 100_000_000,
+ GasFeeCap: types.NewInt(0),
+ GasPremium: types.NewInt(0),
+ }
+
+ sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes())
+ require.NoError(t, err)
+
+ return &types.SignedMessage{
+ Message: msg,
+ Signature: *sig,
+ }
+ }
+
+ msgs := make([][]*types.SignedMessage, 1)
+ msgs[0] = []*types.SignedMessage{makeBadMsg()}
+
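+ // Mining a block that includes the bad-nonce message is expected to fail validation.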
+ tu.mineOnBlock(base, 0, []int{0}, true, true, msgs)
+}
+
func BenchmarkSyncBasic(b *testing.B) {
for i := 0; i < b.N; i++ {
runSyncBenchLength(b, 100)
@@ -662,7 +732,7 @@ func TestSyncInputs(t *testing.T) {
err := s.ValidateBlock(context.TODO(), &types.FullBlock{
Header: &types.BlockHeader{},
- })
+ }, false)
if err == nil {
t.Fatal("should error on empty block")
}
@@ -671,8 +741,92 @@ func TestSyncInputs(t *testing.T) {
h.ElectionProof = nil
- err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h})
+ err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}, false)
if err == nil {
t.Fatal("should error on block with nil election proof")
}
}
+
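+// TestSyncCheckpointHead asserts that a node which checkpoints its current head
+// will not reorg onto a heavier competing chain, and marks that chain as bad.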
+func TestSyncCheckpointHead(t *testing.T) {
+ H := 10
+ tu := prepSyncTest(t, H)
+
+ p1 := tu.addClientNode()
+ p2 := tu.addClientNode()
+
+ fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+ tu.loadChainToNode(p1)
+ tu.loadChainToNode(p2)
+
+ base := tu.g.CurTipset
+ fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+ // The two nodes fork at this point into 'a' and 'b'
+ a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
+ a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
+ a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+
+ tu.waitUntilSyncTarget(p1, a.TipSet())
+ tu.checkpointTs(p1, a.TipSet().Key())
+
+ require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
+ // chain B will now be heaviest
+ b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+
+ fmt.Println("A: ", a.Cids(), a.TipSet().Height())
+ fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+
+ // Now for the fun part!! p1 should mark p2's head as BAD.
+
+ require.NoError(t, tu.mn.LinkAll())
+ tu.connect(p1, p2)
+ tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
+ p1Head := tu.getHead(p1)
+ require.Equal(tu.t, p1Head, a.TipSet())
+ tu.assertBad(p1, b.TipSet())
+}
+
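+// TestSyncCheckpointEarlierThanHead asserts that checkpointing a tipset below
+// the current head still prevents reorgs that would cross the checkpoint.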
+func TestSyncCheckpointEarlierThanHead(t *testing.T) {
+ H := 10
+ tu := prepSyncTest(t, H)
+
+ p1 := tu.addClientNode()
+ p2 := tu.addClientNode()
+
+ fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+ tu.loadChainToNode(p1)
+ tu.loadChainToNode(p2)
+
+ base := tu.g.CurTipset
+ fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+ // The two nodes fork at this point into 'a' and 'b'
+ a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
+ a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
+ a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+
+ tu.waitUntilSyncTarget(p1, a.TipSet())
+ tu.checkpointTs(p1, a1.TipSet().Key())
+
+ require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
+ // chain B will now be heaviest
+ b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+
+ fmt.Println("A: ", a.Cids(), a.TipSet().Height())
+ fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+
+ // Now for the fun part!! p1 should mark p2's head as BAD.
+
+ require.NoError(t, tu.mn.LinkAll())
+ tu.connect(p1, p2)
+ tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
+ p1Head := tu.getHead(p1)
+ require.Equal(tu.t, p1Head, a.TipSet())
+ tu.assertBad(p1, b.TipSet())
+}
diff --git a/chain/syncstate.go b/chain/syncstate.go
index aaca88303..06cd5d91e 100644
--- a/chain/syncstate.go
+++ b/chain/syncstate.go
@@ -1,34 +1,16 @@
package chain
import (
- "fmt"
"sync"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
-func SyncStageString(v api.SyncStateStage) string {
- switch v {
- case api.StageHeaders:
- return "header sync"
- case api.StagePersistHeaders:
- return "persisting headers"
- case api.StageMessages:
- return "message sync"
- case api.StageSyncComplete:
- return "complete"
- case api.StageSyncErrored:
- return "error"
- default:
- return fmt.Sprintf("", v)
- }
-}
-
type SyncerState struct {
lk sync.Mutex
Target *types.TipSet
diff --git a/chain/types/actor.go b/chain/types/actor.go
index bb5635995..a9974a01f 100644
--- a/chain/types/actor.go
+++ b/chain/types/actor.go
@@ -4,8 +4,6 @@ import (
"errors"
"github.com/ipfs/go-cid"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
)
var ErrActorNotFound = errors.New("actor not found")
@@ -17,7 +15,3 @@ type Actor struct {
Nonce uint64
Balance BigInt
}
-
-func (a *Actor) IsAccountActor() bool {
- return a.Code == builtin.AccountActorCodeID
-}
diff --git a/chain/types/bigint.go b/chain/types/bigint.go
index 466b9c556..da4857d5b 100644
--- a/chain/types/bigint.go
+++ b/chain/types/bigint.go
@@ -4,7 +4,7 @@ import (
"fmt"
"math/big"
- big2 "github.com/filecoin-project/specs-actors/actors/abi/big"
+ big2 "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
)
diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go
index 36b43c012..0ec33fe42 100644
--- a/chain/types/blockheader.go
+++ b/chain/types/blockheader.go
@@ -4,10 +4,12 @@ import (
"bytes"
"math/big"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/minio/blake2b-simd"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
@@ -53,7 +55,7 @@ type BlockHeader struct {
BeaconEntries []BeaconEntry // 3
- WinPoStProof []abi.PoStProof // 4
+ WinPoStProof []proof.PoStProof // 4
Parents []cid.Cid // 5
diff --git a/chain/types/blockheader_test.go b/chain/types/blockheader_test.go
index e4b545cca..f5faac3b3 100644
--- a/chain/types/blockheader_test.go
+++ b/chain/types/blockheader_test.go
@@ -7,12 +7,14 @@ import (
"reflect"
"testing"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
cid "github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
)
func testBlockHeader(t testing.TB) *BlockHeader {
@@ -80,7 +82,7 @@ func TestInteropBH(t *testing.T) {
t.Fatal(err)
}
- posts := []abi.PoStProof{
+ posts := []proof.PoStProof{
{PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte{0x07}},
}
diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go
index 35abf2828..d063ce8c9 100644
--- a/chain/types/cbor_gen.go
+++ b/chain/types/cbor_gen.go
@@ -6,9 +6,10 @@ import (
"fmt"
"io"
- abi "github.com/filecoin-project/specs-actors/actors/abi"
- crypto "github.com/filecoin-project/specs-actors/actors/crypto"
- exitcode "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ abi "github.com/filecoin-project/go-state-types/abi"
+ crypto "github.com/filecoin-project/go-state-types/crypto"
+ exitcode "github.com/filecoin-project/go-state-types/exitcode"
+ proof "github.com/filecoin-project/specs-actors/actors/runtime/proof"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
@@ -58,7 +59,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error {
}
}
- // t.WinPoStProof ([]abi.PoStProof) (slice)
+ // t.WinPoStProof ([]proof.PoStProof) (slice)
if len(t.WinPoStProof) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.WinPoStProof was too long")
}
@@ -243,7 +244,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
t.BeaconEntries[i] = v
}
- // t.WinPoStProof ([]abi.PoStProof) (slice)
+ // t.WinPoStProof ([]proof.PoStProof) (slice)
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
@@ -259,12 +260,12 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
}
if extra > 0 {
- t.WinPoStProof = make([]abi.PoStProof, extra)
+ t.WinPoStProof = make([]proof.PoStProof, extra)
}
for i := 0; i < int(extra); i++ {
- var v abi.PoStProof
+ var v proof.PoStProof
if err := v.UnmarshalCBOR(br); err != nil {
return err
}
@@ -1633,3 +1634,131 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error {
}
return nil
}
+
+var lengthBufStateRoot = []byte{131}
+
+func (t *StateRoot) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufStateRoot); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Version (types.StateTreeVersion) (uint64)
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
+ return err
+ }
+
+ // t.Actors (cid.Cid) (struct)
+
+ if err := cbg.WriteCidBuf(scratch, w, t.Actors); err != nil {
+ return xerrors.Errorf("failed to write cid field t.Actors: %w", err)
+ }
+
+ // t.Info (cid.Cid) (struct)
+
+ if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil {
+ return xerrors.Errorf("failed to write cid field t.Info: %w", err)
+ }
+
+ return nil
+}
+
+func (t *StateRoot) UnmarshalCBOR(r io.Reader) error {
+ *t = StateRoot{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Version (types.StateTreeVersion) (uint64)
+
+ {
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Version = StateTreeVersion(extra)
+
+ }
+ // t.Actors (cid.Cid) (struct)
+
+ {
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.Actors: %w", err)
+ }
+
+ t.Actors = c
+
+ }
+ // t.Info (cid.Cid) (struct)
+
+ {
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.Info: %w", err)
+ }
+
+ t.Info = c
+
+ }
+ return nil
+}
+
+var lengthBufStateInfo0 = []byte{128}
+
+func (t *StateInfo0) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufStateInfo0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *StateInfo0) UnmarshalCBOR(r io.Reader) error {
+ *t = StateInfo0{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 0 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ return nil
+}
diff --git a/chain/types/fil.go b/chain/types/fil.go
index 99a896e38..7eac8ce93 100644
--- a/chain/types/fil.go
+++ b/chain/types/fil.go
@@ -12,11 +12,15 @@ import (
type FIL BigInt
func (f FIL) String() string {
+ return f.Unitless() + " FIL"
+}
+
+func (f FIL) Unitless() string {
r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision)))
if r.Sign() == 0 {
- return "0 FIL"
+ return "0"
}
- return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") + " FIL"
+ return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".")
}
func (f FIL) Format(s fmt.State, ch rune) {
diff --git a/chain/types/message.go b/chain/types/message.go
index 288fcf6d9..4fead44bc 100644
--- a/chain/types/message.go
+++ b/chain/types/message.go
@@ -4,9 +4,9 @@ import (
"bytes"
"fmt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
xerrors "golang.org/x/xerrors"
diff --git a/chain/types/message_receipt.go b/chain/types/message_receipt.go
index 6671595ff..57761680d 100644
--- a/chain/types/message_receipt.go
+++ b/chain/types/message_receipt.go
@@ -3,7 +3,7 @@ package types
import (
"bytes"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/exitcode"
)
type MessageReceipt struct {
diff --git a/chain/types/message_test.go b/chain/types/message_test.go
index a7b4927e5..f57385a09 100644
--- a/chain/types/message_test.go
+++ b/chain/types/message_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
)
diff --git a/chain/types/mock/chain.go b/chain/types/mock/chain.go
index 19b1352ad..7a9c82cba 100644
--- a/chain/types/mock/chain.go
+++ b/chain/types/mock/chain.go
@@ -5,10 +5,11 @@ import (
"fmt"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/ipfs/go-cid"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
)
@@ -80,6 +81,7 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types
Height: height,
ParentStateRoot: pstateRoot,
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")},
+ ParentBaseFee: types.NewInt(uint64(build.MinimumBaseFee)),
}
}
diff --git a/chain/types/signature_test.go b/chain/types/signature_test.go
index 751f55252..9ade3c046 100644
--- a/chain/types/signature_test.go
+++ b/chain/types/signature_test.go
@@ -4,7 +4,7 @@ import (
"bytes"
"testing"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
)
func TestSignatureSerializeRoundTrip(t *testing.T) {
diff --git a/chain/types/signedmessage.go b/chain/types/signedmessage.go
index 47592feb1..17d2f5d94 100644
--- a/chain/types/signedmessage.go
+++ b/chain/types/signedmessage.go
@@ -3,8 +3,8 @@ package types
import (
"bytes"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
)
diff --git a/chain/types/state.go b/chain/types/state.go
new file mode 100644
index 000000000..a96883604
--- /dev/null
+++ b/chain/types/state.go
@@ -0,0 +1,26 @@
+package types
+
+import "github.com/ipfs/go-cid"
+
+// StateTreeVersion is the version of the state tree itself, independent of the
+// network version or the actors version.
+type StateTreeVersion uint64
+
+const (
+ // StateTreeVersion0 corresponds to actors < v2.
+ StateTreeVersion0 StateTreeVersion = iota
+ // StateTreeVersion1 corresponds to actors >= v2.
+ StateTreeVersion1
+)
+
+type StateRoot struct {
+ // State tree version.
+ Version StateTreeVersion
+ // Actors tree. The structure depends on the state root version.
+ Actors cid.Cid
+ // Info. The structure depends on the state root version.
+ Info cid.Cid
+}
+
+// TODO: version this.
+type StateInfo0 struct{}
diff --git a/chain/types/tipset.go b/chain/types/tipset.go
index 4217d2a86..07eff3734 100644
--- a/chain/types/tipset.go
+++ b/chain/types/tipset.go
@@ -7,7 +7,7 @@ import (
"io"
"sort"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/minio/blake2b-simd"
@@ -167,12 +167,16 @@ func (ts *TipSet) Equals(ots *TipSet) bool {
return false
}
- if len(ts.blks) != len(ots.blks) {
+ if ts.height != ots.height {
return false
}
- for i, b := range ts.blks {
- if b.Cid() != ots.blks[i].Cid() {
+ if len(ts.cids) != len(ots.cids) {
+ return false
+ }
+
+ for i, cid := range ts.cids {
+ if cid != ots.cids[i] {
return false
}
}
diff --git a/chain/types/tipset_key.go b/chain/types/tipset_key.go
index ee1994f5a..e5bc7750d 100644
--- a/chain/types/tipset_key.go
+++ b/chain/types/tipset_key.go
@@ -5,7 +5,7 @@ import (
"encoding/json"
"strings"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
diff --git a/chain/types/voucher.go b/chain/types/voucher.go
deleted file mode 100644
index 687109c33..000000000
--- a/chain/types/voucher.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types
-
-import (
- "encoding/base64"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- cbor "github.com/ipfs/go-ipld-cbor"
-)
-
-func DecodeSignedVoucher(s string) (*paych.SignedVoucher, error) {
- data, err := base64.RawURLEncoding.DecodeString(s)
- if err != nil {
- return nil, err
- }
-
- var sv paych.SignedVoucher
- if err := cbor.DecodeInto(data, &sv); err != nil {
- return nil, err
- }
-
- return &sv, nil
-}
diff --git a/chain/types_test.go b/chain/types_test.go
index 7d68da68d..b47471c9d 100644
--- a/chain/types_test.go
+++ b/chain/types_test.go
@@ -1,9 +1,12 @@
package chain
import (
+ "crypto/rand"
"encoding/json"
"testing"
+ "github.com/filecoin-project/lotus/build"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -35,3 +38,40 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) {
t.Fatal(err)
}
}
+
+func TestAddressType(t *testing.T) {
+ build.SetAddressNetwork(address.Testnet)
+ addr, err := makeRandomAddress()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(addr[0]) != address.TestnetPrefix {
+ t.Fatalf("address should start with %s", address.TestnetPrefix)
+ }
+
+ build.SetAddressNetwork(address.Mainnet)
+ addr, err = makeRandomAddress()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(addr[0]) != address.MainnetPrefix {
+ t.Fatalf("address should start with %s", address.MainnetPrefix)
+ }
+}
+
+func makeRandomAddress() (string, error) {
+ bytes := make([]byte, 32)
+ _, err := rand.Read(bytes)
+ if err != nil {
+ return "", err
+ }
+
+ addr, err := address.NewActorAddress(bytes)
+ if err != nil {
+ return "", err
+ }
+
+ return addr.String(), nil
+}
diff --git a/chain/validation/applier.go b/chain/validation/applier.go
deleted file mode 100644
index ac2fccf85..000000000
--- a/chain/validation/applier.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package validation
-
-import (
- "context"
-
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/puppet"
- "github.com/ipfs/go-cid"
-
- vtypes "github.com/filecoin-project/chain-validation/chain/types"
- vstate "github.com/filecoin-project/chain-validation/state"
-
- "github.com/filecoin-project/lotus/chain/stmgr"
- "github.com/filecoin-project/lotus/chain/store"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
-)
-
-// Applier applies messages to state trees and storage.
-type Applier struct {
- stateWrapper *StateWrapper
- syscalls vm.SyscallBuilder
-}
-
-var _ vstate.Applier = &Applier{}
-
-func NewApplier(sw *StateWrapper, syscalls vm.SyscallBuilder) *Applier {
- return &Applier{sw, syscalls}
-}
-
-func (a *Applier) ApplyMessage(epoch abi.ChainEpoch, message *vtypes.Message) (vtypes.ApplyMessageResult, error) {
- lm := toLotusMsg(message)
- receipt, penalty, reward, err := a.applyMessage(epoch, lm)
- return vtypes.ApplyMessageResult{
- Msg: *message,
- Receipt: receipt,
- Penalty: penalty,
- Reward: reward,
- Root: a.stateWrapper.Root().String(),
- }, err
-}
-
-func (a *Applier) ApplySignedMessage(epoch abi.ChainEpoch, msg *vtypes.SignedMessage) (vtypes.ApplyMessageResult, error) {
- var lm types.ChainMsg
- switch msg.Signature.Type {
- case crypto.SigTypeSecp256k1:
- lm = toLotusSignedMsg(msg)
- case crypto.SigTypeBLS:
- lm = toLotusMsg(&msg.Message)
- default:
- return vtypes.ApplyMessageResult{}, xerrors.New("Unknown signature type")
- }
- // TODO: Validate the sig first
- receipt, penalty, reward, err := a.applyMessage(epoch, lm)
- return vtypes.ApplyMessageResult{
- Msg: msg.Message,
- Receipt: receipt,
- Penalty: penalty,
- Reward: reward,
- Root: a.stateWrapper.Root().String(),
- }, err
-
-}
-
-func (a *Applier) ApplyTipSetMessages(epoch abi.ChainEpoch, blocks []vtypes.BlockMessagesInfo, rnd vstate.RandomnessSource) (vtypes.ApplyTipSetResult, error) {
- cs := store.NewChainStore(a.stateWrapper.bs, a.stateWrapper.ds, a.syscalls)
- sm := stmgr.NewStateManager(cs)
-
- var bms []store.BlockMessages
- for _, b := range blocks {
- bm := store.BlockMessages{
- Miner: b.Miner,
- WinCount: 1,
- }
-
- for _, m := range b.BLSMessages {
- bm.BlsMessages = append(bm.BlsMessages, toLotusMsg(m))
- }
-
- for _, m := range b.SECPMessages {
- bm.SecpkMessages = append(bm.SecpkMessages, toLotusSignedMsg(m))
- }
-
- bms = append(bms, bm)
- }
-
- var receipts []vtypes.MessageReceipt
- // TODO: base fee
- sroot, _, err := sm.ApplyBlocks(context.TODO(), epoch-1, a.stateWrapper.Root(), bms, epoch, &randWrapper{rnd}, func(c cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
- if msg.From == builtin.SystemActorAddr {
- return nil // ignore reward and cron calls
- }
- rval := ret.Return
- if rval == nil {
- rval = []byte{} // chain validation tests expect empty arrays to not be nil...
- }
- receipts = append(receipts, vtypes.MessageReceipt{
- ExitCode: ret.ExitCode,
- ReturnValue: rval,
-
- GasUsed: vtypes.GasUnits(ret.GasUsed),
- })
- return nil
- }, abi.NewTokenAmount(100))
- if err != nil {
- return vtypes.ApplyTipSetResult{}, err
- }
-
- a.stateWrapper.stateRoot = sroot
-
- return vtypes.ApplyTipSetResult{
- Receipts: receipts,
- Root: a.stateWrapper.Root().String(),
- }, nil
-}
-
-type randWrapper struct {
- rand vstate.RandomnessSource
-}
-
-// TODO: these should really be two different randomness sources
-func (w *randWrapper) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return w.rand.Randomness(ctx, pers, round, entropy)
-}
-
-func (w *randWrapper) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return w.rand.Randomness(ctx, pers, round, entropy)
-}
-
-type vmRand struct {
-}
-
-func (*vmRand) GetChainRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) {
- panic("implement me")
-}
-
-func (*vmRand) GetBeaconRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) {
- panic("implement me")
-}
-
-func (a *Applier) applyMessage(epoch abi.ChainEpoch, lm types.ChainMsg) (vtypes.MessageReceipt, abi.TokenAmount, abi.TokenAmount, error) {
- ctx := context.TODO()
- base := a.stateWrapper.Root()
-
- vmopt := &vm.VMOpts{
- StateBase: base,
- Epoch: epoch,
- Rand: &vmRand{},
- Bstore: a.stateWrapper.bs,
- Syscalls: a.syscalls,
- CircSupplyCalc: nil,
- BaseFee: abi.NewTokenAmount(100),
- }
-
- lotusVM, err := vm.NewVM(vmopt)
- // need to modify the VM invoker to add the puppet actor
- chainValInvoker := vm.NewInvoker()
- chainValInvoker.Register(puppet.PuppetActorCodeID, puppet.Actor{}, puppet.State{})
- lotusVM.SetInvoker(chainValInvoker)
- if err != nil {
- return vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err
- }
-
- ret, err := lotusVM.ApplyMessage(ctx, lm)
- if err != nil {
- return vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err
- }
-
- rval := ret.Return
- if rval == nil {
- rval = []byte{}
- }
-
- a.stateWrapper.stateRoot, err = lotusVM.Flush(ctx)
- if err != nil {
- return vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err
- }
-
- mr := vtypes.MessageReceipt{
- ExitCode: ret.ExitCode,
- ReturnValue: rval,
- GasUsed: vtypes.GasUnits(ret.GasUsed),
- }
-
- return mr, ret.Penalty, abi.NewTokenAmount(ret.GasUsed), nil
-}
-
-func toLotusMsg(msg *vtypes.Message) *types.Message {
- return &types.Message{
- To: msg.To,
- From: msg.From,
-
- Nonce: msg.CallSeqNum,
- Method: msg.Method,
-
- Value: msg.Value,
- GasLimit: msg.GasLimit,
- GasFeeCap: msg.GasFeeCap,
- GasPremium: msg.GasPremium,
-
- Params: msg.Params,
- }
-}
-
-func toLotusSignedMsg(msg *vtypes.SignedMessage) *types.SignedMessage {
- return &types.SignedMessage{
- Message: *toLotusMsg(&msg.Message),
- Signature: msg.Signature,
- }
-}
diff --git a/chain/validation/config.go b/chain/validation/config.go
deleted file mode 100644
index 1e5936350..000000000
--- a/chain/validation/config.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package validation
-
-//
-// Config
-//
-
-type Config struct {
- trackGas bool
- checkExitCode bool
- checkReturnValue bool
- checkState bool
-}
-
-func NewConfig(gas, exit, ret, state bool) *Config {
- return &Config{
- trackGas: gas,
- checkExitCode: exit,
- checkReturnValue: ret,
- checkState: state,
- }
-}
-
-func (v Config) ValidateGas() bool {
- return v.trackGas
-}
-
-func (v Config) ValidateExitCode() bool {
- return v.checkExitCode
-}
-
-func (v Config) ValidateReturnValue() bool {
- return v.checkReturnValue
-}
-
-func (v Config) ValidateStateRoot() bool {
- return v.checkState
-}
diff --git a/chain/validation/factories.go b/chain/validation/factories.go
deleted file mode 100644
index b7781cacc..000000000
--- a/chain/validation/factories.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package validation
-
-import (
- "context"
-
- "github.com/filecoin-project/lotus/chain/state"
- "github.com/filecoin-project/specs-actors/actors/runtime"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- vstate "github.com/filecoin-project/chain-validation/state"
-)
-
-type Factories struct {
- *Applier
-}
-
-var _ vstate.Factories = &Factories{}
-
-func NewFactories() *Factories {
- return &Factories{}
-}
-
-func (f *Factories) NewStateAndApplier(syscalls runtime.Syscalls) (vstate.VMWrapper, vstate.Applier) {
- st := NewState()
- return st, NewApplier(st, func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls {
- return syscalls
- })
-}
-
-func (f *Factories) NewKeyManager() vstate.KeyManager {
- return newKeyManager()
-}
-
-func (f *Factories) NewValidationConfig() vstate.ValidationConfig {
- trackGas := true
- checkExit := true
- checkRet := true
- checkState := true
- return NewConfig(trackGas, checkExit, checkRet, checkState)
-}
diff --git a/chain/validation/keymanager.go b/chain/validation/keymanager.go
deleted file mode 100644
index e93f169bf..000000000
--- a/chain/validation/keymanager.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package validation
-
-import (
- "fmt"
- "math/rand"
-
- "github.com/minio/blake2b-simd"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-crypto"
- acrypto "github.com/filecoin-project/specs-actors/actors/crypto"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/wallet"
-)
-
-type KeyManager struct {
- // Private keys by address
- keys map[address.Address]*wallet.Key
-
- // Seed for deterministic secp key generation.
- secpSeed int64
- // Seed for deterministic bls key generation.
- blsSeed int64 // nolint: structcheck
-}
-
-func newKeyManager() *KeyManager {
- return &KeyManager{
- keys: make(map[address.Address]*wallet.Key),
- secpSeed: 0,
- }
-}
-
-func (k *KeyManager) NewSECP256k1AccountAddress() address.Address {
- secpKey := k.newSecp256k1Key()
- k.keys[secpKey.Address] = secpKey
- return secpKey.Address
-}
-
-func (k *KeyManager) NewBLSAccountAddress() address.Address {
- blsKey := k.newBLSKey()
- k.keys[blsKey.Address] = blsKey
- return blsKey.Address
-}
-
-func (k *KeyManager) Sign(addr address.Address, data []byte) (acrypto.Signature, error) {
- ki, ok := k.keys[addr]
- if !ok {
- return acrypto.Signature{}, fmt.Errorf("unknown address %v", addr)
- }
- var sigType acrypto.SigType
- if ki.Type == wallet.KTSecp256k1 {
- sigType = acrypto.SigTypeBLS
- hashed := blake2b.Sum256(data)
- sig, err := crypto.Sign(ki.PrivateKey, hashed[:])
- if err != nil {
- return acrypto.Signature{}, err
- }
-
- return acrypto.Signature{
- Type: sigType,
- Data: sig,
- }, nil
- } else if ki.Type == wallet.KTBLS {
- panic("lotus validator cannot sign BLS messages")
- } else {
- panic("unknown signature type")
- }
-
-}
-
-func (k *KeyManager) newSecp256k1Key() *wallet.Key {
- randSrc := rand.New(rand.NewSource(k.secpSeed)) // nolint
- prv, err := crypto.GenerateKeyFromSeed(randSrc)
- if err != nil {
- panic(err)
- }
- k.secpSeed++
- key, err := wallet.NewKey(types.KeyInfo{
- Type: wallet.KTSecp256k1,
- PrivateKey: prv,
- })
- if err != nil {
- panic(err)
- }
- return key
-}
-
-func (k *KeyManager) newBLSKey() *wallet.Key {
- // FIXME: bls needs deterministic key generation
- //sk := ffi.PrivateKeyGenerate(s.blsSeed)
- // s.blsSeed++
- sk := [32]byte{}
- sk[0] = uint8(k.blsSeed) // hack to keep gas values determinist
- k.blsSeed++
- key, err := wallet.NewKey(types.KeyInfo{
- Type: wallet.KTBLS,
- PrivateKey: sk[:],
- })
- if err != nil {
- panic(err)
- }
- return key
-}
diff --git a/chain/validation/state.go b/chain/validation/state.go
deleted file mode 100644
index 2a10eb6af..000000000
--- a/chain/validation/state.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package validation
-
-import (
- "context"
-
- "github.com/ipfs/go-cid"
- "github.com/ipfs/go-datastore"
- cbor "github.com/ipfs/go-ipld-cbor"
- "golang.org/x/xerrors"
-
- vstate "github.com/filecoin-project/chain-validation/state"
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/runtime"
-
- "github.com/filecoin-project/lotus/chain/state"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-var _ vstate.VMWrapper = &StateWrapper{}
-
-type StateWrapper struct {
- // The blockstore underlying the state tree and storage.
- bs blockstore.Blockstore
-
- ds datastore.Batching
- // HAMT-CBOR store on top of the blockstore.
- cst cbor.IpldStore
-
- // CID of the root of the state tree.
- stateRoot cid.Cid
-}
-
-func NewState() *StateWrapper {
- bs := blockstore.NewTemporary()
- cst := cbor.NewCborStore(bs)
- // Put EmptyObjectCid value in the store. When an actor is initially created its Head is set to this value.
- _, err := cst.Put(context.TODO(), map[string]string{})
- if err != nil {
- panic(err)
- }
-
- treeImpl, err := state.NewStateTree(cst)
- if err != nil {
- panic(err) // Never returns error, the error return should be removed.
- }
- root, err := treeImpl.Flush(context.TODO())
- if err != nil {
- panic(err)
- }
- return &StateWrapper{
- bs: bs,
- ds: datastore.NewMapDatastore(),
- cst: cst,
- stateRoot: root,
- }
-}
-
-func (s *StateWrapper) NewVM() {
- return
-}
-
-func (s *StateWrapper) Root() cid.Cid {
- return s.stateRoot
-}
-
-// StoreGet the value at key from vm store
-func (s *StateWrapper) StoreGet(key cid.Cid, out runtime.CBORUnmarshaler) error {
- tree, err := state.LoadStateTree(s.cst, s.stateRoot)
- if err != nil {
- return err
- }
- return tree.Store.Get(context.Background(), key, out)
-}
-
-// StorePut `value` into vm store
-func (s *StateWrapper) StorePut(value runtime.CBORMarshaler) (cid.Cid, error) {
- tree, err := state.LoadStateTree(s.cst, s.stateRoot)
- if err != nil {
- return cid.Undef, err
- }
- return tree.Store.Put(context.Background(), value)
-}
-
-func (s *StateWrapper) Actor(addr address.Address) (vstate.Actor, error) {
- tree, err := state.LoadStateTree(s.cst, s.stateRoot)
- if err != nil {
- return nil, err
- }
- fcActor, err := tree.GetActor(addr)
- if err != nil {
- return nil, err
- }
- return &actorWrapper{*fcActor}, nil
-}
-
-func (s *StateWrapper) SetActorState(addr address.Address, balance abi.TokenAmount, actorState runtime.CBORMarshaler) (vstate.Actor, error) {
- tree, err := state.LoadStateTree(s.cst, s.stateRoot)
- if err != nil {
- return nil, err
- }
- // actor should exist
- act, err := tree.GetActor(addr)
- if err != nil {
- return nil, err
- }
- // add the state to the store and get a new head cid
- actHead, err := tree.Store.Put(context.Background(), actorState)
- if err != nil {
- return nil, err
- }
- // update the actor object with new head and balance parameter
- actr := &actorWrapper{types.Actor{
- Code: act.Code,
- Nonce: act.Nonce,
- // updates
- Head: actHead,
- Balance: balance,
- }}
- if err := tree.SetActor(addr, &actr.Actor); err != nil {
- return nil, err
- }
- return actr, s.flush(tree)
-}
-
-func (s *StateWrapper) CreateActor(code cid.Cid, addr address.Address, balance abi.TokenAmount, actorState runtime.CBORMarshaler) (vstate.Actor, address.Address, error) {
- idAddr := addr
- tree, err := state.LoadStateTree(s.cst, s.stateRoot)
- if err != nil {
- return nil, address.Undef, err
- }
- if addr.Protocol() != address.ID {
-
- actHead, err := tree.Store.Put(context.Background(), actorState)
- if err != nil {
- return nil, address.Undef, err
- }
- actr := &actorWrapper{types.Actor{
- Code: code,
- Head: actHead,
- Balance: balance,
- }}
-
- idAddr, err = tree.RegisterNewAddress(addr)
- if err != nil {
- return nil, address.Undef, xerrors.Errorf("register new address for actor: %w", err)
- }
-
- if err := tree.SetActor(addr, &actr.Actor); err != nil {
- return nil, address.Undef, xerrors.Errorf("setting new actor for actor: %w", err)
- }
- }
-
- // store newState
- head, err := tree.Store.Put(context.Background(), actorState)
- if err != nil {
- return nil, address.Undef, err
- }
-
- // create and store actor object
- a := types.Actor{
- Code: code,
- Head: head,
- Balance: balance,
- }
- if err := tree.SetActor(idAddr, &a); err != nil {
- return nil, address.Undef, err
- }
-
- return &actorWrapper{a}, idAddr, s.flush(tree)
-}
-
-// Flushes a state tree to storage and sets this state's root to that tree's root CID.
-func (s *StateWrapper) flush(tree *state.StateTree) (err error) {
- s.stateRoot, err = tree.Flush(context.TODO())
- return
-}
-
-//
-// Actor Wrapper
-//
-
-type actorWrapper struct {
- types.Actor
-}
-
-func (a *actorWrapper) Code() cid.Cid {
- return a.Actor.Code
-}
-
-func (a *actorWrapper) Head() cid.Cid {
- return a.Actor.Head
-}
-
-func (a *actorWrapper) CallSeqNum() uint64 {
- return a.Actor.Nonce
-}
-
-func (a *actorWrapper) Balance() big.Int {
- return a.Actor.Balance
-
-}
-
-//
-// Storage
-//
-
-type contextStore struct {
- cbor.IpldStore
- ctx context.Context
-}
-
-func (s *contextStore) Context() context.Context {
- return s.ctx
-}
diff --git a/chain/vectors/gen/main.go b/chain/vectors/gen/main.go
index 631fb6ad1..096548e04 100644
--- a/chain/vectors/gen/main.go
+++ b/chain/vectors/gen/main.go
@@ -10,24 +10,22 @@ import (
"github.com/filecoin-project/go-address"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/vectors"
"github.com/filecoin-project/lotus/chain/wallet"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
)
func init() {
- verifreg.MinVerifiedDealSize = big.NewInt(2048)
- power.ConsensusMinerMinPower = big.NewInt(2048)
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(2048))
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
}
func MakeHeaderVectors() []vectors.HeaderVector {
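
Note: the vector generator no longer writes to specs-actors globals; test-network parameters now go through the `policy` package, as in the init block above. A minimal sketch of the same setup as a reusable helper — `SetSupportedProofTypes` is an assumption about the policy package's surface, not part of this diff:

```go
package testpolicy // hypothetical helper package

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/policy"
)

// SetTinyNetworkPolicy shrinks consensus thresholds so fixtures can be built
// against a 2 KiB devnet. Call it before constructing any chain state.
func SetTinyNetworkPolicy() {
	policy.SetMinVerifiedDealSize(abi.NewStoragePower(2048))
	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
	// Assumed helper: restrict seal proofs to the 2 KiB test proof type.
	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
}
```
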
diff --git a/chain/vectors/vector_types.go b/chain/vectors/vector_types.go
index 73216a049..7e014fb77 100644
--- a/chain/vectors/vector_types.go
+++ b/chain/vectors/vector_types.go
@@ -1,8 +1,8 @@
package vectors
import (
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/crypto"
)
type HeaderVector struct {
diff --git a/chain/vm/burn.go b/chain/vm/burn.go
index e9b6802c1..9f9b95755 100644
--- a/chain/vm/burn.go
+++ b/chain/vm/burn.go
@@ -1,8 +1,8 @@
package vm
import (
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
)
const (
@@ -22,6 +22,17 @@ type GasOutputs struct {
GasBurned int64
}
+// ZeroGasOutputs returns a logically zeroed GasOutputs.
+func ZeroGasOutputs() GasOutputs {
+ return GasOutputs{
+ BaseFeeBurn: big.Zero(),
+ OverEstimationBurn: big.Zero(),
+ MinerPenalty: big.Zero(),
+ MinerTip: big.Zero(),
+ Refund: big.Zero(),
+ }
+}
+
// ComputeGasOverestimationBurn computes amount of gas to be refunded and amount of gas to be burned
// Result is (refund, burn)
func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
@@ -58,13 +69,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs {
gasUsedBig := big.NewInt(gasUsed)
- out := GasOutputs{
- BaseFeeBurn: big.Zero(),
- OverEstimationBurn: big.Zero(),
- MinerPenalty: big.Zero(),
- MinerTip: big.Zero(),
- Refund: big.Zero(),
- }
+ out := ZeroGasOutputs()
baseFeeToPay := baseFee
if baseFee.Cmp(feeCap.Int) > 0 {
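
Note: `ZeroGasOutputs` gives callers a fully initialized zero value, avoiding uninitialized `big.Int` fields when only one output is set. A minimal sketch of the penalty-only case that `ApplyMessage` builds further down in this diff (the helper name is hypothetical):

```go
package vm

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// penaltyOnly charges the sender nothing and penalizes the miner for including
// an unprocessable message, mirroring the out-of-gas path in ApplyMessage.
func penaltyOnly(baseFee abi.TokenAmount, gasCost int64) GasOutputs {
	out := ZeroGasOutputs()
	out.MinerPenalty = big.Mul(baseFee, big.NewInt(gasCost))
	return out
}
```
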
diff --git a/chain/vm/gas.go b/chain/vm/gas.go
index 72a7df8fc..6802013e5 100644
--- a/chain/vm/gas.go
+++ b/chain/vm/gas.go
@@ -3,11 +3,12 @@ package vm
import (
"fmt"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
vmr "github.com/filecoin-project/specs-actors/actors/runtime"
"github.com/ipfs/go-cid"
)
@@ -77,8 +78,8 @@ type Pricelist interface {
OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error)
OnHashing(dataSize int) GasCharge
OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge
- OnVerifySeal(info abi.SealVerifyInfo) GasCharge
- OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge
+ OnVerifySeal(info proof.SealVerifyInfo) GasCharge
+ OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge
OnVerifyConsensusFault() GasCharge
}
@@ -183,7 +184,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p
}
// Verifies a sector seal proof.
-func (ps pricedSyscalls) VerifySeal(vi abi.SealVerifyInfo) error {
+func (ps pricedSyscalls) VerifySeal(vi proof.SealVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifySeal(vi))
defer ps.chargeGas(gasOnActorExec)
@@ -191,7 +192,7 @@ func (ps pricedSyscalls) VerifySeal(vi abi.SealVerifyInfo) error {
}
// Verifies a proof of spacetime.
-func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error {
+func (ps pricedSyscalls) VerifyPoSt(vi proof.WindowPoStVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifyPost(vi))
defer ps.chargeGas(gasOnActorExec)
@@ -208,14 +209,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error {
// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
// blocks in the parent of h2 (i.e. h2's grandparent).
// Returns nil and an error if the headers don't prove a fault.
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*runtime.ConsensusFault, error) {
+func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) {
ps.chargeGas(ps.pl.OnVerifyConsensusFault())
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyConsensusFault(h1, h2, extra)
}
-func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
+func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) {
count := int64(0)
for _, svis := range inp {
count += int64(len(svis))
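
Note: the pricelist now prices the `proof`-package verification types instead of the old `abi` ones. A sketch of charging for a seal verification under the new types — the zero-value info is for illustration only; real callers pass the on-chain proof:

```go
package vm

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
)

// sealVerifyCost returns the total gas charged for a single seal verification
// at the given epoch's pricelist.
func sealVerifyCost(epoch abi.ChainEpoch) int64 {
	pl := PricelistByEpoch(epoch)
	charge := pl.OnVerifySeal(proof.SealVerifyInfo{})
	return charge.Total()
}
```
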
diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go
index f13710a1b..bfb49c345 100644
--- a/chain/vm/gas_v0.go
+++ b/chain/vm/gas_v0.go
@@ -3,10 +3,12 @@ package vm
import (
"fmt"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
)
type scalingCost struct {
@@ -110,14 +112,14 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M
if big.Cmp(value, abi.NewTokenAmount(0)) != 0 {
ret += pl.sendTransferFunds
- if methodNum == builtin.MethodSend {
+ if methodNum == builtin0.MethodSend {
// transfer only
ret += pl.sendTransferOnlyPremium
}
extra += "t"
}
- if methodNum != builtin.MethodSend {
+ if methodNum != builtin0.MethodSend {
extra += "i"
// running actors is cheaper because we hand over to actors
ret += pl.sendInvokeMethod
@@ -173,14 +175,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr
}
// OnVerifySeal
-func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge {
+func (pl *pricelistV0) OnVerifySeal(info proof.SealVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus
// this is not used
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
}
// OnVerifyPost
-func (pl *pricelistV0) OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge {
+func (pl *pricelistV0) OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge {
sectorSize := "unknown"
var proofType abi.RegisteredPoStProof
diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go
index 56f769da2..661e31178 100644
--- a/chain/vm/invoker.go
+++ b/chain/vm/invoker.go
@@ -6,92 +6,126 @@ import (
"fmt"
"reflect"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/cron"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/builtin/system"
- "github.com/filecoin-project/specs-actors/actors/runtime"
- vmr "github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
+ exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
+ vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ rtt "github.com/filecoin-project/go-state-types/rt"
+
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/types"
)
-type Invoker struct {
- builtInCode map[cid.Cid]nativeCode
- builtInState map[cid.Cid]reflect.Type
+type ActorRegistry struct {
+ actors map[cid.Cid]*actorInfo
}
-type invokeFunc func(rt runtime.Runtime, params []byte) ([]byte, aerrors.ActorError)
+// An ActorPredicate returns an error if the given actor is not valid for the given runtime environment (e.g., chain height, version, etc.).
+type ActorPredicate func(vmr.Runtime, rtt.VMActor) error
+
+func ActorsVersionPredicate(ver actors.Version) ActorPredicate {
+ return func(rt vmr.Runtime, v rtt.VMActor) error {
+ nver := actors.VersionForNetwork(rt.NetworkVersion())
+ if nver != ver {
+ return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d", v.Code(), ver, nver, rt.CurrEpoch())
+ }
+ return nil
+ }
+}
+
+type invokeFunc func(rt vmr.Runtime, params []byte) ([]byte, aerrors.ActorError)
type nativeCode []invokeFunc
-func NewInvoker() *Invoker {
- inv := &Invoker{
- builtInCode: make(map[cid.Cid]nativeCode),
- builtInState: make(map[cid.Cid]reflect.Type),
- }
+type actorInfo struct {
+ methods nativeCode
+ vmActor rtt.VMActor
+ // TODO: consider making this a network version range?
+ predicate ActorPredicate
+}
+
+func NewActorRegistry() *ActorRegistry {
+ inv := &ActorRegistry{actors: make(map[cid.Cid]*actorInfo)}
+
+ // TODO: define all these properties on the actors themselves, in specs-actors.
// add builtInCode using: register(cid, singleton)
- inv.Register(builtin.SystemActorCodeID, system.Actor{}, adt.EmptyValue{})
- inv.Register(builtin.InitActorCodeID, init_.Actor{}, init_.State{})
- inv.Register(builtin.RewardActorCodeID, reward.Actor{}, reward.State{})
- inv.Register(builtin.CronActorCodeID, cron.Actor{}, cron.State{})
- inv.Register(builtin.StoragePowerActorCodeID, power.Actor{}, power.State{})
- inv.Register(builtin.StorageMarketActorCodeID, market.Actor{}, market.State{})
- inv.Register(builtin.StorageMinerActorCodeID, miner.Actor{}, miner.State{})
- inv.Register(builtin.MultisigActorCodeID, multisig.Actor{}, multisig.State{})
- inv.Register(builtin.PaymentChannelActorCodeID, paych.Actor{}, paych.State{})
- inv.Register(builtin.VerifiedRegistryActorCodeID, verifreg.Actor{}, verifreg.State{})
- inv.Register(builtin.AccountActorCodeID, account.Actor{}, account.State{})
+ inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
+ inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
return inv
}
-func (inv *Invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
-
- code, ok := inv.builtInCode[codeCid]
+func (ar *ActorRegistry) Invoke(codeCid cid.Cid, rt vmr.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
+ act, ok := ar.actors[codeCid]
if !ok {
- log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Message().Receiver())
+ log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Receiver())
return nil, aerrors.Newf(exitcode.SysErrorIllegalActor, "no code for actor %s(%d)(%s)", codeCid, method, hex.EncodeToString(params))
}
- if method >= abi.MethodNum(len(code)) || code[method] == nil {
+ if err := act.predicate(rt, act.vmActor); err != nil {
+ return nil, aerrors.Newf(exitcode.SysErrorIllegalActor, "unsupported actor: %s", err)
+ }
+ if method >= abi.MethodNum(len(act.methods)) || act.methods[method] == nil {
return nil, aerrors.Newf(exitcode.SysErrInvalidMethod, "no method %d on actor", method)
}
- return code[method](rt, params)
+ return act.methods[method](rt, params)
}
-func (inv *Invoker) Register(c cid.Cid, instance Invokee, state interface{}) {
- code, err := inv.transform(instance)
- if err != nil {
- panic(xerrors.Errorf("%s: %w", string(c.Hash()), err))
+func (ar *ActorRegistry) Register(pred ActorPredicate, actors ...rtt.VMActor) {
+ if pred == nil {
+ pred = func(vmr.Runtime, rtt.VMActor) error { return nil }
+ }
+ for _, a := range actors {
+ code, err := ar.transform(a)
+ if err != nil {
+ panic(xerrors.Errorf("%s: %w", string(a.Code().Hash()), err))
+ }
+ ar.actors[a.Code()] = &actorInfo{
+ methods: code,
+ vmActor: a,
+ predicate: pred,
+ }
}
- inv.builtInCode[c] = code
- inv.builtInState[c] = reflect.TypeOf(state)
}
-type Invokee interface {
+func (ar *ActorRegistry) Create(codeCid cid.Cid, rt vmr.Runtime) (*types.Actor, aerrors.ActorError) {
+ act, ok := ar.actors[codeCid]
+ if !ok {
+ return nil, aerrors.Newf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.")
+ }
+
+ if err := act.predicate(rt, act.vmActor); err != nil {
+ return nil, aerrors.Newf(exitcode.SysErrorIllegalArgument, "Cannot create actor: %w", err)
+ }
+
+ if rtt.IsSingletonActor(act.vmActor) {
+ return nil, aerrors.Newf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.")
+ }
+ return &types.Actor{
+ Code: codeCid,
+ Head: EmptyObjectCid,
+ Nonce: 0,
+ Balance: abi.NewTokenAmount(0),
+ }, nil
+}
+
+type invokee interface {
Exports() []interface{}
}
-func (*Invoker) transform(instance Invokee) (nativeCode, error) {
+func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
itype := reflect.TypeOf(instance)
exports := instance.Exports()
+ runtimeType := reflect.TypeOf((*vmr.Runtime)(nil)).Elem()
for i, m := range exports {
i := i
newErr := func(format string, args ...interface{}) error {
@@ -112,11 +146,11 @@ func (*Invoker) transform(instance Invokee) (nativeCode, error) {
return nil, newErr("wrong number of inputs should be: " +
"vmr.Runtime, ")
}
- if t.In(0) != reflect.TypeOf((*vmr.Runtime)(nil)).Elem() {
+ if !runtimeType.Implements(t.In(0)) {
return nil, newErr("first arguemnt should be vmr.Runtime")
}
if t.In(1).Kind() != reflect.Ptr {
- return nil, newErr("second argument should be Runtime")
+ return nil, newErr("second argument should be of kind reflect.Ptr")
}
if t.NumOut() != 1 {
@@ -130,6 +164,9 @@ func (*Invoker) transform(instance Invokee) (nativeCode, error) {
}
code := make(nativeCode, len(exports))
for id, m := range exports {
+ if m == nil {
+ continue
+ }
meth := reflect.ValueOf(m)
code[id] = reflect.MakeFunc(reflect.TypeOf((invokeFunc)(nil)),
func(in []reflect.Value) []reflect.Value {
@@ -174,27 +211,22 @@ func DecodeParams(b []byte, out interface{}) error {
return um.UnmarshalCBOR(bytes.NewReader(b))
}
-func DumpActorState(code cid.Cid, b []byte) (interface{}, error) {
- if code == builtin.AccountActorCodeID { // Account code special case
+func DumpActorState(act *types.Actor, b []byte) (interface{}, error) {
+ if builtin.IsAccountActor(act.Code) { // Account code special case
return nil, nil
}
- i := NewInvoker() // TODO: register builtins in init block
+ i := NewActorRegistry() // TODO: register builtins in init block
- typ, ok := i.builtInState[code]
+ actInfo, ok := i.actors[act.Code]
if !ok {
- return nil, xerrors.Errorf("state type for actor %s not found", code)
- }
-
- rv := reflect.New(typ)
- um, ok := rv.Interface().(cbg.CBORUnmarshaler)
- if !ok {
- return nil, xerrors.New("state type does not implement CBORUnmarshaler")
+ return nil, xerrors.Errorf("state type for actor %s not found", act.Code)
}
+ um := actInfo.vmActor.State()
if err := um.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
return nil, xerrors.Errorf("unmarshaling actor state: %w", err)
}
- return rv.Elem().Interface(), nil
+ return um, nil
}
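
Note: `DumpActorState` now resolves the state type through the registry's `VMActor`, so callers pass the whole `*types.Actor` rather than just a code CID. A minimal sketch, assuming `raw` holds the CBOR bytes stored at `act.Head`:

```go
package vm

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
)

// printActorState decodes and prints an actor's persisted state.
func printActorState(act *types.Actor, raw []byte) error {
	st, err := DumpActorState(act, raw)
	if err != nil {
		return err
	}
	// st is nil for account actors (special-cased above); otherwise it is the
	// decoded state struct registered for act.Code.
	fmt.Printf("%+v\n", st)
	return nil
}
```
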
diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go
index 55b276421..4005dd42f 100644
--- a/chain/vm/invoker_test.go
+++ b/chain/vm/invoker_test.go
@@ -5,15 +5,16 @@ import (
"io"
"testing"
+ "github.com/filecoin-project/go-state-types/abi"
+
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/assert"
cbg "github.com/whyrusleeping/cbor-gen"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
)
type basicContract struct{}
@@ -60,23 +61,23 @@ func (b basicContract) Exports() []interface{} {
}
}
-func (basicContract) InvokeSomething0(rt runtime.Runtime, params *basicParams) *adt.EmptyValue {
+func (basicContract) InvokeSomething0(rt runtime.Runtime, params *basicParams) *abi.EmptyValue {
rt.Abortf(exitcode.ExitCode(params.B), "params.B")
return nil
}
-func (basicContract) BadParam(rt runtime.Runtime, params *basicParams) *adt.EmptyValue {
+func (basicContract) BadParam(rt runtime.Runtime, params *basicParams) *abi.EmptyValue {
rt.Abortf(255, "bad params")
return nil
}
-func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams) *adt.EmptyValue {
+func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams) *abi.EmptyValue {
rt.Abortf(exitcode.ExitCode(params.B+10), "params.B")
return nil
}
func TestInvokerBasic(t *testing.T) {
- inv := Invoker{}
+ inv := ActorRegistry{}
code, err := inv.transform(basicContract{})
assert.NoError(t, err)
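
Note: `transform` now tolerates nil entries in `Exports()`, matching builtin actors that reserve unused method slots. A sketch of the export shape the registry expects — the actor and method names here are illustrative, not from this diff:

```go
package vm

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/specs-actors/actors/runtime"
)

// exampleActor only illustrates the shape transform validates: each export
// takes (runtime.Runtime, *ParamsType) and returns one CBOR-marshalable value.
type exampleActor struct{}

func (a exampleActor) DoThing(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
	rt.Abortf(exitcode.ErrIllegalState, "not implemented in this sketch")
	return nil
}

func (a exampleActor) Exports() []interface{} {
	return []interface{}{
		nil,       // nil slots are skipped by transform
		a.DoThing, // method 1
	}
}
```
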
diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go
index ef4382df1..22a2acb8b 100644
--- a/chain/vm/mkactor.go
+++ b/chain/vm/mkactor.go
@@ -3,14 +3,16 @@ package vm
import (
"context"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/types"
@@ -29,49 +31,47 @@ func init() {
var EmptyObjectCid cid.Cid
// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
-func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, aerrors.ActorError) {
+func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
- return nil, err
+ return nil, address.Undef, err
}
addrID, err := rt.state.RegisterNewAddress(addr)
if err != nil {
- return nil, aerrors.Escalate(err, "registering actor address")
+ return nil, address.Undef, aerrors.Escalate(err, "registering actor address")
}
- act, aerr := makeActor(addr)
+ act, aerr := makeActor(actors.VersionForNetwork(rt.NetworkVersion()), addr)
if aerr != nil {
- return nil, aerr
+ return nil, address.Undef, aerr
}
if err := rt.state.SetActor(addrID, act); err != nil {
- return nil, aerrors.Escalate(err, "creating new actor failed")
+ return nil, address.Undef, aerrors.Escalate(err, "creating new actor failed")
}
p, err := actors.SerializeParams(&addr)
if err != nil {
- return nil, aerrors.Escalate(err, "couldn't serialize params for actor construction")
+ return nil, address.Undef, aerrors.Escalate(err, "couldn't serialize params for actor construction")
}
// call constructor on account
- _, aerr = rt.internalSend(builtin.SystemActorAddr, addrID, builtin.MethodsAccount.Constructor, big.Zero(), p)
+ _, aerr = rt.internalSend(builtin0.SystemActorAddr, addrID, builtin0.MethodsAccount.Constructor, big.Zero(), p)
if aerr != nil {
- return nil, aerrors.Wrap(aerr, "failed to invoke account constructor")
+ return nil, address.Undef, aerrors.Wrap(aerr, "failed to invoke account constructor")
}
act, err = rt.state.GetActor(addrID)
if err != nil {
- return nil, aerrors.Escalate(err, "loading newly created actor failed")
+ return nil, address.Undef, aerrors.Escalate(err, "loading newly created actor failed")
}
- return act, nil
+ return act, addrID, nil
}
-func makeActor(addr address.Address) (*types.Actor, aerrors.ActorError) {
+func makeActor(ver actors.Version, addr address.Address) (*types.Actor, aerrors.ActorError) {
switch addr.Protocol() {
- case address.BLS:
- return NewBLSAccountActor(), nil
- case address.SECP256K1:
- return NewSecp256k1AccountActor(), nil
+ case address.BLS, address.SECP256K1:
+ return newAccountActor(ver), nil
case address.ID:
return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "no actor with given ID: %s", addr)
case address.Actor:
@@ -81,19 +81,19 @@ func makeActor(addr address.Address) (*types.Actor, aerrors.ActorError) {
}
}
-func NewBLSAccountActor() *types.Actor {
+func newAccountActor(ver actors.Version) *types.Actor {
+ // TODO: ActorsUpgrade use a global actor registry?
+ var code cid.Cid
+ switch ver {
+ case actors.Version0:
+ code = builtin0.AccountActorCodeID
+ case actors.Version2:
+ code = builtin2.AccountActorCodeID
+ default:
+ panic("unsupported actors version")
+ }
nact := &types.Actor{
- Code: builtin.AccountActorCodeID,
- Balance: types.NewInt(0),
- Head: EmptyObjectCid,
- }
-
- return nact
-}
-
-func NewSecp256k1AccountActor() *types.Actor {
- nact := &types.Actor{
- Code: builtin.AccountActorCodeID,
+ Code: code,
Balance: types.NewInt(0),
Head: EmptyObjectCid,
}
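
Note: `TryCreateAccountActor` now also returns the ID address it registered, which the VM uses below to rebuild the runtime message on post-Version3 networks. A sketch of the new call shape from inside the `vm` package (the wrapper name is hypothetical):

```go
package vm

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/types"
)

// ensureAccountActor shows the three-value call: the freshly constructed
// account actor plus its newly assigned ID address.
func ensureAccountActor(rt *Runtime, to address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
	act, idAddr, aerr := TryCreateAccountActor(rt, to)
	if aerr != nil {
		return nil, address.Undef, aerrors.Wrapf(aerr, "could not create account")
	}
	return act, idAddr, nil
}
```
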
diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go
index 99333fc04..8f124247c 100644
--- a/chain/vm/runtime.go
+++ b/chain/vm/runtime.go
@@ -5,21 +5,19 @@ import (
"context"
"encoding/binary"
"fmt"
- gruntime "runtime"
"time"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime"
- vmr "github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/go-state-types/network"
+ rtt "github.com/filecoin-project/go-state-types/rt"
+ rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
+ rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
"github.com/ipfs/go-cid"
- cbor "github.com/ipfs/go-ipld-cbor"
- cbg "github.com/whyrusleeping/cbor-gen"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -29,21 +27,46 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
+type Message struct {
+ msg types.Message
+}
+
+func (m *Message) Caller() address.Address {
+ if m.msg.From.Protocol() != address.ID {
+ panic("runtime message has a non-ID caller")
+ }
+ return m.msg.From
+}
+
+func (m *Message) Receiver() address.Address {
+ if m.msg.To != address.Undef && m.msg.To.Protocol() != address.ID {
+ panic("runtime message has a non-ID receiver")
+ }
+ return m.msg.To
+}
+
+func (m *Message) ValueReceived() abi.TokenAmount {
+ return m.msg.Value
+}
+
+// EnableGasTracing, if true, outputs gas tracing in execution traces.
+var EnableGasTracing = false
+
type Runtime struct {
+ rt0.Message
+ rt0.Syscalls
+
ctx context.Context
vm *VM
state *state.StateTree
- vmsg vmr.Message
height abi.ChainEpoch
- cst cbor.IpldStore
+ cst ipldcbor.IpldStore
pricelist Pricelist
gasAvailable int64
gasUsed int64
- sys runtime.Syscalls
-
// address that started invoke chain
origin address.Address
originNonce uint64
@@ -56,6 +79,10 @@ type Runtime struct {
lastGasCharge *types.GasTrace
}
+func (rt *Runtime) NetworkVersion() network.Version {
+ return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch())
+}
+
func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount {
cs, err := rt.vm.GetCircSupply(rt.ctx)
if err != nil {
@@ -80,11 +107,11 @@ type notFoundErr interface {
IsNotFound() bool
}
-func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
+func (rt *Runtime) StoreGet(c cid.Cid, o cbor.Unmarshaler) bool {
if err := rt.cst.Get(context.TODO(), c, o); err != nil {
var nfe notFoundErr
if xerrors.As(err, &nfe) && nfe.IsNotFound() {
- if xerrors.As(err, new(cbor.SerializationError)) {
+ if xerrors.As(err, new(ipldcbor.SerializationError)) {
panic(aerrors.Newf(exitcode.ErrSerialization, "failed to unmarshal cbor object %s", err))
}
return false
@@ -95,10 +122,10 @@ func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
return true
}
-func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
+func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
c, err := rt.cst.Put(context.TODO(), x)
if err != nil {
- if xerrors.As(err, new(cbor.SerializationError)) {
+ if xerrors.As(err, new(ipldcbor.SerializationError)) {
panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err))
}
panic(aerrors.Fatalf("failed to put cbor object: %s", err))
@@ -106,7 +133,8 @@ func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
return c
}
-var _ vmr.Runtime = (*Runtime)(nil)
+var _ rt0.Runtime = (*Runtime)(nil)
+var _ rt2.Runtime = (*Runtime)(nil)
func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() {
@@ -119,7 +147,11 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
//log.Desugar().WithOptions(zap.AddStacktrace(zapcore.ErrorLevel)).
//Sugar().Errorf("spec actors failure: %s", r)
log.Errorf("spec actors failure: %s", r)
- aerr = aerrors.Newf(1, "spec actors failure: %s", r)
+ if rt.NetworkVersion() <= network.Version3 {
+ aerr = aerrors.Newf(1, "spec actors failure: %s", r)
+ } else {
+ aerr = aerrors.Newf(exitcode.SysErrReserved1, "spec actors failure: %s", r)
+ }
}
}()
@@ -132,9 +164,9 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
switch ret := ret.(type) {
case []byte:
return ret, nil
- case *adt.EmptyValue:
+ case *abi.EmptyValue:
return nil, nil
- case cbg.CBORMarshaler:
+ case cbor.Marshaler:
buf := new(bytes.Buffer)
if err := ret.MarshalCBOR(buf); err != nil {
return nil, aerrors.Absorb(err, 2, "failed to marshal response to cbor")
@@ -147,17 +179,13 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
}
}
-func (rt *Runtime) Message() vmr.Message {
- return rt.vmsg
-}
-
func (rt *Runtime) ValidateImmediateCallerAcceptAny() {
rt.abortIfAlreadyValidated()
return
}
func (rt *Runtime) CurrentBalance() abi.TokenAmount {
- b, err := rt.GetBalance(rt.Message().Receiver())
+ b, err := rt.GetBalance(rt.Receiver())
if err != nil {
rt.Abortf(err.RetCode(), "get current balance: %v", err)
}
@@ -193,10 +221,6 @@ func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparati
return res
}
-func (rt *Runtime) Store() vmr.Store {
- return rt
-}
-
func (rt *Runtime) NewActorAddress() address.Address {
var b bytes.Buffer
oa, _ := ResolveToKeyAddr(rt.vm.cstate, rt.vm.cst, rt.origin)
@@ -220,12 +244,9 @@ func (rt *Runtime) NewActorAddress() address.Address {
}
func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
- if !builtin.IsBuiltinActor(codeID) {
- rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.")
- }
-
- if builtin.IsSingletonActor(codeID) {
- rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.")
+ act, aerr := rt.vm.areg.Create(codeID, rt)
+ if aerr != nil {
+ rt.Abortf(aerr.RetCode(), aerr.Error())
}
_, err := rt.state.GetActor(address)
@@ -235,12 +256,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
rt.chargeGas(rt.Pricelist().OnCreateActor())
- err = rt.state.SetActor(address, &types.Actor{
- Code: codeID,
- Head: EmptyObjectCid,
- Nonce: 0,
- Balance: big.Zero(),
- })
+ err = rt.state.SetActor(address, act)
if err != nil {
panic(aerrors.Fatalf("creating actor entry: %v", err))
}
@@ -253,7 +269,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
// May only be called by the actor itself.
func (rt *Runtime) DeleteActor(beneficiary address.Address) {
rt.chargeGas(rt.Pricelist().OnDeleteActor())
- act, err := rt.state.GetActor(rt.Message().Receiver())
+ act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
rt.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err)
@@ -262,36 +278,32 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
}
if !act.Balance.IsZero() {
// Transfer the executing actor's balance to the beneficiary
- if err := rt.vm.transfer(rt.Message().Receiver(), beneficiary, act.Balance); err != nil {
+ if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
}
}
// Delete the executing actor
- if err := rt.state.DeleteActor(rt.Message().Receiver()); err != nil {
+ if err := rt.state.DeleteActor(rt.Receiver()); err != nil {
panic(aerrors.Fatalf("failed to delete actor: %s", err))
}
_ = rt.chargeGasSafe(gasOnActorExec)
}
-func (rt *Runtime) Syscalls() vmr.Syscalls {
- return rt.sys
-}
-
-func (rt *Runtime) StartSpan(name string) vmr.TraceSpan {
+func (rt *Runtime) StartSpan(name string) func() {
panic("implement me")
}
func (rt *Runtime) ValidateImmediateCallerIs(as ...address.Address) {
rt.abortIfAlreadyValidated()
- imm := rt.Message().Caller()
+ imm := rt.Caller()
for _, a := range as {
if imm == a {
return
}
}
- rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Message().Caller(), as)
+ rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Caller(), as)
}
func (rt *Runtime) Context() context.Context {
@@ -309,7 +321,7 @@ func (rt *Runtime) AbortStateMsg(msg string) {
func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) {
rt.abortIfAlreadyValidated()
- callerCid, ok := rt.GetActorCodeCID(rt.Message().Caller())
+ callerCid, ok := rt.GetActorCodeCID(rt.Caller())
if !ok {
panic(aerrors.Fatalf("failed to lookup code cid for caller"))
}
@@ -325,15 +337,7 @@ func (rt *Runtime) CurrEpoch() abi.ChainEpoch {
return rt.height
}
-type dumbWrapperType struct {
- val []byte
-}
-
-func (dwt *dumbWrapperType) Into(um vmr.CBORUnmarshaler) error {
- return um.UnmarshalCBOR(bytes.NewReader(dwt.val))
-}
-
-func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMarshaler, value abi.TokenAmount) (vmr.SendReturn, exitcode.ExitCode) {
+func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode {
if !rt.allowInternal {
rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed")
}
@@ -341,21 +345,25 @@ func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMars
if m != nil {
buf := new(bytes.Buffer)
if err := m.MarshalCBOR(buf); err != nil {
- rt.Abortf(exitcode.SysErrInvalidParameters, "failed to marshal input parameters: %s", err)
+ rt.Abortf(exitcode.ErrSerialization, "failed to marshal input parameters: %s", err)
}
params = buf.Bytes()
}
- ret, err := rt.internalSend(rt.Message().Receiver(), to, method, value, params)
+ ret, err := rt.internalSend(rt.Receiver(), to, method, value, params)
if err != nil {
if err.IsFatal() {
panic(err)
}
log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err)
- return &dumbWrapperType{nil}, err.RetCode()
+ return err.RetCode()
}
_ = rt.chargeGasSafe(gasOnActorExec)
- return &dumbWrapperType{ret}, 0
+
+ if err := out.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
+ rt.Abortf(exitcode.ErrSerialization, "failed to unmarshal return value: %s", err)
+ }
+ return 0
}
func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) {
@@ -394,54 +402,46 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
if subrt != nil {
rt.numActorsCreated = subrt.numActorsCreated
+ rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
}
- rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
return ret, errSend
}
-func (rt *Runtime) State() vmr.StateHandle {
- return &shimStateHandle{rt: rt}
-}
-
-type shimStateHandle struct {
- rt *Runtime
-}
-
-func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
- c := ssh.rt.Put(obj)
- err := ssh.rt.stateCommit(EmptyObjectCid, c)
+func (rt *Runtime) StateCreate(obj cbor.Marshaler) {
+ c := rt.StorePut(obj)
+ err := rt.stateCommit(EmptyObjectCid, c)
if err != nil {
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
}
}
-func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
- act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
+func (rt *Runtime) StateReadonly(obj cbor.Unmarshaler) {
+ act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
- ssh.rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
+ rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
}
- ssh.rt.Get(act.Head, obj)
+ rt.StoreGet(act.Head, obj)
}
-func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func()) {
+func (rt *Runtime) StateTransaction(obj cbor.Er, f func()) {
if obj == nil {
- ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
+ rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
}
- act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
+ act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
- ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
+ rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
}
baseState := act.Head
- ssh.rt.Get(baseState, obj)
+ rt.StoreGet(baseState, obj)
- ssh.rt.allowInternal = false
+ rt.allowInternal = false
f()
- ssh.rt.allowInternal = true
+ rt.allowInternal = true
- c := ssh.rt.Put(obj)
+ c := rt.StorePut(obj)
- err = ssh.rt.stateCommit(baseState, c)
+ err = rt.stateCommit(baseState, c)
if err != nil {
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
}
@@ -461,7 +461,7 @@ func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorErr
func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
// TODO: we can make this more efficient in the future...
- act, err := rt.state.GetActor(rt.Message().Receiver())
+ act, err := rt.state.GetActor(rt.Receiver())
if err != nil {
return aerrors.Escalate(err, "failed to get actor to commit state")
}
@@ -472,7 +472,7 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
act.Head = newh
- if err := rt.state.SetActor(rt.Message().Receiver(), act); err != nil {
+ if err := rt.state.SetActor(rt.Receiver(), act); err != nil {
return aerrors.Fatalf("failed to set actor in commit state: %s", err)
}
@@ -480,8 +480,10 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
}
func (rt *Runtime) finilizeGasTracing() {
- if rt.lastGasCharge != nil {
- rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
+ if EnableGasTracing {
+ if rt.lastGasCharge != nil {
+ rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
+ }
}
}
@@ -512,32 +514,35 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) {
func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError {
toUse := gas.Total()
- var callers [10]uintptr
- cout := gruntime.Callers(2+skip, callers[:])
+ if EnableGasTracing {
+ var callers [10]uintptr
- now := build.Clock.Now()
- if rt.lastGasCharge != nil {
- rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
+ cout := 0 //gruntime.Callers(2+skip, callers[:])
+
+ now := build.Clock.Now()
+ if rt.lastGasCharge != nil {
+ rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
+ }
+
+ gasTrace := types.GasTrace{
+ Name: gas.Name,
+ Extra: gas.Extra,
+
+ TotalGas: toUse,
+ ComputeGas: gas.ComputeGas,
+ StorageGas: gas.StorageGas,
+
+ TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
+ VirtualComputeGas: gas.VirtualCompute,
+ VirtualStorageGas: gas.VirtualStorage,
+
+ Callers: callers[:cout],
+ }
+ rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
+ rt.lastGasChargeTime = now
+ rt.lastGasCharge = &gasTrace
}
- gasTrace := types.GasTrace{
- Name: gas.Name,
- Extra: gas.Extra,
-
- TotalGas: toUse,
- ComputeGas: gas.ComputeGas,
- StorageGas: gas.StorageGas,
-
- TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
- VirtualComputeGas: gas.VirtualCompute,
- VirtualStorageGas: gas.VirtualStorage,
-
- Callers: callers[:cout],
- }
- rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
- rt.lastGasChargeTime = now
- rt.lastGasCharge = &gasTrace
-
// overflow safe
if rt.gasUsed > rt.gasAvailable-toUse {
rt.gasUsed = rt.gasAvailable
@@ -567,15 +572,15 @@ func (rt *Runtime) abortIfAlreadyValidated() {
rt.callerValidated = true
}
-func (rt *Runtime) Log(level vmr.LogLevel, msg string, args ...interface{}) {
+func (rt *Runtime) Log(level rtt.LogLevel, msg string, args ...interface{}) {
switch level {
- case vmr.DEBUG:
+ case rtt.DEBUG:
actorLog.Debugf(msg, args...)
- case vmr.INFO:
+ case rtt.INFO:
actorLog.Infof(msg, args...)
- case vmr.WARN:
+ case rtt.WARN:
actorLog.Warnf(msg, args...)
- case vmr.ERROR:
+ case rtt.ERROR:
actorLog.Errorf(msg, args...)
}
}
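
Note: `Send` now follows the v2 runtime shape: the return value is unmarshaled into a caller-supplied `cbor.Er` and only the exit code comes back. A hedged sketch from the caller's point of view; `abi.EmptyValue` stands in for whatever return type the callee actually produces:

```go
package vm

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/exitcode"
)

// sendIgnoringReturn sends a zero-value message and reports only the exit code.
func sendIgnoringReturn(rt *Runtime, to address.Address, method abi.MethodNum) exitcode.ExitCode {
	var out abi.EmptyValue
	code := rt.Send(to, method, nil, big.Zero(), &out)
	if code != exitcode.Ok {
		// out is left untouched on failure; callers typically abort here.
		return code
	}
	return exitcode.Ok
}
```
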
diff --git a/chain/vm/runtime_test.go b/chain/vm/runtime_test.go
index b5c75c177..9fc87f7c5 100644
--- a/chain/vm/runtime_test.go
+++ b/chain/vm/runtime_test.go
@@ -8,7 +8,7 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
)
@@ -42,6 +42,26 @@ func TestRuntimePutErrors(t *testing.T) {
cst: cbor.NewCborStore(nil),
}
- rt.Put(&NotAVeryGoodMarshaler{})
+ rt.StorePut(&NotAVeryGoodMarshaler{})
t.Error("expected panic")
}
+
+func BenchmarkRuntime_CreateRuntimeChargeGas_TracingDisabled(b *testing.B) {
+ var (
+ cst = cbor.NewCborStore(nil)
+ gch = newGasCharge("foo", 1000, 1000)
+ )
+
+ b.ResetTimer()
+
+ EnableGasTracing = false
+ noop := func() bool { return EnableGasTracing }
+ for n := 0; n < b.N; n++ {
+ // flip the flag and read it back so the compiler
+ // can't optimize the accesses away
+ EnableGasTracing = true
+ _ = noop()
+ EnableGasTracing = false
+ _ = (&Runtime{cst: cst}).chargeGasInternal(gch, 0)
+ }
+}
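
Note: gas tracing is now opt-in via the package-level `EnableGasTracing` flag, which is what this benchmark exercises. A sketch of enabling it around a single message application to recover per-charge traces; the wrapper function is an assumption, the flag, `ApplyMessage`, and `ExecutionTrace` come from this diff:

```go
package vm

import (
	"context"

	"github.com/filecoin-project/lotus/chain/types"
)

// traceOneMessage applies a single message with gas tracing enabled so that
// ret.ExecutionTrace.GasCharges is populated. The flag is a global, so this
// is not safe to use concurrently with untraced execution.
func traceOneMessage(ctx context.Context, v *VM, msg *types.Message) (*ApplyRet, error) {
	EnableGasTracing = true
	defer func() { EnableGasTracing = false }()
	return v.ApplyMessage(ctx, msg)
}
```
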
diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go
index 41ed9c762..a7f5dab0c 100644
--- a/chain/vm/syscalls.go
+++ b/chain/vm/syscalls.go
@@ -7,6 +7,8 @@ import (
goruntime "runtime"
"sync"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -14,14 +16,14 @@ import (
mh "github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)
@@ -190,12 +192,12 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
}
// use that to get the miner state
- var mas miner.State
- if err = ss.cst.Get(ss.ctx, act.Head, &mas); err != nil {
+ mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act)
+ if err != nil {
return err
}
- info, err := mas.GetInfo(adt.WrapStore(ss.ctx, ss.cst))
+ info, err := mas.Info()
if err != nil {
return err
}
@@ -213,7 +215,7 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
return nil
}
-func (ss *syscallShim) VerifyPoSt(proof abi.WindowPoStVerifyInfo) error {
+func (ss *syscallShim) VerifyPoSt(proof proof.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
if err != nil {
return err
@@ -224,7 +226,7 @@ func (ss *syscallShim) VerifyPoSt(proof abi.WindowPoStVerifyInfo) error {
return nil
}
-func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error {
+func (ss *syscallShim) VerifySeal(info proof.SealVerifyInfo) error {
//_, span := trace.StartSpan(ctx, "ValidatePoRep")
//defer span.End()
@@ -264,7 +266,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
var BatchSealVerifyParallelism = goruntime.NumCPU()
-func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
+func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool)
sema := make(chan struct{}, BatchSealVerifyParallelism)
@@ -276,7 +278,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerify
for i, s := range seals {
wg.Add(1)
- go func(ma address.Address, ix int, svi abi.SealVerifyInfo, res []bool) {
+ go func(ma address.Address, ix int, svi proof.SealVerifyInfo, res []bool) {
defer wg.Done()
sema <- struct{}{}
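
Note: block-signature checks now go through the version-agnostic `miner.Load` wrapper rather than decoding `miner.State` directly. A minimal sketch of the lookup pattern used above, assuming the worker address is what the caller ultimately needs:

```go
package vm

import (
	"context"

	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/state"
)

// minerWorker resolves a miner's worker address via the abstracted state wrapper.
func minerWorker(ctx context.Context, cst cbor.IpldStore, st *state.StateTree, maddr address.Address) (address.Address, error) {
	act, err := st.GetActor(maddr)
	if err != nil {
		return address.Undef, err
	}
	mas, err := miner.Load(adt.WrapStore(ctx, cst), act)
	if err != nil {
		return address.Undef, err
	}
	info, err := mas.Info()
	if err != nil {
		return address.Undef, err
	}
	return info.Worker, nil
}
```
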
diff --git a/chain/vm/validation_test.go b/chain/vm/validation_test.go
deleted file mode 100644
index 880b33401..000000000
--- a/chain/vm/validation_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package vm_test
-
-import (
- "fmt"
- "reflect"
- "runtime"
- "strings"
- "testing"
-
- suites "github.com/filecoin-project/chain-validation/suites"
-
- factory "github.com/filecoin-project/lotus/chain/validation"
-)
-
-// TestSkipper contains a list of test cases skipped by the implementation.
-type TestSkipper struct {
- testSkips []suites.TestCase
-}
-
-// Skip return true if the sutire.TestCase should be skipped.
-func (ts *TestSkipper) Skip(test suites.TestCase) bool {
- for _, skip := range ts.testSkips {
- if reflect.ValueOf(skip).Pointer() == reflect.ValueOf(test).Pointer() {
- fmt.Printf("=== SKIP %v\n", runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name())
- return true
- }
- }
- return false
-}
-
-// TestSuiteSkips contains tests we wish to skip.
-var TestSuiteSkipper TestSkipper
-
-func init() {
- // initialize the test skipper with tests being skipped
- TestSuiteSkipper = TestSkipper{testSkips: []suites.TestCase{
- // tests to skip go here
- }}
-}
-
-func TestChainValidationMessageSuite(t *testing.T) {
- f := factory.NewFactories()
- for _, testCase := range suites.MessageTestCases() {
- testCase := testCase
- if TestSuiteSkipper.Skip(testCase) {
- continue
- }
- t.Run(caseName(testCase), func(t *testing.T) {
- testCase(t, f)
- })
- }
-}
-
-func TestChainValidationTipSetSuite(t *testing.T) {
- f := factory.NewFactories()
- for _, testCase := range suites.TipSetTestCases() {
- testCase := testCase
- if TestSuiteSkipper.Skip(testCase) {
- continue
- }
- t.Run(caseName(testCase), func(t *testing.T) {
- testCase(t, f)
- })
- }
-}
-
-func caseName(testCase suites.TestCase) string {
- fqName := runtime.FuncForPC(reflect.ValueOf(testCase).Pointer()).Name()
- toks := strings.Split(fqName, ".")
- return toks[len(toks)-1]
-}
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index f51cbff29..a4efccb29 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -5,12 +5,10 @@ import (
"context"
"fmt"
"reflect"
+ "sync/atomic"
"time"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
block "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
@@ -22,16 +20,21 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/account"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/blockstore"
+ bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/bufbstore"
)
@@ -39,27 +42,29 @@ var log = logging.Logger("vm")
var actorLog = logging.Logger("actors")
var gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
+// stat counters
+var (
+ StatSends uint64
+ StatApplied uint64
+)
+
// ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`.
-func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, aerrors.ActorError) {
+func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, error) {
if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 {
return addr, nil
}
act, err := state.GetActor(addr)
if err != nil {
- return address.Undef, aerrors.Newf(exitcode.SysErrInvalidParameters, "failed to find actor: %s", addr)
+ return address.Undef, xerrors.Errorf("failed to find actor: %s", addr)
}
- if act.Code != builtin.AccountActorCodeID {
- return address.Undef, aerrors.Newf(exitcode.SysErrInvalidParameters, "address %s was not for an account actor", addr)
+ aast, err := account.Load(adt.WrapStore(context.TODO(), cst), act)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("failed to get account actor state for %s: %w", addr, err)
}
- var aast account.State
- if err := cst.Get(context.TODO(), act.Head, &aast); err != nil {
- return address.Undef, aerrors.Absorb(err, exitcode.SysErrInvalidParameters, fmt.Sprintf("failed to get account actor state for %s", addr))
- }
-
- return aast.Address, nil
+ return aast.PubkeyAddress()
}
var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
@@ -114,7 +119,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
Atlas: vm.cst.Atlas,
}
- rt.sys = pricedSyscalls{
+ rt.Syscalls = pricedSyscalls{
under: vm.Syscalls(ctx, vm.cstate, rt.cst),
chargeGas: rt.chargeGasFunc(1),
pl: rt.pricelist,
@@ -126,7 +131,15 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
rt.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From address failed")
}
vmm.From = resF
- rt.vmsg = &vmm
+
+ if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 {
+ rt.Message = &vmm
+ } else {
+ resT, _ := rt.ResolveAddress(msg.To)
+ // may be set to undef if recipient doesn't exist yet
+ vmm.To = resT
+ rt.Message = &Message{msg: vmm}
+ }
return rt
}
@@ -140,6 +153,7 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message, origin
}
type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
+type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
type VM struct {
cstate *state.StateTree
@@ -147,9 +161,10 @@ type VM struct {
cst *cbor.BasicIpldStore
buf *bufbstore.BufferedBS
blockHeight abi.ChainEpoch
- inv *Invoker
+ areg *ActorRegistry
rand Rand
circSupplyCalc CircSupplyCalculator
+ ntwkVersion NtwkVersionGetter
baseFee abi.TokenAmount
Syscalls SyscallBuilder
@@ -162,10 +177,11 @@ type VMOpts struct {
Bstore bstore.Blockstore
Syscalls SyscallBuilder
CircSupplyCalc CircSupplyCalculator
+ NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
BaseFee abi.TokenAmount
}
-func NewVM(opts *VMOpts) (*VM, error) {
+func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
buf := bufbstore.NewBufferedBstore(opts.Bstore)
cst := cbor.NewCborStore(buf)
state, err := state.LoadStateTree(cst, opts.StateBase)
@@ -179,9 +195,10 @@ func NewVM(opts *VMOpts) (*VM, error) {
cst: cst,
buf: buf,
blockHeight: opts.Epoch,
- inv: NewInvoker(),
+ areg: NewActorRegistry(),
rand: opts.Rand, // TODO: Probably should be a syscall
circSupplyCalc: opts.CircSupplyCalc,
+ ntwkVersion: opts.NtwkVersion,
Syscalls: opts.Syscalls,
baseFee: opts.BaseFee,
}, nil
@@ -195,15 +212,16 @@ type Rand interface {
type ApplyRet struct {
types.MessageReceipt
ActorErr aerrors.ActorError
- Penalty types.BigInt
- MinerTip types.BigInt
ExecutionTrace types.ExecutionTrace
Duration time.Duration
+ GasCosts GasOutputs
}
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
+ defer atomic.AddUint64(&StatSends, 1)
+
st := vm.cstate
origin := msg.From
@@ -218,14 +236,21 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
}
rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac)
- rt.lastGasChargeTime = start
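+ // When gas tracing is enabled, inherit the parent runtime's last gas-charge
+ // time/value and restore them on exit so nested sends yield a continuous trace.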
+ if EnableGasTracing {
+ rt.lastGasChargeTime = start
+ if parent != nil {
+ rt.lastGasChargeTime = parent.lastGasChargeTime
+ rt.lastGasCharge = parent.lastGasCharge
+ defer func() {
+ parent.lastGasChargeTime = rt.lastGasChargeTime
+ parent.lastGasCharge = rt.lastGasCharge
+ }()
+ }
+ }
+
if parent != nil {
- rt.lastGasChargeTime = parent.lastGasChargeTime
- rt.lastGasCharge = parent.lastGasCharge
defer func() {
parent.gasUsed = rt.gasUsed
- parent.lastGasChargeTime = rt.lastGasChargeTime
- parent.lastGasCharge = rt.lastGasCharge
}()
}
if gasCharge != nil {
@@ -240,11 +265,24 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
toActor, err := st.GetActor(msg.To)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
- a, err := TryCreateAccountActor(rt, msg.To)
+ a, aid, err := TryCreateAccountActor(rt, msg.To)
if err != nil {
return nil, aerrors.Wrapf(err, "could not create account")
}
toActor = a
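+ // From network version 4 onward, repoint the runtime message at the ID address
+ // assigned to the freshly created account actor.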
+ if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 {
+ // Leave the rt.Message as is
+ } else {
+ nmsg := Message{
+ msg: types.Message{
+ To: aid,
+ From: rt.Message.Caller(),
+ Value: rt.Message.ValueReceived(),
+ },
+ }
+
+ rt.Message = &nmsg
+ }
} else {
return nil, aerrors.Escalate(err, "getting actor")
}
@@ -312,6 +350,7 @@ func checkMessage(msg *types.Message) error {
func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
start := build.Clock.Now()
+ defer atomic.AddUint64(&StatApplied, 1)
ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start)
rt.finilizeGasTracing()
return &ApplyRet{
@@ -322,8 +361,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap
},
ActorErr: actorErr,
ExecutionTrace: rt.executionTrace,
- Penalty: types.NewInt(0),
- MinerTip: types.NewInt(0),
+ GasCosts: GasOutputs{},
Duration: time.Since(start),
}, actorErr
}
@@ -332,6 +370,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
start := build.Clock.Now()
ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage")
defer span.End()
+ defer atomic.AddUint64(&StatApplied, 1)
msg := cmsg.VMMessage()
if span.IsRecordingEvents() {
span.AddAttributes(
@@ -351,14 +390,15 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
msgGasCost := msgGas.Total()
// this should never happen, but is currently still exercised by some tests
if msgGasCost > msg.GasLimit {
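+ // The including miner is penalized for the invalid message; report it via GasCosts.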
+ gasOutputs := ZeroGasOutputs()
+ gasOutputs.MinerPenalty = types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost))
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.SysErrOutOfGas,
GasUsed: 0,
},
- Penalty: types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost)),
+ GasCosts: gasOutputs,
Duration: time.Since(start),
- MinerTip: big.Zero(),
}, nil
}
@@ -369,35 +409,39 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
// this should never happen, but is currently still exercised by some tests
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
+ gasOutputs := ZeroGasOutputs()
+ gasOutputs.MinerPenalty = minerPenaltyAmount
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.SysErrSenderInvalid,
GasUsed: 0,
},
ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "actor not found: %s", msg.From),
- Penalty: minerPenaltyAmount,
+ GasCosts: gasOutputs,
Duration: time.Since(start),
- MinerTip: big.Zero(),
}, nil
}
return nil, xerrors.Errorf("failed to look up from actor: %w", err)
}
// this should never happen, but is currently still exercised by some tests
- if !fromActor.Code.Equals(builtin.AccountActorCodeID) {
+ if !builtin.IsAccountActor(fromActor.Code) {
+ gasOutputs := ZeroGasOutputs()
+ gasOutputs.MinerPenalty = minerPenaltyAmount
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.SysErrSenderInvalid,
GasUsed: 0,
},
ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "send from not account actor: %s", fromActor.Code),
- Penalty: minerPenaltyAmount,
+ GasCosts: gasOutputs,
Duration: time.Since(start),
- MinerTip: big.Zero(),
}, nil
}
if msg.Nonce != fromActor.Nonce {
+ gasOutputs := ZeroGasOutputs()
+ gasOutputs.MinerPenalty = minerPenaltyAmount
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.SysErrSenderStateInvalid,
@@ -405,14 +449,16 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
},
ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid,
"actor nonce invalid: msg:%d != state:%d", msg.Nonce, fromActor.Nonce),
- Penalty: minerPenaltyAmount,
+
+ GasCosts: gasOutputs,
Duration: time.Since(start),
- MinerTip: big.Zero(),
}, nil
}
gascost := types.BigMul(types.NewInt(uint64(msg.GasLimit)), msg.GasFeeCap)
if fromActor.Balance.LessThan(gascost) {
+ gasOutputs := ZeroGasOutputs()
+ gasOutputs.MinerPenalty = minerPenaltyAmount
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.SysErrSenderStateInvalid,
@@ -420,9 +466,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
},
ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid,
"actor balance less than needed: %s < %s", types.FIL(fromActor.Balance), types.FIL(gascost)),
- Penalty: minerPenaltyAmount,
+ GasCosts: gasOutputs,
Duration: time.Since(start),
- MinerTip: big.Zero(),
}, nil
}
@@ -489,7 +534,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
return nil, xerrors.Errorf("failed to burn base fee: %w", err)
}
- if err := vm.transferFromGasHolder(builtin.RewardActorAddr, gasHolder, gasOutputs.MinerTip); err != nil {
+ if err := vm.transferFromGasHolder(reward.Address, gasHolder, gasOutputs.MinerTip); err != nil {
return nil, xerrors.Errorf("failed to give miner gas reward: %w", err)
}
@@ -515,8 +560,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
},
ActorErr: actorErr,
ExecutionTrace: rt.executionTrace,
- Penalty: gasOutputs.MinerPenalty,
- MinerTip: gasOutputs.MinerTip,
+ GasCosts: gasOutputs,
Duration: time.Since(start),
}, nil
}
@@ -542,7 +586,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
}
- if err := Copy(from, to, root); err != nil {
+ if err := Copy(ctx, from, to, root); err != nil {
return cid.Undef, xerrors.Errorf("copying tree: %w", err)
}
@@ -596,9 +640,18 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error {
}
}
-func Copy(from, to blockstore.Blockstore, root cid.Cid) error {
+func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error {
+ ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint
+ defer span.End()
+
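+ // Count copied blocks and bytes so they can be attached to the trace span below.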
+ var numBlocks int
+ var totalCopySize int
+
var batch []block.Block
batchCp := func(blk block.Block) error {
+ numBlocks++
+ totalCopySize += len(blk.RawData())
+
batch = append(batch, blk)
if len(batch) > 100 {
if err := to.PutMany(batch); err != nil {
@@ -619,6 +672,11 @@ func Copy(from, to blockstore.Blockstore, root cid.Cid) error {
}
}
+ span.AddAttributes(
+ trace.Int64Attribute("numBlocks", int64(numBlocks)),
+ trace.Int64Attribute("copySize", int64(totalCopySize)),
+ )
+
return nil
}
@@ -694,9 +752,9 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
defer span.End()
if span.IsRecordingEvents() {
span.AddAttributes(
- trace.StringAttribute("to", rt.Message().Receiver().String()),
+ trace.StringAttribute("to", rt.Receiver().String()),
trace.Int64Attribute("method", int64(method)),
- trace.StringAttribute("value", rt.Message().ValueReceived().String()),
+ trace.StringAttribute("value", rt.ValueReceived().String()),
)
}
@@ -705,15 +763,19 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
defer func() {
rt.ctx = oldCtx
}()
- ret, err := vm.inv.Invoke(act.Code, rt, method, params)
+ ret, err := vm.areg.Invoke(act.Code, rt, method, params)
if err != nil {
return nil, err
}
return ret, nil
}
-func (vm *VM) SetInvoker(i *Invoker) {
- vm.inv = i
+func (vm *VM) SetInvoker(i *ActorRegistry) {
+ vm.areg = i
+}
+
+func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version {
+ return vm.ntwkVersion(ctx, ce)
}
func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go
index bcc8d07b1..b4c846d37 100644
--- a/chain/wallet/wallet.go
+++ b/chain/wallet/wallet.go
@@ -6,7 +6,7 @@ import (
"strings"
"sync"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
@@ -101,7 +101,7 @@ func (w *LocalWallet) findKey(addr address.Address) (*Key, error) {
return nil, nil
}
- ki, err := w.keystore.Get(KNamePrefix + addr.String())
+ ki, err := w.tryFind(addr)
if err != nil {
if xerrors.Is(err, types.ErrKeyInfoNotFound) {
return nil, nil
@@ -116,7 +116,43 @@ func (w *LocalWallet) findKey(addr address.Address) (*Key, error) {
return k, nil
}
-func (w *LocalWallet) WalletExport(ctx context.Context, addr address.Address) (*types.KeyInfo, error) {
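+// tryFind looks up the key under the address as given, then falls back to the
+// testnet-prefixed form, storing any hit back under the originally requested address.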
+func (w *LocalWallet) tryFind(addr address.Address) (types.KeyInfo, error) {
+
+ ki, err := w.keystore.Get(KNamePrefix + addr.String())
+ if err == nil {
+ return ki, err
+ }
+
+ if !xerrors.Is(err, types.ErrKeyInfoNotFound) {
+ return types.KeyInfo{}, err
+ }
+
+ // We got an ErrKeyInfoNotFound error
+ // Try again, this time with the testnet prefix
+
+ aChars := []rune(addr.String())
+ prefixRunes := []rune(address.TestnetPrefix)
+ if len(prefixRunes) != 1 {
+ return types.KeyInfo{}, xerrors.Errorf("unexpected prefix length: %d", len(prefixRunes))
+ }
+
+ aChars[0] = prefixRunes[0]
+ ki, err = w.keystore.Get(KNamePrefix + string(aChars))
+ if err != nil {
+ return types.KeyInfo{}, err
+ }
+
+ // We found it with the testnet prefix
+ // Add this KeyInfo with the mainnet prefix address string
+ err = w.keystore.Put(KNamePrefix+addr.String(), ki)
+ if err != nil {
+ return types.KeyInfo{}, err
+ }
+
+ return ki, nil
+}
+
+func (w *LocalWallet) Export(ctx context.Context, addr address.Address) (*types.KeyInfo, error) {
k, err := w.findKey(addr)
if err != nil {
return nil, xerrors.Errorf("failed to find key to export: %w", err)
@@ -149,6 +185,7 @@ func (w *LocalWallet) WalletList(ctx context.Context) ([]address.Address, error)
sort.Strings(all)
+ seen := map[address.Address]struct{}{}
out := make([]address.Address, 0, len(all))
for _, a := range all {
if strings.HasPrefix(a, KNamePrefix) {
@@ -157,10 +194,19 @@ func (w *LocalWallet) WalletList(ctx context.Context) ([]address.Address, error)
if err != nil {
return nil, xerrors.Errorf("converting name to address: %w", err)
}
+ if _, ok := seen[addr]; ok {
+ continue // got duplicate with a different prefix
+ }
+ seen[addr] = struct{}{}
+
out = append(out, addr)
}
}
+ sort.Slice(out, func(i, j int) bool {
+ return out[i].String() < out[j].String()
+ })
+
return out, nil
}
diff --git a/cli/backup.go b/cli/backup.go
new file mode 100644
index 000000000..c748e47c4
--- /dev/null
+++ b/cli/backup.go
@@ -0,0 +1,125 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ logging "github.com/ipfs/go-log/v2"
+ "github.com/mitchellh/go-homedir"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ "github.com/filecoin-project/lotus/lib/backupds"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+type BackupAPI interface {
+ CreateBackup(ctx context.Context, fpath string) error
+}
+
+type BackupApiFn func(ctx *cli.Context) (BackupAPI, jsonrpc.ClientCloser, error)
+
+func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Command {
+ var offlineBackup = func(cctx *cli.Context) error {
+ logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+
+ repoPath := cctx.String(repoFlag)
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return err
+ }
+
+ ok, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return xerrors.Errorf("repo at '%s' is not initialized", cctx.String(repoFlag))
+ }
+
+ lr, err := r.LockRO(rt)
+ if err != nil {
+ return xerrors.Errorf("locking repo: %w", err)
+ }
+ defer lr.Close() // nolint:errcheck
+
+ mds, err := lr.Datastore("/metadata")
+ if err != nil {
+ return xerrors.Errorf("getting metadata datastore: %w", err)
+ }
+
+ bds := backupds.Wrap(mds)
+
+ fpath, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("expanding file path: %w", err)
+ }
+
+ out, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return xerrors.Errorf("opening backup file %s: %w", fpath, err)
+ }
+
+ if err := bds.Backup(out); err != nil {
+ if cerr := out.Close(); cerr != nil {
+ log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err)
+ }
+ return xerrors.Errorf("backup error: %w", err)
+ }
+
+ if err := out.Close(); err != nil {
+ return xerrors.Errorf("closing backup file: %w", err)
+ }
+
+ return nil
+ }
+
+ var onlineBackup = func(cctx *cli.Context) error {
+ api, closer, err := getApi(cctx)
+ if err != nil {
+ return xerrors.Errorf("getting api: %w (if the node isn't running you can use the --offline flag)", err)
+ }
+ defer closer()
+
+ err = api.CreateBackup(ReqContext(cctx), cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Success")
+
+ return nil
+ }
+
+ return &cli.Command{
+ Name: "backup",
+ Usage: "Create node metadata backup",
+ Description: `The backup command writes a copy of node metadata under the specified path
+
+Online backups:
+For security reasons, the daemon must have the LOTUS_BACKUP_BASE_PATH env var set
+to a path where backup files are to be saved, and the path specified in
+this command must be within this base path`,
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "offline",
+ Usage: "create backup without the node running",
+ },
+ },
+ ArgsUsage: "[backup file path]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ if cctx.Bool("offline") {
+ return offlineBackup(cctx)
+ }
+
+ return onlineBackup(cctx)
+ },
+ }
+}
diff --git a/cli/chain.go b/cli/chain.go
index 1d203639a..763752f23 100644
--- a/cli/chain.go
+++ b/cli/chain.go
@@ -8,14 +8,15 @@ import (
"os"
"os/exec"
"path"
+ "sort"
"strconv"
"strings"
"time"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/account"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
@@ -27,9 +28,10 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/api"
+ lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/stmgr"
types "github.com/filecoin-project/lotus/chain/types"
)
@@ -40,6 +42,7 @@ var chainCmd = &cli.Command{
chainHeadCmd,
chainGetBlock,
chainReadObjCmd,
+ chainDeleteObjCmd,
chainStatObjCmd,
chainGetMsgCmd,
chainSetHeadCmd,
@@ -49,6 +52,7 @@ var chainCmd = &cli.Command{
chainExportCmd,
slashConsensusFault,
chainGasPriceCmd,
+ chainInspectUsage,
},
}
@@ -158,7 +162,7 @@ var chainGetBlock = &cli.Command{
},
}
-func apiMsgCids(in []api.Message) []cid.Cid {
+func apiMsgCids(in []lapi.Message) []cid.Cid {
out := make([]cid.Cid, len(in))
for k, v := range in {
out[k] = v.Cid
@@ -193,6 +197,43 @@ var chainReadObjCmd = &cli.Command{
},
}
+var chainDeleteObjCmd = &cli.Command{
+ Name: "delete-obj",
+ Usage: "Delete an object from the chain blockstore",
+ Description: "WARNING: Removing the wrong objects from the chain blockstore may lead to sync issues",
+ ArgsUsage: "[objectCid]",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ c, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("failed to parse cid input: %s", err)
+ }
+
+ if !cctx.Bool("really-do-it") {
+ return xerrors.Errorf("pass the --really-do-it flag to proceed")
+ }
+
+ err = api.ChainDeleteObj(ctx, c)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Obj %s deleted\n", c.String())
+ return nil
+ },
+}
+
var chainStatObjCmd = &cli.Command{
Name: "stat-obj",
Usage: "Collect size and ipld link counts for objs",
@@ -337,23 +378,146 @@ var chainSetHeadCmd = &cli.Command{
},
}
-func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
- var headers []*types.BlockHeader
- for _, c := range vals {
- blkc, err := cid.Decode(c)
+var chainInspectUsage = &cli.Command{
+ Name: "inspect-usage",
+ Usage: "Inspect block space usage of a given tipset",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset to view block space usage of",
+ Value: "@head",
+ },
+ &cli.IntFlag{
+ Name: "length",
+ Usage: "length of chain to inspect block space usage for",
+ Value: 1,
+ },
+ &cli.IntFlag{
+ Name: "num-results",
+ Usage: "number of results to print per category",
+ Value: 10,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
- return nil, err
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
}
- bh, err := api.ChainGetBlock(ctx, blkc)
- if err != nil {
- return nil, err
+ cur := ts
+ var msgs []lapi.Message
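+ // Walk back 'length' tipsets, accumulating the parent messages of each tipset's first block.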
+ for i := 0; i < cctx.Int("length"); i++ {
+ pmsgs, err := api.ChainGetParentMessages(ctx, cur.Blocks()[0].Cid())
+ if err != nil {
+ return err
+ }
+
+ msgs = append(msgs, pmsgs...)
+
+ next, err := api.ChainGetTipSet(ctx, cur.Parents())
+ if err != nil {
+ return err
+ }
+
+ cur = next
}
- headers = append(headers, bh)
- }
+ codeCache := make(map[address.Address]cid.Cid)
- return types.NewTipSet(headers)
+ lookupActorCode := func(a address.Address) (cid.Cid, error) {
+ c, ok := codeCache[a]
+ if ok {
+ return c, nil
+ }
+
+ act, err := api.StateGetActor(ctx, a, ts.Key())
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ codeCache[a] = act.Code
+ return act.Code, nil
+ }
+
+ bySender := make(map[string]int64)
+ byDest := make(map[string]int64)
+ byMethod := make(map[string]int64)
+ bySenderC := make(map[string]int64)
+ byDestC := make(map[string]int64)
+ byMethodC := make(map[string]int64)
+
+ var sum int64
+ for _, m := range msgs {
+ bySender[m.Message.From.String()] += m.Message.GasLimit
+ bySenderC[m.Message.From.String()]++
+ byDest[m.Message.To.String()] += m.Message.GasLimit
+ byDestC[m.Message.To.String()]++
+ sum += m.Message.GasLimit
+
+ code, err := lookupActorCode(m.Message.To)
+ if err != nil {
+ return err
+ }
+
+ mm := stmgr.MethodsMap[code][m.Message.Method]
+
+ byMethod[mm.Name] += m.Message.GasLimit
+ byMethodC[mm.Name]++
+ }
+
+ type keyGasPair struct {
+ Key string
+ Gas int64
+ }
+
+ mapToSortedKvs := func(m map[string]int64) []keyGasPair {
+ var vals []keyGasPair
+ for k, v := range m {
+ vals = append(vals, keyGasPair{
+ Key: k,
+ Gas: v,
+ })
+ }
+ sort.Slice(vals, func(i, j int) bool {
+ return vals[i].Gas > vals[j].Gas
+ })
+ return vals
+ }
+
+ senderVals := mapToSortedKvs(bySender)
+ destVals := mapToSortedKvs(byDest)
+ methodVals := mapToSortedKvs(byMethod)
+
+ numRes := cctx.Int("num-results")
+
+ fmt.Printf("Total Gas Limit: %d\n", sum)
+ fmt.Printf("By Sender:\n")
+ for i := 0; i < numRes && i < len(senderVals); i++ {
+ sv := senderVals[i]
+ fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
+ }
+ fmt.Println()
+ fmt.Printf("By Receiver:\n")
+ for i := 0; i < numRes && i < len(destVals); i++ {
+ sv := destVals[i]
+ fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
+ }
+ fmt.Println()
+ fmt.Printf("By Method:\n")
+ for i := 0; i < numRes && i < len(methodVals); i++ {
+ sv := methodVals[i]
+ fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
+ }
+
+ return nil
+ },
}
var chainListCmd = &cli.Command{
@@ -629,7 +793,7 @@ var chainGetCmd = &cli.Command{
type apiIpldStore struct {
ctx context.Context
- api api.FullNode
+ api lapi.FullNode
}
func (ht *apiIpldStore) Context() context.Context {
@@ -657,7 +821,7 @@ func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error)
panic("No mutations allowed")
}
-func handleAmt(ctx context.Context, api api.FullNode, r cid.Cid) error {
+func handleAmt(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsArray(s, r)
if err != nil {
@@ -670,7 +834,7 @@ func handleAmt(ctx context.Context, api api.FullNode, r cid.Cid) error {
})
}
-func handleHamtEpoch(ctx context.Context, api api.FullNode, r cid.Cid) error {
+func handleHamtEpoch(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsMap(s, r)
if err != nil {
@@ -678,7 +842,7 @@ func handleHamtEpoch(ctx context.Context, api api.FullNode, r cid.Cid) error {
}
return mp.ForEach(nil, func(key string) error {
- ik, err := adt.ParseIntKey(key)
+ ik, err := abi.ParseIntKey(key)
if err != nil {
return err
}
@@ -688,7 +852,7 @@ func handleHamtEpoch(ctx context.Context, api api.FullNode, r cid.Cid) error {
})
}
-func handleHamtAddress(ctx context.Context, api api.FullNode, r cid.Cid) error {
+func handleHamtAddress(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsMap(s, r)
if err != nil {
@@ -863,6 +1027,9 @@ var chainExportCmd = &cli.Command{
Name: "recent-stateroots",
Usage: "specify the number of recent state roots to include in the export",
},
+ &cli.BoolFlag{
+ Name: "skip-old-msgs",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -897,18 +1064,31 @@ var chainExportCmd = &cli.Command{
return err
}
- stream, err := api.ChainExport(ctx, rsrs, ts.Key())
+ skipold := cctx.Bool("skip-old-msgs")
+
+ if rsrs == 0 && skipold {
+ return fmt.Errorf("must pass a nonzero --recent-stateroots when using --skip-old-msgs")
+ }
+
+ stream, err := api.ChainExport(ctx, rsrs, skipold, ts.Key())
if err != nil {
return err
}
+ var last bool
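+ // A complete export ends with an empty chunk; a non-empty final chunk means the stream was cut short.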
for b := range stream {
+ last = len(b) == 0
+
_, err := fi.Write(b)
if err != nil {
return err
}
}
+ if !last {
+ return xerrors.Errorf("incomplete export (remote connection lost?)")
+ }
+
return nil
},
}
diff --git a/cli/client.go b/cli/client.go
index 17b24ba6a..34d151ace 100644
--- a/cli/client.go
+++ b/cli/client.go
@@ -17,6 +17,7 @@ import (
"github.com/fatih/color"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer"
@@ -27,13 +28,13 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/tablewriter"
)
@@ -474,6 +475,7 @@ func interactiveDeal(cctx *cli.Context) error {
var ask storagemarket.StorageAsk
var epochPrice big.Int
var epochs abi.ChainEpoch
+ var verified bool
var a address.Address
if from := cctx.String("from"); from != "" {
@@ -527,9 +529,14 @@ func interactiveDeal(cctx *cli.Context) error {
continue
}
+ if days < int(build.MinDealDuration/builtin.EpochsInDay) {
+ printErr(xerrors.Errorf("minimum duration is %d days", int(build.MinDealDuration/builtin.EpochsInDay)))
+ continue
+ }
+
state = "miner"
case "miner":
- fmt.Print("Miner Address (t0..): ")
+ fmt.Print("Miner Address (f0..): ")
var maddrStr string
_, err := fmt.Scan(&maddrStr)
@@ -562,9 +569,56 @@ func interactiveDeal(cctx *cli.Context) error {
continue
}
- ask = *a.Ask
+ ask = *a
// TODO: run more validation
+ state = "verified"
+ case "verified":
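+ // Offer a verified deal only when the client holds a DataCap allocation at least as large as the piece.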
+ ts, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ dcap, err := api.StateVerifiedClientStatus(ctx, a, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ if dcap == nil {
+ state = "confirm"
+ continue
+ }
+
+ color.Blue(".. checking verified deal eligibility\n")
+ ds, err := api.ClientDealSize(ctx, data)
+ if err != nil {
+ return err
+ }
+
+ if dcap.Uint64() < uint64(ds.PieceSize) {
+ color.Yellow(".. not enough DataCap available for a verified deal\n")
+ state = "confirm"
+ continue
+ }
+
+ fmt.Print("\nMake this a verified deal? (yes/no): ")
+
+ var yn string
+ _, err = fmt.Scan(&yn)
+ if err != nil {
+ return err
+ }
+
+ switch yn {
+ case "yes":
+ verified = true
+ case "no":
+ verified = false
+ default:
+ fmt.Println("Type 'yes' or 'no' in full")
+ continue
+ }
+
state = "confirm"
case "confirm":
fromBal, err := api.WalletBalance(ctx, a)
@@ -583,10 +637,15 @@ func interactiveDeal(cctx *cli.Context) error {
epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second))
// TODO: do some more or epochs math (round to miner PP, deal start buffer)
+ pricePerGib := ask.Price
+ if verified {
+ pricePerGib = ask.VerifiedPrice
+ }
+
gib := types.NewInt(1 << 30)
// TODO: price is based on PaddedPieceSize, right?
- epochPrice = types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(ds.PieceSize))), gib)
+ epochPrice = types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib)
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
fmt.Printf("-----\n")
@@ -596,6 +655,7 @@ func interactiveDeal(cctx *cli.Context) error {
fmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize)))
fmt.Printf("Duration: %s\n", dur)
fmt.Printf("Total price: ~%s (%s per epoch)\n", types.FIL(totalPrice), types.FIL(epochPrice))
+ fmt.Printf("Verified: %v\n", verified)
state = "accept"
case "accept":
@@ -630,7 +690,7 @@ func interactiveDeal(cctx *cli.Context) error {
MinBlocksDuration: uint64(epochs),
DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")),
FastRetrieval: cctx.Bool("fast-retrieval"),
- VerifiedDeal: false, // TODO: Allow setting
+ VerifiedDeal: verified,
})
if err != nil {
return err
@@ -944,15 +1004,15 @@ var clientQueryAskCmd = &cli.Command{
}
fmt.Printf("Ask: %s\n", maddr)
- fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Ask.Price))
- fmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.Ask.VerifiedPrice))
- fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.Ask.MaxPieceSize))))
+ fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price))
+ fmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice))
+ fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))))
size := cctx.Int64("size")
if size == 0 {
return nil
}
- perEpoch := types.BigDiv(types.BigMul(ask.Ask.Price, types.NewInt(uint64(size))), types.NewInt(1<<30))
+ perEpoch := types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(size))), types.NewInt(1<<30))
fmt.Printf("Price per Block: %s\n", types.FIL(perEpoch))
duration := cctx.Int64("duration")
@@ -979,6 +1039,10 @@ var clientListDeals = &cli.Command{
Usage: "use color in display output",
Value: true,
},
+ &cli.BoolFlag{
+ Name: "show-failed",
+ Usage: "show failed/failing deals",
+ },
&cli.BoolFlag{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
@@ -995,6 +1059,7 @@ var clientListDeals = &cli.Command{
verbose := cctx.Bool("verbose")
color := cctx.Bool("color")
watch := cctx.Bool("watch")
+ showFailed := cctx.Bool("show-failed")
localDeals, err := api.ClientListDeals(ctx)
if err != nil {
@@ -1011,7 +1076,7 @@ var clientListDeals = &cli.Command{
tm.Clear()
tm.MoveCursor(1, 1)
- err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color)
+ err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed)
if err != nil {
return err
}
@@ -1037,19 +1102,15 @@ var clientListDeals = &cli.Command{
}
}
- return outputStorageDeals(ctx, os.Stdout, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"))
+ return outputStorageDeals(ctx, os.Stdout, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"), showFailed)
},
}
func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet, v api.DealInfo) deal {
if v.DealID == 0 {
return deal{
- LocalDeal: v,
- OnChainDealState: market.DealState{
- SectorStartEpoch: -1,
- LastUpdatedEpoch: -1,
- SlashEpoch: -1,
- },
+ LocalDeal: v,
+ OnChainDealState: *market.EmptyDealState(),
}
}
@@ -1064,7 +1125,7 @@ func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet
}
}
-func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, localDeals []api.DealInfo, verbose bool, color bool) error {
+func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error {
sort.Slice(localDeals, func(i, j int) bool {
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
})
@@ -1076,12 +1137,14 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, l
var deals []deal
for _, localDeal := range localDeals {
- deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal))
+ if showFailed || localDeal.State != storagemarket.StorageDealError {
+ deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal))
+ }
}
if verbose {
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
- fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
+ fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tVerified\tMessage\n")
for _, d := range deals {
onChain := "N"
if d.OnChainDealState.SectorStartEpoch != -1 {
@@ -1094,7 +1157,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, l
}
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
- fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
+ fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%v\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Verified, d.LocalDeal.Message)
}
return w.Flush()
}
@@ -1109,6 +1172,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, l
tablewriter.Col("Size"),
tablewriter.Col("Price"),
tablewriter.Col("Duration"),
+ tablewriter.Col("Verified"),
tablewriter.NewLineCol("Message"))
for _, d := range deals {
@@ -1138,6 +1202,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, l
"PieceCID": piece,
"Size": types.SizeStr(types.NewInt(d.LocalDeal.Size)),
"Price": price,
+ "Verified": d.LocalDeal.Verified,
"Duration": d.LocalDeal.Duration,
"Message": d.LocalDeal.Message,
})
@@ -1420,7 +1485,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
otherPartyColumn: otherParty,
"Root Cid": rootCid,
"Initiated?": initiated,
- "Transferred": channel.Transferred,
+ "Transferred": units.BytesSize(float64(channel.Transferred)),
"Voucher": voucher,
"Message": channel.Message,
}
diff --git a/cli/cmd.go b/cli/cmd.go
index c6617dcfd..edcb69adc 100644
--- a/cli/cmd.go
+++ b/cli/cmd.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
+ "net/url"
"os"
"os/signal"
"strings"
@@ -47,14 +48,44 @@ func NewCliError(s string) error {
type ApiConnector func() api.FullNode
type APIInfo struct {
- Addr multiaddr.Multiaddr
+ Addr string
Token []byte
}
func (a APIInfo) DialArgs() (string, error) {
- _, addr, err := manet.DialArgs(a.Addr)
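+ // The API address may be either a multiaddr or a plain URL; multiaddrs are dialed over websockets.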
+ ma, err := multiaddr.NewMultiaddr(a.Addr)
+ if err == nil {
+ _, addr, err := manet.DialArgs(ma)
+ if err != nil {
+ return "", err
+ }
- return "ws://" + addr + "/rpc/v0", err
+ return "ws://" + addr + "/rpc/v0", nil
+ }
+
+ _, err = url.Parse(a.Addr)
+ if err != nil {
+ return "", err
+ }
+ return a.Addr + "/rpc/v0", nil
+}
+
+func (a APIInfo) Host() (string, error) {
+ ma, err := multiaddr.NewMultiaddr(a.Addr)
+ if err == nil {
+ _, addr, err := manet.DialArgs(ma)
+ if err != nil {
+ return "", err
+ }
+
+ return addr, nil
+ }
+
+ spec, err := url.Parse(a.Addr)
+ if err != nil {
+ return "", err
+ }
+ return spec.Host, nil
}
func (a APIInfo) AuthHeader() http.Header {
@@ -72,11 +103,11 @@ func (a APIInfo) AuthHeader() http.Header {
func flagForAPI(t repo.RepoType) string {
switch t {
case repo.FullNode:
- return "api"
+ return "api-url"
case repo.StorageMiner:
- return "miner-api"
+ return "miner-api-url"
case repo.Worker:
- return "worker-api"
+ return "worker-api-url"
default:
panic(fmt.Sprintf("Unknown repo type: %v", t))
}
@@ -130,11 +161,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
strma := ctx.String(apiFlag)
strma = strings.TrimSpace(strma)
- apima, err := multiaddr.NewMultiaddr(strma)
- if err != nil {
- return APIInfo{}, err
- }
- return APIInfo{Addr: apima}, nil
+ return APIInfo{Addr: strma}, nil
}
envKey := envForRepo(t)
@@ -152,12 +179,8 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
if len(sp) != 2 {
log.Warnf("invalid env(%s) value, missing token or address", envKey)
} else {
- ma, err := multiaddr.NewMultiaddr(sp[1])
- if err != nil {
- return APIInfo{}, xerrors.Errorf("could not parse multiaddr from env(%s): %w", envKey, err)
- }
return APIInfo{
- Addr: ma,
+ Addr: sp[1],
Token: []byte(sp[0]),
}, nil
}
@@ -186,7 +209,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
}
return APIInfo{
- Addr: ma,
+ Addr: ma.String(),
Token: token,
}, nil
}
@@ -216,6 +239,13 @@ func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
log.Errorf("repoType type does not match the type of repo.RepoType")
}
+ if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
+ return tn.(api.StorageMiner), func() {}, nil
+ }
+ if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
+ return tn.(api.FullNode), func() {}, nil
+ }
+
addr, headers, err := GetRawAPI(ctx, t)
if err != nil {
return nil, nil, err
@@ -225,6 +255,10 @@ func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
}
func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) {
+ if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
+ return tn.(api.FullNode), func() {}, nil
+ }
+
addr, headers, err := GetRawAPI(ctx, repo.FullNode)
if err != nil {
return nil, nil, err
@@ -234,6 +268,10 @@ func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error
}
func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
+ if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
+ return tn.(api.StorageMiner), func() {}, nil
+ }
+
addr, headers, err := GetRawAPI(ctx, repo.StorageMiner)
if err != nil {
return nil, nil, err
diff --git a/cli/log.go b/cli/log.go
index b551b5645..ed624eb8d 100644
--- a/cli/log.go
+++ b/cli/log.go
@@ -49,7 +49,7 @@ var logSetLevel = &cli.Command{
The system flag can be specified multiple times.
- eg) log set-level --system chain --system blocksync debug
+ eg) log set-level --system chain --system chainxchg debug
Available Levels:
debug
diff --git a/cli/mpool.go b/cli/mpool.go
index 587246b87..8f3e937b6 100644
--- a/cli/mpool.go
+++ b/cli/mpool.go
@@ -3,15 +3,21 @@ package cli
import (
"encoding/json"
"fmt"
+ stdbig "math/big"
"sort"
"strconv"
+ cid "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -26,6 +32,7 @@ var mpoolCmd = &cli.Command{
mpoolReplaceCmd,
mpoolFindCmd,
mpoolConfig,
+ mpoolGasPerfCmd,
},
}
@@ -37,6 +44,10 @@ var mpoolPending = &cli.Command{
Name: "local",
Usage: "print pending messages for addresses in local wallet only",
},
+ &cli.BoolFlag{
+ Name: "cids",
+ Usage: "only print cids of messages in output",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -73,11 +84,15 @@ var mpoolPending = &cli.Command{
}
}
- out, err := json.MarshalIndent(msg, "", " ")
- if err != nil {
- return err
+ if cctx.Bool("cids") {
+ fmt.Println(msg.Cid())
+ } else {
+ out, err := json.MarshalIndent(msg, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(out))
}
- fmt.Println(string(out))
}
return nil
@@ -149,14 +164,6 @@ var mpoolSub = &cli.Command{
},
}
-type statBucket struct {
- msgs map[uint64]*types.SignedMessage
-}
-type mpStat struct {
- addr string
- past, cur, future uint64
-}
-
var mpoolStat = &cli.Command{
Name: "stat",
Usage: "print mempool stats",
@@ -165,6 +172,11 @@ var mpoolStat = &cli.Command{
Name: "local",
Usage: "print stats for addresses in local wallet only",
},
+ &cli.IntFlag{
+ Name: "basefee-lookback",
+ Usage: "number of blocks to look back for minimum basefee",
+ Value: 60,
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -179,6 +191,20 @@ var mpoolStat = &cli.Command{
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
+ currBF := ts.Blocks()[0].ParentBaseFee
+ minBF := currBF
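+ // Find the lowest basefee over the lookback window so messages with too-low fee caps can be counted below.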
+ {
+ currTs := ts
+ for i := 0; i < cctx.Int("basefee-lookback"); i++ {
+ currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+ if err != nil {
+ return xerrors.Errorf("walking chain: %w", err)
+ }
+ if newBF := currTs.Blocks()[0].ParentBaseFee; newBF.LessThan(minBF) {
+ minBF = newBF
+ }
+ }
+ }
var filter map[address.Address]struct{}
if cctx.Bool("local") {
@@ -199,8 +225,16 @@ var mpoolStat = &cli.Command{
return err
}
- buckets := map[address.Address]*statBucket{}
+ type statBucket struct {
+ msgs map[uint64]*types.SignedMessage
+ }
+ type mpStat struct {
+ addr string
+ past, cur, future uint64
+ belowCurr, belowPast uint64
+ }
+ buckets := map[address.Address]*statBucket{}
for _, v := range msgs {
if filter != nil {
if _, has := filter[v.Message.From]; !has {
@@ -237,23 +271,27 @@ var mpoolStat = &cli.Command{
cur++
}
- past := uint64(0)
- future := uint64(0)
+ var s mpStat
+ s.addr = a.String()
+
for _, m := range bkt.msgs {
if m.Message.Nonce < act.Nonce {
- past++
+ s.past++
+ } else if m.Message.Nonce > cur {
+ s.future++
+ } else {
+ s.cur++
}
- if m.Message.Nonce > cur {
- future++
+
+ if m.Message.GasFeeCap.LessThan(currBF) {
+ s.belowCurr++
+ }
+ if m.Message.GasFeeCap.LessThan(minBF) {
+ s.belowPast++
}
}
- out = append(out, mpStat{
- addr: a.String(),
- past: past,
- cur: cur - act.Nonce,
- future: future,
- })
+ out = append(out, s)
}
sort.Slice(out, func(i, j int) bool {
@@ -266,12 +304,14 @@ var mpoolStat = &cli.Command{
total.past += stat.past
total.cur += stat.cur
total.future += stat.future
+ total.belowCurr += stat.belowCurr
+ total.belowPast += stat.belowPast
- fmt.Printf("%s: past: %d, cur: %d, future: %d\n", stat.addr, stat.past, stat.cur, stat.future)
+ fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast)
}
fmt.Println("-----")
- fmt.Printf("total: past: %d, cur: %d, future: %d\n", total.past, total.cur, total.future)
+ fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast)
return nil
},
@@ -293,22 +333,17 @@ var mpoolReplaceCmd = &cli.Command{
Name: "gas-limit",
Usage: "gas price for new message",
},
+ &cli.BoolFlag{
+ Name: "auto",
+ Usage: "automatically reprice the specified message",
+ },
+ &cli.StringFlag{
+ Name: "max-fee",
+ Usage: "Spend up to X FIL for this message (applicable for auto mode)",
+ },
},
- ArgsUsage: "[from] [nonce]",
+ ArgsUsage: "<from nonce> | <message-cid>",
Action: func(cctx *cli.Context) error {
- if cctx.Args().Len() < 2 {
- return cli.ShowCommandHelp(cctx, cctx.Command.Name)
- }
-
- from, err := address.NewFromString(cctx.Args().Get(0))
- if err != nil {
- return err
- }
-
- nonce, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
- if err != nil {
- return err
- }
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -318,6 +353,39 @@ var mpoolReplaceCmd = &cli.Command{
ctx := ReqContext(cctx)
+ var from address.Address
+ var nonce uint64
+ switch cctx.Args().Len() {
+ case 1:
+ mcid, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ msg, err := api.ChainGetMessage(ctx, mcid)
+ if err != nil {
+ return fmt.Errorf("could not find referenced message: %w", err)
+ }
+
+ from = msg.From
+ nonce = msg.Nonce
+ case 2:
+ f, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ n, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ from = f
+ nonce = n
+ default:
+ return cli.ShowCommandHelp(cctx, cctx.Command.Name)
+ }
+
ts, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
@@ -342,15 +410,42 @@ var mpoolReplaceCmd = &cli.Command{
msg := found.Message
- msg.GasLimit = cctx.Int64("gas-limit")
- msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
- if err != nil {
- return fmt.Errorf("parsing gas-premium: %w", err)
- }
- // TODO: estimate fee cap here
- msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
- if err != nil {
- return fmt.Errorf("parsing gas-feecap: %w", err)
+ if cctx.Bool("auto") {
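+ // Auto mode: re-estimate gas values, bump the premium to at least the minimum
+ // replace-by-fee increment, and cap the total fee at the configured maximum.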
+ minRBF := messagepool.ComputeMinRBF(msg.GasPremium)
+
+ var mss *lapi.MessageSendSpec
+ if cctx.IsSet("max-fee") {
+ maxFee, err := types.BigFromString(cctx.String("max-fee"))
+ if err != nil {
+ return fmt.Errorf("parsing max-fee: %w", err)
+ }
+ mss = &lapi.MessageSendSpec{
+ MaxFee: maxFee,
+ }
+ }
+
+ // msg.GasLimit = 0 // TODO: need to fix the way we estimate gas limits to account for the messages already being in the mempool
+ msg.GasFeeCap = abi.NewTokenAmount(0)
+ msg.GasPremium = abi.NewTokenAmount(0)
+ retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK)
+ if err != nil {
+ return fmt.Errorf("failed to estimate gas values: %w", err)
+ }
+
+ msg.GasPremium = big.Max(retm.GasPremium, minRBF)
+ msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium)
+ messagepool.CapGasFee(&msg, mss.Get().MaxFee)
+ } else {
+ msg.GasLimit = cctx.Int64("gas-limit")
+ msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
+ if err != nil {
+ return fmt.Errorf("parsing gas-premium: %w", err)
+ }
+ // TODO: estimate fee cap here
+ msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
+ if err != nil {
+ return fmt.Errorf("parsing gas-feecap: %w", err)
+ }
}
smsg, err := api.WalletSignMessage(ctx, msg.From, &msg)
@@ -495,3 +590,86 @@ var mpoolConfig = &cli.Command{
return nil
},
}
+
+var mpoolGasPerfCmd = &cli.Command{
+ Name: "gas-perf",
+ Usage: "Check gas performance of messages in mempool",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "all",
+ Usage: "print gas performance for all mempool messages (default only prints local messages)",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ msgs, err := api.MpoolPending(ctx, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ var filter map[address.Address]struct{}
+ if !cctx.Bool("all") {
+ filter = map[address.Address]struct{}{}
+
+ addrss, err := api.WalletList(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting local addresses: %w", err)
+ }
+
+ for _, a := range addrss {
+ filter[a] = struct{}{}
+ }
+
+ var filtered []*types.SignedMessage
+ for _, msg := range msgs {
+ if _, has := filter[msg.Message.From]; !has {
+ continue
+ }
+ filtered = append(filtered, msg)
+ }
+ msgs = filtered
+ }
+
+ ts, err := api.ChainHead(ctx)
+ if err != nil {
+ return xerrors.Errorf("failed to get chain head: %w", err)
+ }
+
+ baseFee := ts.Blocks()[0].ParentBaseFee
+
+ bigBlockGasLimit := big.NewInt(build.BlockGasLimit)
+
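+ // Gas performance scales the estimated miner reward for a message to a full block's gas limit.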
+ getGasReward := func(msg *types.SignedMessage) big.Int {
+ maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
+ if types.BigCmp(maxPremium, msg.Message.GasPremium) < 0 {
+ maxPremium = msg.Message.GasPremium
+ }
+ return types.BigMul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
+ }
+
+ getGasPerf := func(gasReward big.Int, gasLimit int64) float64 {
+ // gasPerf = gasReward * build.BlockGasLimit / gasLimit
+ a := new(stdbig.Rat).SetInt(new(stdbig.Int).Mul(gasReward.Int, bigBlockGasLimit.Int))
+ b := stdbig.NewRat(1, gasLimit)
+ c := new(stdbig.Rat).Mul(a, b)
+ r, _ := c.Float64()
+ return r
+ }
+
+ for _, m := range msgs {
+ gasReward := getGasReward(m)
+ gasPerf := getGasPerf(gasReward, m.Message.GasLimit)
+
+ fmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
+ }
+
+ return nil
+ },
+}
diff --git a/cli/multisig.go b/cli/multisig.go
index 57f6c2c03..6b3867cb3 100644
--- a/cli/multisig.go
+++ b/cli/multisig.go
@@ -2,8 +2,6 @@ package cli
import (
"bytes"
- "context"
- "encoding/binary"
"encoding/hex"
"fmt"
"os"
@@ -11,22 +9,29 @@ import (
"strconv"
"text/tabwriter"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- samsig "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/api"
+ init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
- types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/types"
)
var multisigCmd = &cli.Command{
@@ -37,9 +42,16 @@ var multisigCmd = &cli.Command{
msigInspectCmd,
msigProposeCmd,
msigApproveCmd,
+ msigAddProposeCmd,
+ msigAddApproveCmd,
+ msigAddCancelCmd,
msigSwapProposeCmd,
msigSwapApproveCmd,
msigSwapCancelCmd,
+ msigLockProposeCmd,
+ msigLockApproveCmd,
+ msigLockCancelCmd,
+ msigVestedCmd,
},
}
@@ -142,7 +154,7 @@ var msigCreateCmd = &cli.Command{
// get address of newly created miner
- var execreturn init_.ExecReturn
+ var execreturn init0.ExecReturn
if err := execreturn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
return err
}
@@ -175,50 +187,73 @@ var msigInspectCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
+ store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api)))
+
maddr, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
- act, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return err
- }
-
- obj, err := api.ChainReadObj(ctx, act.Head)
- if err != nil {
- return err
- }
-
head, err := api.ChainHead(ctx)
if err != nil {
return err
}
- var mstate samsig.State
- if err := mstate.UnmarshalCBOR(bytes.NewReader(obj)); err != nil {
+ act, err := api.StateGetActor(ctx, maddr, head.Key())
+ if err != nil {
+ return err
+ }
+
+ mstate, err := multisig.Load(store, act)
+ if err != nil {
+ return err
+ }
+ locked, err := mstate.LockedBalance(head.Height())
+ if err != nil {
return err
}
- locked := mstate.AmountLocked(head.Height() - mstate.StartEpoch)
fmt.Printf("Balance: %s\n", types.FIL(act.Balance))
fmt.Printf("Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked)))
if cctx.Bool("vesting") {
- fmt.Printf("InitialBalance: %s\n", types.FIL(mstate.InitialBalance))
- fmt.Printf("StartEpoch: %d\n", mstate.StartEpoch)
- fmt.Printf("UnlockDuration: %d\n", mstate.UnlockDuration)
+ ib, err := mstate.InitialBalance()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("InitialBalance: %s\n", types.FIL(ib))
+ se, err := mstate.StartEpoch()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("StartEpoch: %d\n", se)
+ ud, err := mstate.UnlockDuration()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("UnlockDuration: %d\n", ud)
}
- fmt.Printf("Threshold: %d / %d\n", mstate.NumApprovalsThreshold, len(mstate.Signers))
+ signers, err := mstate.Signers()
+ if err != nil {
+ return err
+ }
+ threshold, err := mstate.Threshold()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Threshold: %d / %d\n", threshold, len(signers))
fmt.Println("Signers:")
- for _, s := range mstate.Signers {
+ for _, s := range signers {
fmt.Printf("\t%s\n", s)
}
- pending, err := GetMultisigPending(ctx, api, mstate.PendingTxns)
- if err != nil {
- return fmt.Errorf("reading pending transactions: %w", err)
+ pending := make(map[int64]multisig.Transaction)
+ if err := mstate.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
+ pending[id] = txn
+ return nil
+ }); err != nil {
+ return xerrors.Errorf("reading pending transactions: %w", err)
}
fmt.Println("Transactions: ", len(pending))
@@ -235,7 +270,7 @@ var msigInspectCmd = &cli.Command{
fmt.Fprintf(w, "ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n")
for _, txid := range txids {
tx := pending[txid]
- fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%d\t%x\n", txid, state(tx), len(tx.Approved), tx.To, types.FIL(tx.Value), tx.Method, tx.Params)
+ fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%d\t%x\n", txid, "pending", len(tx.Approved), tx.To, types.FIL(tx.Value), tx.Method, tx.Params)
}
if err := w.Flush(); err != nil {
return xerrors.Errorf("flushing output: %+v", err)
@@ -247,43 +282,6 @@ var msigInspectCmd = &cli.Command{
},
}
-func GetMultisigPending(ctx context.Context, lapi api.FullNode, hroot cid.Cid) (map[int64]*samsig.Transaction, error) {
- bs := apibstore.NewAPIBlockstore(lapi)
- store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
-
- nd, err := adt.AsMap(store, hroot)
- if err != nil {
- return nil, err
- }
-
- txs := make(map[int64]*samsig.Transaction)
- var tx samsig.Transaction
- err = nd.ForEach(&tx, func(k string) error {
- txid, _ := binary.Varint([]byte(k))
-
- cpy := tx // copy so we don't clobber on future iterations.
- txs[txid] = &cpy
- return nil
- })
- if err != nil {
- return nil, xerrors.Errorf("failed to iterate transactions hamt: %w", err)
- }
-
- return txs, nil
-}
-
-func state(tx *samsig.Transaction) string {
- /* // TODO(why): I strongly disagree with not having these... but i need to move forward
- if tx.Complete {
- return "done"
- }
- if tx.Canceled {
- return "canceled"
- }
- */
- return "pending"
-}
-
var msigProposeCmd = &cli.Command{
Name: "propose",
Usage: "Propose a multisig transaction",
@@ -361,7 +359,7 @@ var msigProposeCmd = &cli.Command{
return fmt.Errorf("failed to look up multisig %s: %w", msig, err)
}
- if act.Code != builtin.MultisigActorCodeID {
+ if !builtin.IsMultisigActor(act.Code) {
return fmt.Errorf("actor %s is not a multisig actor", msig)
}
@@ -381,7 +379,7 @@ var msigProposeCmd = &cli.Command{
return fmt.Errorf("proposal returned exit %d", wait.Receipt.ExitCode)
}
- var retval samsig.ProposeReturn
+ var retval msig0.ProposeReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
return fmt.Errorf("failed to unmarshal propose return value: %w", err)
}
@@ -506,6 +504,236 @@ var msigApproveCmd = &cli.Command{
},
}
+var msigAddProposeCmd = &cli.Command{
+ Name: "add-propose",
+ Usage: "Propose to add a signer",
+ ArgsUsage: "[multisigAddress signer]",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "increase-threshold",
+ Usage: "whether the number of required signers should be increased",
+ },
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the propose message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 2 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ addr, err := address.NewFromString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
+ msgCid, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold"))
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent add proposal in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("add proposal returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var msigAddApproveCmd = &cli.Command{
+ Name: "add-approve",
+ Usage: "Approve a message to add a signer",
+ ArgsUsage: "[multisigAddress proposerAddress txId newAddress increaseThreshold]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the approve message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 5 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, new signer address, and whether to increase the threshold"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ prop, err := address.NewFromString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ newAdd, err := address.NewFromString(cctx.Args().Get(3))
+ if err != nil {
+ return err
+ }
+
+ inc, err := strconv.ParseBool(cctx.Args().Get(4))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
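+ // Approve the pending add-signer proposal, identified by the original proposer and the transaction id.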
+ msgCid, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent add approval in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("add approval returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var msigAddCancelCmd = &cli.Command{
+ Name: "add-cancel",
+ Usage: "Cancel a message to add a signer",
+ ArgsUsage: "[multisigAddress txId newAddress increaseThreshold]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the approve message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 4 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, new signer address, whether to increase threshold"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ newAdd, err := address.NewFromString(cctx.Args().Get(2))
+ if err != nil {
+ return err
+ }
+
+ inc, err := strconv.ParseBool(cctx.Args().Get(3))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
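+ // Cancel the pending add-signer proposal; the parameters must match the original proposal, and the sender must be its proposer.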
+ msgCid, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent add cancellation in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("add cancellation returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
var msigSwapProposeCmd = &cli.Command{
Name: "swap-propose",
Usage: "Propose to swap signers",
@@ -722,7 +950,7 @@ var msigSwapCancelCmd = &cli.Command{
return err
}
- fmt.Println("sent swap approval in message: ", msgCid)
+ fmt.Println("sent swap cancellation in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil {
@@ -730,9 +958,347 @@ var msigSwapCancelCmd = &cli.Command{
}
if wait.Receipt.ExitCode != 0 {
- return fmt.Errorf("swap approval returned exit %d", wait.Receipt.ExitCode)
+ return fmt.Errorf("swap cancellation returned exit %d", wait.Receipt.ExitCode)
}
return nil
},
}
+
+var msigLockProposeCmd = &cli.Command{
+ Name: "lock-propose",
+ Usage: "Propose to lock up some balance",
+ ArgsUsage: "[multisigAddress startEpoch unlockDuration amount]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the propose message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 4 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, start epoch, unlock duration, and amount"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ start, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ duration, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ amount, err := types.ParseFIL(cctx.Args().Get(3))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
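+ // Serialize LockBalanceParams and propose a LockBalance call on the multisig itself, sending no value.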
+ params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{
+ StartEpoch: abi.ChainEpoch(start),
+ UnlockDuration: abi.ChainEpoch(duration),
+ Amount: abi.NewTokenAmount(amount.Int64()),
+ })
+
+ if actErr != nil {
+ return actErr
+ }
+
+ msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent lock proposal in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("lock proposal returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var msigLockApproveCmd = &cli.Command{
+ Name: "lock-approve",
+ Usage: "Approve a message to lock up some balance",
+ ArgsUsage: "[multisigAddress proposerAddress txId startEpoch unlockDuration amount]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the approve message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 6 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, tx id, start epoch, unlock duration, and amount"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ prop, err := address.NewFromString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ start, err := strconv.ParseUint(cctx.Args().Get(3), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ duration, err := strconv.ParseUint(cctx.Args().Get(4), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ amount, err := types.ParseFIL(cctx.Args().Get(5))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
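+ // Rebuild the same LockBalanceParams; the approval only matches if these equal the original proposal.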
+ params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{
+ StartEpoch: abi.ChainEpoch(start),
+ UnlockDuration: abi.ChainEpoch(duration),
+ Amount: abi.NewTokenAmount(amount.Int64()),
+ })
+
+ if actErr != nil {
+ return actErr
+ }
+
+ msgCid, err := api.MsigApprove(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent lock approval in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("lock approval returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var msigLockCancelCmd = &cli.Command{
+ Name: "lock-cancel",
+ Usage: "Cancel a message to lock up some balance",
+ ArgsUsage: "[multisigAddress txId startEpoch unlockDuration amount]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "account to send the cancel message from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 5 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, tx id, start epoch, unlock duration, and amount"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ start, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ duration, err := strconv.ParseUint(cctx.Args().Get(3), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ amount, err := types.ParseFIL(cctx.Args().Get(4))
+ if err != nil {
+ return err
+ }
+
+ var from address.Address
+ if cctx.IsSet("from") {
+ f, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return err
+ }
+ from = f
+ } else {
+ defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ from = defaddr
+ }
+
+ params, actErr := actors.SerializeParams(&msig0.LockBalanceParams{
+ StartEpoch: abi.ChainEpoch(start),
+ UnlockDuration: abi.ChainEpoch(duration),
+ Amount: abi.NewTokenAmount(amount.Int64()),
+ })
+
+ if actErr != nil {
+ return actErr
+ }
+
+ msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(builtin2.MethodsMultisig.LockBalance), params)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("sent lock cancellation in message: ", msgCid)
+
+ wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if wait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("lock cancellation returned exit %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var msigVestedCmd = &cli.Command{
+ Name: "vested",
+ Usage: "Gets the amount vested in an msig between two epochs",
+ ArgsUsage: "[multisigAddress]",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "start-epoch",
+ Usage: "start epoch to measure vesting from",
+ Value: 0,
+ },
+ &cli.Int64Flag{
+ Name: "end-epoch",
+ Usage: "end epoch to stop measure vesting at",
+ Value: -1,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return ShowHelp(cctx, fmt.Errorf("must pass multisig address"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ msig, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ start, err := api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Int64("start-epoch")), types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
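+ // A negative --end-epoch means "measure up to the tipset selected by LoadTipSet" rather than a fixed height.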
+ var end *types.TipSet
+ if cctx.Int64("end-epoch") < 0 {
+ end, err = LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+ } else {
+ end, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Int64("end-epoch")), types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+ }
+
+ ret, err := api.MsigGetVested(ctx, msig, start.Key(), end.Key())
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Vested: %s between %d and %d\n", types.FIL(ret), start.Height(), end.Height())
+
+ return nil
+ },
+}
diff --git a/cli/net.go b/cli/net.go
index f3b5ae2e9..9c40c70c7 100644
--- a/cli/net.go
+++ b/cli/net.go
@@ -8,12 +8,17 @@ import (
"strings"
"text/tabwriter"
- "github.com/libp2p/go-libp2p-core/peer"
- protocol "github.com/libp2p/go-libp2p-core/protocol"
-
"github.com/dustin/go-humanize"
"github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+ "github.com/libp2p/go-libp2p-core/peer"
+ protocol "github.com/libp2p/go-libp2p-core/protocol"
+ "github.com/multiformats/go-multiaddr"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/addrutil"
)
@@ -141,7 +146,7 @@ var NetListen = &cli.Command{
var netConnect = &cli.Command{
Name: "connect",
Usage: "Connect to a peer",
- ArgsUsage: "[peerMultiaddr]",
+ ArgsUsage: "[peerMultiaddr|minerActorAddress]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
@@ -152,7 +157,43 @@ var netConnect = &cli.Command{
pis, err := addrutil.ParseAddresses(ctx, cctx.Args().Slice())
if err != nil {
- return err
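+ // Not a valid multiaddr: fall back to treating the argument as a miner actor address and dial the addresses advertised in its on-chain MinerInfo.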
+ a, perr := address.NewFromString(cctx.Args().First())
+ if perr != nil {
+ return err
+ }
+
+ na, fc, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer fc()
+
+ mi, err := na.StateMinerInfo(ctx, a, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
+
+ if mi.PeerId == nil {
+ return xerrors.Errorf("no PeerID for miner")
+ }
+ multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
+ for i, a := range mi.Multiaddrs {
+ maddr, err := multiaddr.NewMultiaddrBytes(a)
+ if err != nil {
+ log.Warnf("parsing multiaddr %d (%x): %s", i, a, err)
+ continue
+ }
+ multiaddrs = append(multiaddrs, maddr)
+ }
+
+ pi := peer.AddrInfo{
+ ID: *mi.PeerId,
+ Addrs: multiaddrs,
+ }
+
+ fmt.Printf("%s -> %s\n", a, pi)
+
+ pis = append(pis, pi)
}
for _, pi := range pis {
diff --git a/cli/paych.go b/cli/paych.go
index 11b550cc6..1d5e304c3 100644
--- a/cli/paych.go
+++ b/cli/paych.go
@@ -8,14 +8,16 @@ import (
"sort"
"strings"
+ "github.com/filecoin-project/lotus/api"
+
"github.com/filecoin-project/lotus/paychmgr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/urfave/cli/v2"
- types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/types"
)
var paychCmd = &cli.Command{
@@ -26,6 +28,8 @@ var paychCmd = &cli.Command{
paychListCmd,
paychVoucherCmd,
paychSettleCmd,
+ paychStatusCmd,
+ paychStatusByFromToCmd,
paychCloseCmd,
},
}
@@ -34,6 +38,14 @@ var paychAddFundsCmd = &cli.Command{
Name: "add-funds",
Usage: "Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist.",
ArgsUsage: "[fromAddress toAddress amount]",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "restart-retrievals",
+ Usage: "restart stalled retrieval deals on this payment channel",
+ Value: true,
+ },
+ },
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 3 {
return ShowHelp(cctx, fmt.Errorf("must pass three arguments: "))
@@ -76,18 +88,23 @@ var paychAddFundsCmd = &cli.Command{
}
fmt.Fprintln(cctx.App.Writer, chAddr)
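+ // With funds added, optionally restart retrieval deals that stalled on this channel for lack of funds.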
+ restartRetrievals := cctx.Bool("restart-retrievals")
+ if restartRetrievals {
+ return api.ClientRetrieveTryRestartInsufficientFunds(ctx, chAddr)
+ }
return nil
},
}
-var paychStatusCmd = &cli.Command{
- Name: "status",
- Usage: "Show the status of an outbound payment channel between fromAddress and toAddress",
+var paychStatusByFromToCmd = &cli.Command{
+ Name: "status-by-from-to",
+ Usage: "Show the status of an active outbound payment channel by from/to addresses",
ArgsUsage: "[fromAddress toAddress]",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 2 {
- return ShowHelp(cctx, fmt.Errorf("must pass two arguments: "))
+ return ShowHelp(cctx, fmt.Errorf("must pass two arguments: "))
}
+ ctx := ReqContext(cctx)
from, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
@@ -105,52 +122,87 @@ var paychStatusCmd = &cli.Command{
}
defer closer()
- avail, err := api.PaychAvailableFunds(from, to)
+ avail, err := api.PaychAvailableFundsByFromTo(ctx, from, to)
if err != nil {
return err
}
- if avail.Channel == nil {
- if avail.PendingWaitSentinel != nil {
- fmt.Fprint(cctx.App.Writer, "Creating channel\n")
- fmt.Fprintf(cctx.App.Writer, " From: %s\n", from)
- fmt.Fprintf(cctx.App.Writer, " To: %s\n", to)
- fmt.Fprintf(cctx.App.Writer, " Pending Amt: %d\n", avail.PendingAmt)
- fmt.Fprintf(cctx.App.Writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel)
- return nil
- }
- fmt.Fprint(cctx.App.Writer, "Channel does not exist\n")
- fmt.Fprintf(cctx.App.Writer, " From: %s\n", from)
- fmt.Fprintf(cctx.App.Writer, " To: %s\n", to)
- return nil
- }
-
- if avail.PendingWaitSentinel != nil {
- fmt.Fprint(cctx.App.Writer, "Adding Funds to channel\n")
- } else {
- fmt.Fprint(cctx.App.Writer, "Channel exists\n")
- }
-
- nameValues := [][]string{
- {"Channel", avail.Channel.String()},
- {"From", from.String()},
- {"To", to.String()},
- {"Confirmed Amt", fmt.Sprintf("%d", avail.ConfirmedAmt)},
- {"Pending Amt", fmt.Sprintf("%d", avail.PendingAmt)},
- {"Queued Amt", fmt.Sprintf("%d", avail.QueuedAmt)},
- {"Voucher Redeemed Amt", fmt.Sprintf("%d", avail.VoucherReedeemedAmt)},
- }
- if avail.PendingWaitSentinel != nil {
- nameValues = append(nameValues, []string{
- "Add Funds Wait Sentinel",
- avail.PendingWaitSentinel.String(),
- })
- }
- fmt.Fprint(cctx.App.Writer, formatNameValues(nameValues))
+ paychStatus(cctx.App.Writer, avail)
return nil
},
}
+var paychStatusCmd = &cli.Command{
+ Name: "status",
+ Usage: "Show the status of an outbound payment channel",
+ ArgsUsage: "[channelAddress]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return ShowHelp(cctx, fmt.Errorf("must pass an argument: "))
+ }
+ ctx := ReqContext(cctx)
+
+ ch, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return ShowHelp(cctx, fmt.Errorf("failed to parse channel address: %s", err))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ avail, err := api.PaychAvailableFunds(ctx, ch)
+ if err != nil {
+ return err
+ }
+
+ paychStatus(cctx.App.Writer, avail)
+ return nil
+ },
+}
+
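+ // paychStatus pretty-prints a ChannelAvailableFunds result: channel being created, funds being added, channel missing, or channel ready.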
+func paychStatus(writer io.Writer, avail *api.ChannelAvailableFunds) {
+ if avail.Channel == nil {
+ if avail.PendingWaitSentinel != nil {
+ fmt.Fprint(writer, "Creating channel\n")
+ fmt.Fprintf(writer, " From: %s\n", avail.From)
+ fmt.Fprintf(writer, " To: %s\n", avail.To)
+ fmt.Fprintf(writer, " Pending Amt: %d\n", avail.PendingAmt)
+ fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel)
+ return
+ }
+ fmt.Fprint(writer, "Channel does not exist\n")
+ fmt.Fprintf(writer, " From: %s\n", avail.From)
+ fmt.Fprintf(writer, " To: %s\n", avail.To)
+ return
+ }
+
+ if avail.PendingWaitSentinel != nil {
+ fmt.Fprint(writer, "Adding Funds to channel\n")
+ } else {
+ fmt.Fprint(writer, "Channel exists\n")
+ }
+
+ nameValues := [][]string{
+ {"Channel", avail.Channel.String()},
+ {"From", avail.From.String()},
+ {"To", avail.To.String()},
+ {"Confirmed Amt", fmt.Sprintf("%d", avail.ConfirmedAmt)},
+ {"Pending Amt", fmt.Sprintf("%d", avail.PendingAmt)},
+ {"Queued Amt", fmt.Sprintf("%d", avail.QueuedAmt)},
+ {"Voucher Redeemed Amt", fmt.Sprintf("%d", avail.VoucherReedeemedAmt)},
+ }
+ if avail.PendingWaitSentinel != nil {
+ nameValues = append(nameValues, []string{
+ "Add Funds Wait Sentinel",
+ avail.PendingWaitSentinel.String(),
+ })
+ }
+ fmt.Fprint(writer, formatNameValues(nameValues))
+}
+
func formatNameValues(nameValues [][]string) string {
maxLen := 0
for _, nv := range nameValues {
@@ -352,7 +404,7 @@ var paychVoucherCheckCmd = &cli.Command{
return err
}
- sv, err := types.DecodeSignedVoucher(cctx.Args().Get(1))
+ sv, err := paych.DecodeSignedVoucher(cctx.Args().Get(1))
if err != nil {
return err
}
@@ -388,7 +440,7 @@ var paychVoucherAddCmd = &cli.Command{
return err
}
- sv, err := types.DecodeSignedVoucher(cctx.Args().Get(1))
+ sv, err := paych.DecodeSignedVoucher(cctx.Args().Get(1))
if err != nil {
return err
}
@@ -546,7 +598,7 @@ var paychVoucherSubmitCmd = &cli.Command{
return err
}
- sv, err := types.DecodeSignedVoucher(cctx.Args().Get(1))
+ sv, err := paych.DecodeSignedVoucher(cctx.Args().Get(1))
if err != nil {
return err
}
diff --git a/cli/paych_test.go b/cli/paych_test.go
index d4089c4ab..18782b4e8 100644
--- a/cli/paych_test.go
+++ b/cli/paych_test.go
@@ -12,38 +12,31 @@ import (
"testing"
"time"
- "github.com/filecoin-project/lotus/build"
-
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
-
- "github.com/multiformats/go-multiaddr"
-
- "github.com/filecoin-project/lotus/chain/events"
-
- "github.com/filecoin-project/lotus/api/apibstore"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/chain/types"
-
- "github.com/filecoin-project/lotus/api/test"
- "github.com/filecoin-project/lotus/chain/wallet"
- builder "github.com/filecoin-project/lotus/node/test"
- "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ "github.com/multiformats/go-multiaddr"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/api/test"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/events"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ builder "github.com/filecoin-project/lotus/node/test"
)
func init() {
- power.ConsensusMinerMinPower = big.NewInt(2048)
- saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
// TestPaymentChannels does a basic test to exercise the payment channel CLI
@@ -88,7 +81,9 @@ func TestPaymentChannels(t *testing.T) {
// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
- waitForHeight(ctx, t, paymentReceiver, chState.SettlingAt)
+ sa, err := chState.SettlingAt()
+ require.NoError(t, err)
+ waitForHeight(ctx, t, paymentReceiver, sa)
// receiver: paych collect
cmd = []string{chAddr.String()}
@@ -117,7 +112,7 @@ func TestPaymentChannelStatus(t *testing.T) {
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
cmd := []string{creatorAddr.String(), receiverAddr.String()}
- out := creatorCLI.runCmd(paychStatusCmd, cmd)
+ out := creatorCLI.runCmd(paychStatusByFromToCmd, cmd)
fmt.Println(out)
noChannelState := "Channel does not exist"
require.Regexp(t, regexp.MustCompile(noChannelState), out)
@@ -133,7 +128,7 @@ func TestPaymentChannelStatus(t *testing.T) {
// Wait for the output to stop being "Channel does not exist"
for regexp.MustCompile(noChannelState).MatchString(out) {
cmd = []string{creatorAddr.String(), receiverAddr.String()}
- out = creatorCLI.runCmd(paychStatusCmd, cmd)
+ out = creatorCLI.runCmd(paychStatusByFromToCmd, cmd)
}
fmt.Println(out)
@@ -153,7 +148,7 @@ func TestPaymentChannelStatus(t *testing.T) {
// Wait for create channel to complete
chstr := <-create
- cmd = []string{creatorAddr.String(), receiverAddr.String()}
+ cmd = []string{chstr}
out = creatorCLI.runCmd(paychStatusCmd, cmd)
fmt.Println(out)
// Output should have the channel address
@@ -169,7 +164,7 @@ func TestPaymentChannelStatus(t *testing.T) {
cmd = []string{chAddr.String(), fmt.Sprintf("%d", voucherAmt)}
creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
- cmd = []string{creatorAddr.String(), receiverAddr.String()}
+ cmd = []string{chstr}
out = creatorCLI.runCmd(paychStatusCmd, cmd)
fmt.Println(out)
voucherAmtAtto := types.BigMul(types.NewInt(voucherAmt), types.NewInt(build.FilecoinPrecision))
@@ -444,12 +439,12 @@ type mockCLI struct {
}
func newMockCLI(t *testing.T) *mockCLI {
- // Create a CLI App with an --api flag so that we can specify which node
+ // Create a CLI App with an --api-url flag so that we can specify which node
// the command should be executed against
app := cli.NewApp()
app.Flags = []cli.Flag{
&cli.StringFlag{
- Name: "api",
+ Name: "api-url",
Hidden: true,
},
}
@@ -481,8 +476,8 @@ func (c *mockCLIClient) runCmd(cmd *cli.Command, input []string) string {
}
func (c *mockCLIClient) runCmdRaw(cmd *cli.Command, input []string) (string, error) {
- // prepend --api=
- apiFlag := "--api=" + c.addr.String()
+ // prepend --api-url=
+ apiFlag := "--api-url=" + c.addr.String()
input = append([]string{apiFlag}, input...)
fs := c.flagSet(cmd)
@@ -498,7 +493,7 @@ func (c *mockCLIClient) runCmdRaw(cmd *cli.Command, input []string) (string, err
}
func (c *mockCLIClient) flagSet(cmd *cli.Command) *flag.FlagSet {
- // Apply app level flags (so we can process --api flag)
+ // Apply app level flags (so we can process --api-url flag)
fs := &flag.FlagSet{}
for _, f := range c.cctx.App.Flags {
err := f.Apply(fs)
@@ -540,8 +535,7 @@ func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr
require.NoError(t, err)
store := cbor.NewCborStore(apibstore.NewAPIBlockstore(node))
- var chState paych.State
- err = store.Get(ctx, act.Head, &chState)
+ chState, err := paych.Load(adt.WrapStore(ctx, store), act)
require.NoError(t, err)
return chState
diff --git a/cli/pprof.go b/cli/pprof.go
index 6819b362a..dccb97f9a 100644
--- a/cli/pprof.go
+++ b/cli/pprof.go
@@ -9,7 +9,6 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/node/repo"
- manet "github.com/multiformats/go-multiaddr/net"
)
var pprofCmd = &cli.Command{
@@ -37,7 +36,7 @@ var PprofGoroutines = &cli.Command{
if err != nil {
return xerrors.Errorf("could not get API info: %w", err)
}
- _, addr, err := manet.DialArgs(ainfo.Addr)
+ addr, err := ainfo.Host()
if err != nil {
return err
}
diff --git a/cli/send.go b/cli/send.go
index ecec42191..14c1b263b 100644
--- a/cli/send.go
+++ b/cli/send.go
@@ -12,7 +12,7 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -173,7 +173,12 @@ func decodeTypedParams(ctx context.Context, fapi api.FullNode, to address.Addres
return nil, err
}
- p := reflect.New(stmgr.MethodsMap[act.Code][method].Params.Elem()).Interface().(cbg.CBORMarshaler)
+ methodMeta, found := stmgr.MethodsMap[act.Code][method]
+ if !found {
+ return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
+ }
+
+ p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
if err := json.Unmarshal([]byte(paramstr), p); err != nil {
return nil, fmt.Errorf("unmarshaling input into params type: %w", err)
diff --git a/cli/state.go b/cli/state.go
index a0256c2e3..453cde77f 100644
--- a/cli/state.go
+++ b/cli/state.go
@@ -7,6 +7,7 @@ import (
"fmt"
"html/template"
"io"
+ "io/ioutil"
"os"
"reflect"
"sort"
@@ -14,9 +15,12 @@ import (
"strings"
"time"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
"github.com/multiformats/go-multiaddr"
"github.com/ipfs/go-cid"
+ cbor "github.com/ipfs/go-ipld-cbor"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multihash"
"github.com/urfave/cli/v2"
@@ -24,13 +28,15 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/exported"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -51,6 +57,7 @@ var stateCmd = &cli.Command{
stateListActorsCmd,
stateListMinersCmd,
stateCircSupplyCmd,
+ stateSectorCmd,
stateGetActorCmd,
stateLookupIDCmd,
stateReplaySetCmd,
@@ -62,8 +69,10 @@ var stateCmd = &cli.Command{
stateGetDealSetCmd,
stateWaitMsgCmd,
stateSearchMsgCmd,
+ stateMsgCostCmd,
stateMinerInfo,
stateMarketCmd,
+ stateExecTraceCmd,
},
}
@@ -116,11 +125,18 @@ var stateMinerInfo = &cli.Command{
}
fmt.Println()
+ cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key())
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
+
+ fmt.Printf("Proving Period Start:\t%s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart))
+
return nil
},
}
-func parseTipSetString(ts string) ([]cid.Cid, error) {
+func ParseTipSetString(ts string) ([]cid.Cid, error) {
strs := strings.Split(ts, ",")
var cids []cid.Cid
@@ -158,7 +174,7 @@ func ParseTipSetRef(ctx context.Context, api api.FullNode, tss string) (*types.T
return api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(h), types.EmptyTSK)
}
- cids, err := parseTipSetString(tss)
+ cids, err := ParseTipSetString(tss)
if err != nil {
return nil, err
}
@@ -247,13 +263,13 @@ var stateSectorsCmd = &cli.Command{
return err
}
- sectors, err := api.StateMinerSectors(ctx, maddr, nil, true, ts.Key())
+ sectors, err := api.StateMinerSectors(ctx, maddr, nil, ts.Key())
if err != nil {
return err
}
for _, s := range sectors {
- fmt.Printf("%d: %x\n", s.Info.SectorNumber, s.Info.SealedCID)
+ fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
}
return nil
@@ -293,13 +309,81 @@ var stateActiveSectorsCmd = &cli.Command{
}
for _, s := range sectors {
- fmt.Printf("%d: %x\n", s.Info.SectorNumber, s.Info.SealedCID)
+ fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
}
return nil
},
}
+var stateExecTraceCmd = &cli.Command{
+ Name: "exec-trace",
+ Usage: "Get the execution trace of a given message",
+ ArgsUsage: "",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return ShowHelp(cctx, fmt.Errorf("must pass message cid"))
+ }
+
+ mcid, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("message cid was invalid: %s", err)
+ }
+
+ capi, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ msg, err := capi.ChainGetMessage(ctx, mcid)
+ if err != nil {
+ return err
+ }
+
+ lookup, err := capi.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return err
+ }
+
+ ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet)
+ if err != nil {
+ return err
+ }
+
+ pts, err := capi.ChainGetTipSet(ctx, ts.Parents())
+ if err != nil {
+ return err
+ }
+
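+ // Recompute the parent tipset (where the message was included); its trace is then matched below by sender and nonce.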
+ cso, err := capi.StateCompute(ctx, pts.Height(), nil, pts.Key())
+ if err != nil {
+ return err
+ }
+
+ var trace *api.InvocResult
+ for _, t := range cso.Trace {
+ if t.Msg.From == msg.From && t.Msg.Nonce == msg.Nonce {
+ trace = t
+ break
+ }
+ }
+ if trace == nil {
+ return fmt.Errorf("failed to find message in tipset trace output")
+ }
+
+ out, err := json.MarshalIndent(trace, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(out))
+ return nil
+ },
+}
+
var stateReplaySetCmd = &cli.Command{
Name: "replay",
Usage: "Replay a particular message within a tipset",
@@ -806,6 +890,14 @@ var stateComputeStateCmd = &cli.Command{
Name: "html",
Usage: "generate html report",
},
+ &cli.BoolFlag{
+ Name: "json",
+ Usage: "generate json output",
+ },
+ &cli.StringFlag{
+ Name: "compute-state-output",
+ Usage: "a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -822,14 +914,14 @@ var stateComputeStateCmd = &cli.Command{
}
h := abi.ChainEpoch(cctx.Uint64("vm-height"))
- if h == 0 {
- if ts == nil {
- head, err := api.ChainHead(ctx)
- if err != nil {
- return err
- }
- ts = head
+ if ts == nil {
+ head, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
}
+ ts = head
+ }
+ if h == 0 {
h = ts.Height()
}
@@ -845,19 +937,50 @@ var stateComputeStateCmd = &cli.Command{
}
}
- stout, err := api.StateCompute(ctx, h, msgs, ts.Key())
- if err != nil {
- return err
+ var stout *lapi.ComputeStateOutput
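+ // Either reuse a previously saved compute-state output from --compute-state-output, or ask the node to compute it.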
+ if csofile := cctx.String("compute-state-output"); csofile != "" {
+ data, err := ioutil.ReadFile(csofile)
+ if err != nil {
+ return err
+ }
+
+ var o lapi.ComputeStateOutput
+ if err := json.Unmarshal(data, &o); err != nil {
+ return err
+ }
+
+ stout = &o
+ } else {
+ o, err := api.StateCompute(ctx, h, msgs, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ stout = o
+ }
+
+ if cctx.Bool("json") {
+ out, err := json.Marshal(stout)
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(out))
+ return nil
}
if cctx.Bool("html") {
+ st, err := state.LoadStateTree(cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), stout.Root)
+ if err != nil {
+ return xerrors.Errorf("loading state tree: %w", err)
+ }
+
codeCache := map[address.Address]cid.Cid{}
getCode := func(addr address.Address) (cid.Cid, error) {
if c, found := codeCache[addr]; found {
return c, nil
}
- c, err := api.StateGetActor(ctx, addr, ts.Key())
+ c, err := st.GetActor(addr)
if err != nil {
return cid.Cid{}, err
}
@@ -1167,7 +1290,11 @@ func sumGas(changes []*types.GasTrace) types.GasTrace {
}
func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
- re := reflect.New(stmgr.MethodsMap[code][method].Params.Elem())
+ methodMeta, found := stmgr.MethodsMap[code][method]
+ if !found {
+ return "", fmt.Errorf("method %d not found on actor %s", method, code)
+ }
+ re := reflect.New(methodMeta.Params.Elem())
p := re.Interface().(cbg.CBORUnmarshaler)
if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil {
return "", err
@@ -1178,7 +1305,11 @@ func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, erro
}
func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
- re := reflect.New(stmgr.MethodsMap[code][method].Ret.Elem())
+ methodMeta, found := stmgr.MethodsMap[code][method]
+ if !found {
+ return "", fmt.Errorf("method %d not found on actor %s", method, code)
+ }
+ re := reflect.New(methodMeta.Ret.Elem())
p := re.Interface().(cbg.CBORUnmarshaler)
if err := p.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
return "", err
@@ -1293,6 +1424,60 @@ var stateSearchMsgCmd = &cli.Command{
},
}
+var stateMsgCostCmd = &cli.Command{
+ Name: "msg-cost",
+ Usage: "Get the detailed gas costs of a message",
+ ArgsUsage: "[messageCid]",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify message cid to get gas costs for")
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ msg, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ tsk := types.EmptyTSK
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ if ts != nil {
+ tsk = ts.Key()
+ }
+
+ mgc, err := api.StateMsgGasCost(ctx, msg, tsk)
+ if err != nil {
+ return err
+ }
+
+ if mgc != nil {
+ fmt.Printf("Message CID: %s", mgc.Message)
+ fmt.Printf("\nGas Used: %d", mgc.GasUsed)
+ fmt.Printf("\nBase Fee Burn: %d", mgc.BaseFeeBurn)
+ fmt.Printf("\nOverestimation Burn: %d", mgc.OverEstimationBurn)
+ fmt.Printf("\nMiner Tip: %d", mgc.MinerTip)
+ fmt.Printf("\nRefund: %d", mgc.Refund)
+ fmt.Printf("\nTotal Cost: %d", mgc.TotalCost)
+ fmt.Printf("\nMiner Penalty: %d", mgc.MinerPenalty)
+ } else {
+ fmt.Print("message was not found on chain")
+ }
+ return nil
+ },
+}
+
var stateCallCmd = &cli.Command{
Name: "call",
Usage: "Invoke a method on an actor locally",
@@ -1374,7 +1559,7 @@ var stateCallCmd = &cli.Command{
}
if ret.MsgRct.ExitCode != 0 {
- return fmt.Errorf("invocation failed (exit: %d): %s", ret.MsgRct.ExitCode, ret.Error)
+ return fmt.Errorf("invocation failed (exit: %d, gasUsed: %d): %s", ret.MsgRct.ExitCode, ret.MsgRct.GasUsed, ret.Error)
}
s, err := formatOutput(cctx.String("ret"), ret.MsgRct.Return)
@@ -1382,6 +1567,7 @@ var stateCallCmd = &cli.Command{
return fmt.Errorf("failed to format output: %s", err)
}
+ fmt.Printf("gas used: %d\n", ret.MsgRct.GasUsed)
fmt.Printf("return: %s\n", s)
return nil
@@ -1438,28 +1624,18 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
return nil, nil
}
- var target abi.Invokee
- for _, actor := range exported.BuiltinActors() {
- if actor.Code() == act {
- target = actor
- }
- }
- if target == nil {
+ // TODO: consider moving this to a dedicated helper
+ actMeta, ok := stmgr.MethodsMap[act]
+ if !ok {
return nil, fmt.Errorf("unknown actor %s", act)
}
- methods := target.Exports()
- if uint64(len(methods)) <= method || methods[method] == nil {
+
+ methodMeta, ok := actMeta[abi.MethodNum(method)]
+ if !ok {
return nil, fmt.Errorf("unknown method %d for actor %s", method, act)
}
- f := methods[method]
-
- rf := reflect.TypeOf(f)
- if rf.NumIn() != 3 {
- return nil, fmt.Errorf("expected referenced method to have three arguments")
- }
-
- paramObj := rf.In(2).Elem()
+ paramObj := methodMeta.Params
if paramObj.NumField() != len(args) {
return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField())
}
@@ -1479,6 +1655,18 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
return nil, err
}
p.Elem().Field(i).Set(reflect.ValueOf(val))
+ case reflect.TypeOf(abi.ChainEpoch(0)):
+ val, err := strconv.ParseInt(args[i], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ p.Elem().Field(i).Set(reflect.ValueOf(abi.ChainEpoch(val)))
+ case reflect.TypeOf(big.Int{}):
+ val, err := big.FromString(args[i])
+ if err != nil {
+ return nil, err
+ }
+ p.Elem().Field(i).Set(reflect.ValueOf(val))
case reflect.TypeOf(peer.ID("")):
pid, err := peer.Decode(args[i])
if err != nil {
@@ -1530,6 +1718,77 @@ var stateCircSupplyCmd = &cli.Command{
},
}
+var stateSectorCmd = &cli.Command{
+ Name: "sector",
+ Usage: "Get miner sector info",
+ ArgsUsage: "[miner address] [sector number]",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ if cctx.Args().Len() != 2 {
+ return xerrors.Errorf("expected 2 params")
+ }
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ if ts == nil {
+ ts, err = api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ maddr, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ sid, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ si, err := api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(sid), ts.Key())
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("SectorNumber: ", si.SectorNumber)
+ fmt.Println("SealProof: ", si.SealProof)
+ fmt.Println("SealedCID: ", si.SealedCID)
+ fmt.Println("DealIDs: ", si.DealIDs)
+ fmt.Println()
+ fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))
+ fmt.Println("Expiration: ", EpochTime(ts.Height(), si.Expiration))
+ fmt.Println()
+ fmt.Println("DealWeight: ", si.DealWeight)
+ fmt.Println("VerifiedDealWeight: ", si.VerifiedDealWeight)
+ fmt.Println("InitialPledge: ", types.FIL(si.InitialPledge))
+ fmt.Println("ExpectedDayReward: ", types.FIL(si.ExpectedDayReward))
+ fmt.Println("ExpectedStoragePledge: ", types.FIL(si.ExpectedStoragePledge))
+ fmt.Println()
+
+ sp, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(sid), ts.Key())
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Deadline: ", sp.Deadline)
+ fmt.Println("Partition: ", sp.Partition)
+
+ return nil
+ },
+}
+
var stateMarketCmd = &cli.Command{
Name: "market",
Usage: "Inspect the storage market actor",
diff --git a/cli/sync.go b/cli/sync.go
index 27957ac35..dea96d14e 100644
--- a/cli/sync.go
+++ b/cli/sync.go
@@ -5,13 +5,14 @@ import (
"fmt"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ "github.com/filecoin-project/go-state-types/abi"
cid "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain"
)
var syncCmd = &cli.Command{
@@ -21,7 +22,9 @@ var syncCmd = &cli.Command{
syncStatusCmd,
syncWaitCmd,
syncMarkBadCmd,
+ syncUnmarkBadCmd,
syncCheckBadCmd,
+ syncCheckpointCmd,
},
}
@@ -61,7 +64,7 @@ var syncStatusCmd = &cli.Command{
fmt.Printf("\tBase:\t%s\n", base)
fmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
fmt.Printf("\tHeight diff:\t%d\n", heightDiff)
- fmt.Printf("\tStage: %s\n", chain.SyncStageString(ss.Stage))
+ fmt.Printf("\tStage: %s\n", ss.Stage)
fmt.Printf("\tHeight: %d\n", ss.Height)
if ss.End.IsZero() {
if !ss.Start.IsZero() {
@@ -118,6 +121,31 @@ var syncMarkBadCmd = &cli.Command{
},
}
+var syncUnmarkBadCmd = &cli.Command{
+ Name: "unmark-bad",
+ Usage: "Unmark the given block as bad, makes it possible to sync to a chain containing it",
+ ArgsUsage: "[blockCid]",
+ Action: func(cctx *cli.Context) error {
+ napi, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify block cid to unmark")
+ }
+
+ bcid, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("failed to decode input as a cid: %s", err)
+ }
+
+ return napi.SyncUnmarkBad(ctx, bcid)
+ },
+}
+
var syncCheckBadCmd = &cli.Command{
Name: "check-bad",
Usage: "check if the given block was marked bad, and for what reason",
@@ -154,7 +182,65 @@ var syncCheckBadCmd = &cli.Command{
},
}
+var syncCheckpointCmd = &cli.Command{
+ Name: "checkpoint",
+ Usage: "mark a certain tipset as checkpointed; the node will never fork away from this tipset",
+ ArgsUsage: "[tipsetKey]",
+ Flags: []cli.Flag{
+ &cli.Uint64Flag{
+ Name: "epoch",
+ Usage: "checkpoint the tipset at the given epoch",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ napi, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ var ts *types.TipSet
+
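+ // Resolve the tipset to checkpoint: from --epoch if set, otherwise from the block CIDs passed as arguments.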
+ if cctx.IsSet("epoch") {
+ ts, err = napi.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK)
+ }
+ if ts == nil {
+ ts, err = parseTipSet(ctx, napi, cctx.Args().Slice())
+ }
+ if err != nil {
+ return err
+ }
+
+ if ts == nil {
+ return fmt.Errorf("must pass cids for tipset to set as head, or specify epoch flag")
+ }
+
+ if err := napi.SyncCheckpoint(ctx, ts.Key()); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
+
func SyncWait(ctx context.Context, napi api.FullNode) error {
+ tick := time.Second / 4
+
+ lastLines := 0
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
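+ // Sample VMApplied every few ticks so we can report how many messages are being validated per second.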
+ samples := 8
+ i := 0
+ var firstApp, app, lastApp uint64
+
+ state, err := napi.SyncState(ctx)
+ if err != nil {
+ return err
+ }
+ firstApp = state.VMApplied
+
for {
state, err := napi.SyncState(ctx)
if err != nil {
@@ -179,14 +265,41 @@ func SyncWait(ctx context.Context, napi api.FullNode) error {
ss := state.ActiveSyncs[working]
+ var baseHeight abi.ChainEpoch
var target []cid.Cid
var theight abi.ChainEpoch
+ var heightDiff int64
+
+ if ss.Base != nil {
+ baseHeight = ss.Base.Height()
+ heightDiff = int64(ss.Base.Height())
+ }
if ss.Target != nil {
target = ss.Target.Cids()
theight = ss.Target.Height()
+ heightDiff = int64(ss.Target.Height()) - heightDiff
+ } else {
+ heightDiff = 0
}
- fmt.Printf("\r\x1b[2KWorker %d: Target Height: %d\tTarget: %s\tState: %s\tHeight: %d", working, theight, target, chain.SyncStageString(ss.Stage), ss.Height)
+ for i := 0; i < lastLines; i++ {
+ fmt.Print("\r\x1b[2K\x1b[A")
+ }
+
+ fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", working, baseHeight, theight, heightDiff)
+ fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height)
+ lastLines = 2
+
+ if i%samples == 0 {
+ lastApp = app
+ app = state.VMApplied - firstApp
+ }
+ if i > 0 {
+ fmt.Printf("Validated %d messages (%d per second)\n", state.VMApplied-firstApp, (app-lastApp)*uint64(time.Second/tick)/uint64(samples))
+ lastLines++
+ }
+
+ _ = target // todo: maybe print? (creates a bunch of line wrapping issues with most tipsets)
if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
fmt.Println("\nDone!")
@@ -197,7 +310,9 @@ func SyncWait(ctx context.Context, napi api.FullNode) error {
case <-ctx.Done():
fmt.Println("\nExit by user")
return nil
- case <-build.Clock.After(1 * time.Second):
+ case <-ticker.C:
}
+
+ i++
}
}
diff --git a/cli/util.go b/cli/util.go
new file mode 100644
index 000000000..fb555e320
--- /dev/null
+++ b/cli/util.go
@@ -0,0 +1,48 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/hako/durafmt"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
+ var headers []*types.BlockHeader
+ for _, c := range vals {
+ blkc, err := cid.Decode(c)
+ if err != nil {
+ return nil, err
+ }
+
+ bh, err := api.ChainGetBlock(ctx, blkc)
+ if err != nil {
+ return nil, err
+ }
+
+ headers = append(headers, bh)
+ }
+
+ return types.NewTipSet(headers)
+}
+
+func EpochTime(curr, e abi.ChainEpoch) string {
+ switch {
+ case curr > e:
+ return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
+ case curr == e:
+ return fmt.Sprintf("%d (now)", e)
+ case curr < e:
+ return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
+ }
+
+ panic("math broke")
+}
diff --git a/cli/wallet.go b/cli/wallet.go
index 025e3a7b6..aa5b9bed3 100644
--- a/cli/wallet.go
+++ b/cli/wallet.go
@@ -9,13 +9,16 @@ import (
"os"
"strings"
- "github.com/filecoin-project/go-address"
- types "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/wallet"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/urfave/cli/v2"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/filecoin-project/lotus/lib/tablewriter"
)
var walletCmd = &cli.Command{
@@ -66,6 +69,13 @@ var walletNew = &cli.Command{
var walletList = &cli.Command{
Name: "list",
Usage: "List wallet address",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "addr-only",
+ Usage: "Only print addresses",
+ Aliases: []string{"a"},
+ },
+ },
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -79,9 +89,52 @@ var walletList = &cli.Command{
return err
}
+ // Assume an error means no default key is set
+ def, _ := api.WalletDefaultAddress(ctx)
+
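+ // One table row per address: balance, nonce and a marker for the default key; addresses without an on-chain actor show a zero balance.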
+ tw := tablewriter.New(
+ tablewriter.Col("Address"),
+ tablewriter.Col("Balance"),
+ tablewriter.Col("Nonce"),
+ tablewriter.Col("Default"),
+ tablewriter.NewLineCol("Error"))
+
for _, addr := range addrs {
- fmt.Println(addr.String())
+ if cctx.Bool("addr-only") {
+ fmt.Println(addr.String())
+ } else {
+ a, err := api.StateGetActor(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ if !strings.Contains(err.Error(), "actor not found") {
+ tw.Write(map[string]interface{}{
+ "Address": addr,
+ "Error": err,
+ })
+ continue
+ }
+
+ a = &types.Actor{
+ Balance: big.Zero(),
+ }
+ }
+
+ row := map[string]interface{}{
+ "Address": addr,
+ "Balance": types.FIL(a.Balance),
+ "Nonce": a.Nonce,
+ }
+ if addr == def {
+ row["Default"] = "X"
+ }
+
+ tw.Write(row)
+ }
}
+
+ if !cctx.Bool("addr-only") {
+ return tw.Flush(os.Stdout)
+ }
+
return nil
},
}
@@ -382,7 +435,11 @@ var walletVerify = &cli.Command{
return err
}
- if api.WalletVerify(ctx, addr, msg, &sig) {
+ ok, err := api.WalletVerify(ctx, addr, msg, &sig)
+ if err != nil {
+ return err
+ }
+ if ok {
fmt.Println("valid")
return nil
}
diff --git a/cmd/chain-noise/main.go b/cmd/chain-noise/main.go
index 9e9ac2e49..7b9824016 100644
--- a/cmd/chain-noise/main.go
+++ b/cmd/chain-noise/main.go
@@ -7,7 +7,7 @@ import (
"os"
"time"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go
new file mode 100644
index 000000000..51ab696f7
--- /dev/null
+++ b/cmd/lotus-bench/caching_verifier.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "errors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ "github.com/ipfs/go-datastore"
+ "github.com/minio/blake2b-simd"
+ cbg "github.com/whyrusleeping/cbor-gen"
+)
+
+type cachingVerifier struct {
+ ds datastore.Datastore
+ backend ffiwrapper.Verifier
+}
+
+const bufsize = 128
+
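+ // withCache keys each verification by the blake2b-256 hash of its CBOR-encoded params and caches a single byte: 's' success, 'f' failure, or 'e' followed by the error text.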
+func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBORMarshaler) (bool, error) {
+ hasher := blake2b.New256()
+ wr := bufio.NewWriterSize(hasher, bufsize)
+ err := param.MarshalCBOR(wr)
+ if err != nil {
+ log.Errorf("could not marshal call info: %+v", err)
+ return execute()
+ }
+ err = wr.Flush()
+ if err != nil {
+ log.Errorf("could not flush: %+v", err)
+ return execute()
+ }
+ hash := hasher.Sum(nil)
+ key := datastore.NewKey(string(hash))
+ fromDs, err := cv.ds.Get(key)
+ if err == nil {
+ switch fromDs[0] {
+ case 's':
+ return true, nil
+ case 'f':
+ return false, nil
+ case 'e':
+ return false, errors.New(string(fromDs[1:]))
+ default:
+ log.Errorf("bad cached result in cache %s(%x)", fromDs[0], fromDs[0])
+ return execute()
+ }
+ } else if errors.Is(err, datastore.ErrNotFound) {
+ // recalc
+ ok, err := execute()
+ var save []byte
+ if err != nil {
+ if ok {
+ log.Errorf("success with an error: %+v", err)
+ } else {
+ save = append([]byte{'e'}, []byte(err.Error())...)
+ }
+ } else if ok {
+ save = []byte{'s'}
+ } else {
+ save = []byte{'f'}
+ }
+
+ if len(save) != 0 {
+ errSave := cv.ds.Put(key, save)
+ if errSave != nil {
+ log.Errorf("error saving result: %+v", errSave)
+ }
+ }
+
+ return ok, err
+ } else {
+ log.Errorf("could not get data from cache: %+v", err)
+ return execute()
+ }
+}
+
+func (cv *cachingVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
+ return cv.withCache(func() (bool, error) {
+ return cv.backend.VerifySeal(svi)
+ }, &svi)
+}
+func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
+ return cv.backend.VerifyWinningPoSt(ctx, info)
+}
+func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
+ return cv.withCache(func() (bool, error) {
+ return cv.backend.VerifyWindowPoSt(ctx, info)
+ }, &info)
+}
+func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, a abi.ActorID, rnd abi.PoStRandomness, u uint64) ([]uint64, error) {
+ return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u)
+}
+
+var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)
diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go
index 7400cd92e..3d93b0e5e 100644
--- a/cmd/lotus-bench/import.go
+++ b/cmd/lotus-bench/import.go
@@ -16,6 +16,8 @@ import (
"sort"
"time"
+ "github.com/cockroachdb/pebble"
+ "github.com/cockroachdb/pebble/bloom"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -24,12 +26,16 @@ import (
"github.com/filecoin-project/lotus/lib/blockstore"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+ "github.com/ipld/go-car"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ bdg "github.com/dgraph-io/badger/v2"
"github.com/ipfs/go-datastore"
badger "github.com/ipfs/go-ds-badger2"
+ pebbleds "github.com/ipfs/go-ds-pebble"
+
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
@@ -56,6 +62,33 @@ var importBenchCmd = &cli.Command{
Usage: "set the parallelism factor for batch seal verification",
Value: runtime.NumCPU(),
},
+ &cli.StringFlag{
+ Name: "repodir",
+ Usage: "set the repo directory for the lotus bench run (defaults to /tmp)",
+ },
+ &cli.StringFlag{
+ Name: "syscall-cache",
+ Usage: "read and write syscall results from datastore",
+ },
+ &cli.BoolFlag{
+ Name: "export-traces",
+ Usage: "should we export execution traces",
+ Value: true,
+ },
+ &cli.BoolFlag{
+ Name: "no-import",
+ Usage: "should we import the chain? if set to true chain has to be previously imported",
+ },
+ &cli.BoolFlag{
+ Name: "global-profile",
+ Value: true,
+ },
+ &cli.Int64Flag{
+ Name: "start-at",
+ },
+ &cli.BoolFlag{
+ Name: "only-import",
+ },
},
Action: func(cctx *cli.Context) error {
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
@@ -70,38 +103,120 @@ var importBenchCmd = &cli.Command{
}
defer cfi.Close() //nolint:errcheck // read only file
- tdir, err := ioutil.TempDir("", "lotus-import-bench")
- if err != nil {
- return err
+ go func() {
+ http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
+ }()
+
+ var tdir string
+ if rdir := cctx.String("repodir"); rdir != "" {
+ tdir = rdir
+ } else {
+ tmp, err := ioutil.TempDir("", "lotus-import-bench")
+ if err != nil {
+ return err
+ }
+ tdir = tmp
}
- bds, err := badger.NewDatastore(tdir, nil)
+ bdgOpt := badger.DefaultOptions
+ bdgOpt.GcInterval = 0
+ bdgOpt.Options = bdg.DefaultOptions("")
+ bdgOpt.Options.SyncWrites = false
+ bdgOpt.Options.Truncate = true
+ bdgOpt.Options.DetectConflicts = false
+
+ var bds datastore.Batching
+ if false {
+ cache := 512
+ bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
+ // Pebble has a single combined cache area and the write
+ // buffers are taken from this too. Assign all available
+ // memory allowance for cache.
+ Cache: pebble.NewCache(int64(cache * 1024 * 1024)),
+ // The size of memory table(as well as the write buffer).
+ // Note, there may have more than two memory tables in the system.
+ // MemTableStopWritesThreshold can be configured to avoid the memory abuse.
+ MemTableSize: cache * 1024 * 1024 / 4,
+ // The default compaction concurrency(1 thread),
+ // Here use all available CPUs for faster compaction.
+ MaxConcurrentCompactions: runtime.NumCPU(),
+ // Per-level options. Options for at least one level must be specified. The
+ // options for the last level are used for all subsequent levels.
+ Levels: []pebble.LevelOptions{
+ {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression},
+ },
+ Logger: log,
+ })
+ } else {
+ bds, err = badger.NewDatastore(tdir, &bdgOpt)
+ }
if err != nil {
return err
}
+ defer bds.Close() //nolint:errcheck
+
bs := blockstore.NewBlockstore(bds)
- cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts())
+ cacheOpts := blockstore.DefaultCacheOpts()
+ cacheOpts.HasBloomFilterSize = 0
+
+ cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts)
if err != nil {
return err
}
bs = cbs
ds := datastore.NewMapDatastore()
- cs := store.NewChainStore(bs, ds, vm.Syscalls(ffiwrapper.ProofVerifier))
+
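+ // When --syscall-cache is set, wrap the proof verifier so seal/PoSt verification results are cached on disk between runs.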
+ var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier
+ if cctx.IsSet("syscall-cache") {
+ scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt)
+ if err != nil {
+ return xerrors.Errorf("opening syscall-cache datastore: %w", err)
+ }
+ defer scds.Close() //nolint:errcheck
+
+ verifier = &cachingVerifier{
+ ds: scds,
+ backend: verifier,
+ }
+ }
+ if cctx.Bool("only-gc") {
+ return nil
+ }
+
+ cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier))
stm := stmgr.NewStateManager(cs)
- prof, err := os.Create("import-bench.prof")
- if err != nil {
- return err
- }
- defer prof.Close() //nolint:errcheck
+ if cctx.Bool("global-profile") {
+ prof, err := os.Create("import-bench.prof")
+ if err != nil {
+ return err
+ }
+ defer prof.Close() //nolint:errcheck
- if err := pprof.StartCPUProfile(prof); err != nil {
- return err
+ if err := pprof.StartCPUProfile(prof); err != nil {
+ return err
+ }
}
- head, err := cs.Import(cfi)
- if err != nil {
- return err
+ var head *types.TipSet
+ if !cctx.Bool("no-import") {
+ head, err = cs.Import(cfi)
+ if err != nil {
+ return err
+ }
+ } else {
+ cr, err := car.NewCarReader(cfi)
+ if err != nil {
+ return err
+ }
+ head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...))
+ if err != nil {
+ return err
+ }
+ }
+
+ if cctx.Bool("only-import") {
+ return nil
}
gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true)
@@ -114,6 +229,20 @@ var importBenchCmd = &cli.Command{
return err
}
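+ // --start-at limits how far back the replay below walks, and resets the chain store head to that epoch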
+ startEpoch := abi.ChainEpoch(1)
+ if cctx.IsSet("start-at") {
+ startEpoch = abi.ChainEpoch(cctx.Int64("start-at"))
+ start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true)
+ if err != nil {
+ return err
+ }
+
+ err = cs.SetHead(start)
+ if err != nil {
+ return err
+ }
+ }
+
if h := cctx.Int64("height"); h != 0 {
tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
if err != nil {
@@ -124,7 +253,7 @@ var importBenchCmd = &cli.Command{
ts := head
tschain := []*types.TipSet{ts}
- for ts.Height() != 0 {
+ for ts.Height() > startEpoch {
next, err := cs.LoadTipSet(ts.Parents())
if err != nil {
return err
@@ -134,45 +263,48 @@ var importBenchCmd = &cli.Command{
ts = next
}
- ibj, err := os.Create("import-bench.json")
- if err != nil {
- return err
+ var enc *json.Encoder
+ if cctx.Bool("export-traces") {
+ ibj, err := os.Create("import-bench.json")
+ if err != nil {
+ return err
+ }
+ defer ibj.Close() //nolint:errcheck
+
+ enc = json.NewEncoder(ibj)
}
- defer ibj.Close() //nolint:errcheck
- enc := json.NewEncoder(ibj)
-
- var lastTse *TipSetExec
-
- lastState := tschain[len(tschain)-1].ParentState()
- for i := len(tschain) - 2; i >= 0; i-- {
+ for i := len(tschain) - 1; i >= 1; i-- {
cur := tschain[i]
+ start := time.Now()
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
- if cur.ParentState() != lastState {
- lastTrace := lastTse.Trace
+ st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
+ if err != nil {
+ return err
+ }
+ tse := &TipSetExec{
+ TipSet: cur.Key(),
+ Trace: trace,
+ Duration: time.Since(start),
+ }
+ if enc != nil {
+ stripCallers(tse.Trace)
+
+ if err := enc.Encode(tse); err != nil {
+ return xerrors.Errorf("failed to write out tipsetexec: %w", err)
+ }
+ }
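+ // sanity check: the state root we just computed must match the parent state recorded in the next tipset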
+ if tschain[i-1].ParentState() != st {
+ stripCallers(tse.Trace)
+ lastTrace := tse.Trace
d, err := json.MarshalIndent(lastTrace, "", " ")
if err != nil {
panic(err)
}
fmt.Println("TRACE")
fmt.Println(string(d))
- return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), lastState)
- }
- start := time.Now()
- st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
- if err != nil {
- return err
- }
- stripCallers(trace)
-
- lastTse = &TipSetExec{
- TipSet: cur.Key(),
- Trace: trace,
- Duration: time.Since(start),
- }
- lastState = st
- if err := enc.Encode(lastTse); err != nil {
- return xerrors.Errorf("failed to write out tipsetexec: %w", err)
+ //fmt.Println(statediff.Diff(context.Background(), bs, tschain[i-1].ParentState(), st, statediff.ExpandActors))
+ return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), st)
}
}
diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go
index 694987f27..e409dfe5a 100644
--- a/cmd/lotus-bench/main.go
+++ b/cmd/lotus-bench/main.go
@@ -11,6 +11,8 @@ import (
"path/filepath"
"time"
+ saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2"
"github.com/minio/blake2b-simd"
@@ -20,16 +22,16 @@ import (
"github.com/filecoin-project/go-address"
paramfetch "github.com/filecoin-project/go-paramfetch"
+ "github.com/filecoin-project/go-state-types/abi"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-storage/storage"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/genesis"
)
@@ -74,8 +76,6 @@ func main() {
log.Info("Starting lotus-bench")
- miner.SupportedProofTypes[abi.RegisteredSealProof_StackedDrg2KiBV1] = struct{}{}
-
app := &cli.App{
Name: "lotus-bench",
Usage: "Benchmark performance of lotus on your hardware",
@@ -145,6 +145,8 @@ var sealBenchCmd = &cli.Command{
},
},
Action: func(c *cli.Context) error {
+ policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+
if c.Bool("no-gpu") {
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
@@ -235,7 +237,7 @@ var sealBenchCmd = &cli.Command{
}
var sealTimings []SealingResult
- var sealedSectors []abi.SectorInfo
+ var sealedSectors []saproof.SectorInfo
if robench == "" {
var err error
@@ -278,7 +280,7 @@ var sealBenchCmd = &cli.Command{
}
for _, s := range genm.Sectors {
- sealedSectors = append(sealedSectors, abi.SectorInfo{
+ sealedSectors = append(sealedSectors, saproof.SectorInfo{
SealedCID: s.CommR,
SectorNumber: s.SectorID,
SealProof: s.ProofType,
@@ -303,7 +305,7 @@ var sealBenchCmd = &cli.Command{
return err
}
- candidates := make([]abi.SectorInfo, len(fcandidates))
+ candidates := make([]saproof.SectorInfo, len(fcandidates))
for i, fcandidate := range fcandidates {
candidates[i] = sealedSectors[fcandidate]
}
@@ -326,7 +328,7 @@ var sealBenchCmd = &cli.Command{
winnningpost2 := time.Now()
- pvi1 := abi.WinningPoStVerifyInfo{
+ pvi1 := saproof.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof1,
ChallengedSectors: candidates,
@@ -342,7 +344,7 @@ var sealBenchCmd = &cli.Command{
verifyWinningPost1 := time.Now()
- pvi2 := abi.WinningPoStVerifyInfo{
+ pvi2 := saproof.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof2,
ChallengedSectors: candidates,
@@ -374,7 +376,7 @@ var sealBenchCmd = &cli.Command{
windowpost2 := time.Now()
- wpvi1 := abi.WindowPoStVerifyInfo{
+ wpvi1 := saproof.WindowPoStVerifyInfo{
Randomness: challenge[:],
Proofs: wproof1,
ChallengedSectors: sealedSectors,
@@ -385,12 +387,12 @@ var sealBenchCmd = &cli.Command{
return err
}
if !ok {
- log.Error("post verification failed")
+ log.Error("window post verification failed")
}
verifyWindowpost1 := time.Now()
- wpvi2 := abi.WindowPoStVerifyInfo{
+ wpvi2 := saproof.WindowPoStVerifyInfo{
Randomness: challenge[:],
Proofs: wproof2,
ChallengedSectors: sealedSectors,
@@ -401,7 +403,7 @@ var sealBenchCmd = &cli.Command{
return err
}
if !ok {
- log.Error("post verification failed")
+ log.Error("window post verification failed")
}
verifyWindowpost2 := time.Now()
@@ -462,10 +464,10 @@ type ParCfg struct {
Commit int
}
-func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []abi.SectorInfo, error) {
+func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof.SectorInfo, error) {
var pieces []abi.PieceInfo
sealTimings := make([]SealingResult, numSectors)
- sealedSectors := make([]abi.SectorInfo, numSectors)
+ sealedSectors := make([]saproof.SectorInfo, numSectors)
preCommit2Sema := make(chan struct{}, par.PreCommit2)
commitSema := make(chan struct{}, par.Commit)
@@ -535,7 +537,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now()
<-preCommit2Sema
- sealedSectors[ix] = abi.SectorInfo{
+ sealedSectors[ix] = saproof.SectorInfo{
SealProof: sb.SealProofType(),
SectorNumber: i,
SealedCID: cids.Sealed,
@@ -587,7 +589,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
<-commitSema
if !skipc2 {
- svi := abi.SealVerifyInfo{
+ svi := saproof.SealVerifyInfo{
SectorID: abi.SectorID{Miner: mid, Number: i},
SealedCID: cids.Sealed,
SealProof: sb.SealProofType(),
diff --git a/cmd/lotus-chainwatch/processor/common_actors.go b/cmd/lotus-chainwatch/processor/common_actors.go
index d6aec7f90..0f2c0d2ea 100644
--- a/cmd/lotus-chainwatch/processor/common_actors.go
+++ b/cmd/lotus-chainwatch/processor/common_actors.go
@@ -1,7 +1,6 @@
package processor
import (
- "bytes"
"context"
"time"
@@ -9,14 +8,16 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ _init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- _init "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
- "github.com/ipfs/go-cid"
- typegen "github.com/whyrusleeping/cbor-gen"
)
func (p *Processor) setupCommonActors() error {
@@ -137,45 +138,30 @@ func (p Processor) storeActorAddresses(ctx context.Context, actors map[cid.Cid]A
addressToID := map[address.Address]address.Address{}
// HACK until genesis storage is figured out:
- addressToID[builtin.SystemActorAddr] = builtin.SystemActorAddr
- addressToID[builtin.InitActorAddr] = builtin.InitActorAddr
- addressToID[builtin.RewardActorAddr] = builtin.RewardActorAddr
- addressToID[builtin.CronActorAddr] = builtin.CronActorAddr
- addressToID[builtin.StoragePowerActorAddr] = builtin.StoragePowerActorAddr
- addressToID[builtin.StorageMarketActorAddr] = builtin.StorageMarketActorAddr
- addressToID[builtin.VerifiedRegistryActorAddr] = builtin.VerifiedRegistryActorAddr
- addressToID[builtin.BurntFundsActorAddr] = builtin.BurntFundsActorAddr
- initActor, err := p.node.StateGetActor(ctx, builtin.InitActorAddr, types.EmptyTSK)
+ addressToID[builtin2.SystemActorAddr] = builtin2.SystemActorAddr
+ addressToID[builtin2.InitActorAddr] = builtin2.InitActorAddr
+ addressToID[builtin2.RewardActorAddr] = builtin2.RewardActorAddr
+ addressToID[builtin2.CronActorAddr] = builtin2.CronActorAddr
+ addressToID[builtin2.StoragePowerActorAddr] = builtin2.StoragePowerActorAddr
+ addressToID[builtin2.StorageMarketActorAddr] = builtin2.StorageMarketActorAddr
+ addressToID[builtin2.VerifiedRegistryActorAddr] = builtin2.VerifiedRegistryActorAddr
+ addressToID[builtin2.BurntFundsActorAddr] = builtin2.BurntFundsActorAddr
+ initActor, err := p.node.StateGetActor(ctx, builtin2.InitActorAddr, types.EmptyTSK)
if err != nil {
return err
}
- initActorRaw, err := p.node.ChainReadObj(ctx, initActor.Head)
- if err != nil {
- return err
- }
-
- var initActorState _init.State
- if err := initActorState.UnmarshalCBOR(bytes.NewReader(initActorRaw)); err != nil {
- return err
- }
- ctxStore := cw_util.NewAPIIpldStore(ctx, p.node)
- addrMap, err := adt.AsMap(ctxStore, initActorState.AddressMap)
+ initActorState, err := _init.Load(cw_util.NewAPIIpldStore(ctx, p.node), initActor)
if err != nil {
return err
}
// gross..
- var actorID typegen.CborInt
- if err := addrMap.ForEach(&actorID, func(key string) error {
- longAddr, err := address.NewFromBytes([]byte(key))
+ if err := initActorState.ForEachActor(func(id abi.ActorID, addr address.Address) error {
+ idAddr, err := address.NewIDAddress(uint64(id))
if err != nil {
return err
}
- shortAddr, err := address.NewIDAddress(uint64(actorID))
- if err != nil {
- return err
- }
- addressToID[longAddr] = shortAddr
+ addressToID[addr] = idAddr
return nil
}); err != nil {
return err
diff --git a/cmd/lotus-chainwatch/processor/market.go b/cmd/lotus-chainwatch/processor/market.go
index e50ec3076..17aa1c37b 100644
--- a/cmd/lotus-chainwatch/processor/market.go
+++ b/cmd/lotus-chainwatch/processor/market.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/events/state"
)
@@ -293,7 +294,7 @@ func (p *Processor) updateMarketActorDealProposals(ctx context.Context, marketTi
if !changed {
continue
}
- changes, ok := val.(*state.MarketDealStateChanges)
+ changes, ok := val.(*market.DealStateChanges)
if !ok {
return xerrors.Errorf("Unknown type returned by Deal State AMT predicate: %T", val)
}
diff --git a/cmd/lotus-chainwatch/processor/miner.go b/cmd/lotus-chainwatch/processor/miner.go
index 13f637237..3a37a82f8 100644
--- a/cmd/lotus-chainwatch/processor/miner.go
+++ b/cmd/lotus-chainwatch/processor/miner.go
@@ -1,9 +1,7 @@
package processor
import (
- "bytes"
"context"
- "fmt"
"strings"
"time"
@@ -13,15 +11,15 @@ import (
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/events/state"
+ "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)
@@ -168,11 +166,11 @@ type SectorDealEvent struct {
}
type PartitionStatus struct {
- Terminated abi.BitField
- Expired abi.BitField
- Faulted abi.BitField
- InRecovery abi.BitField
- Recovered abi.BitField
+ Terminated bitfield.BitField
+ Expired bitfield.BitField
+ Faulted bitfield.BitField
+ InRecovery bitfield.BitField
+ Recovered bitfield.BitField
}
type minerActorInfo struct {
@@ -204,11 +202,13 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe
log.Debugw("Processed Miners", "duration", time.Since(start).String())
}()
+ stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node))
+
var out []minerActorInfo
// TODO add parallel calls if this becomes slow
for tipset, miners := range minerTips {
// get the power actors claims map
- minersClaims, err := getPowerActorClaimsMap(ctx, p.node, tipset)
+ powerState, err := getPowerActorState(ctx, p.node, tipset)
if err != nil {
return nil, err
}
@@ -218,10 +218,9 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe
var mi minerActorInfo
mi.common = act
- var claim power.Claim
// get miner claim from power actors claim map and store if found, else the miner had no claim at
// this tipset
- found, err := minersClaims.Get(adt.AddrKey(act.addr), &claim)
+ claim, found, err := powerState.MinerPower(act.addr)
if err != nil {
return nil, err
}
@@ -230,15 +229,13 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe
mi.rawPower = claim.RawBytePower
}
- // Get the miner state info
- astb, err := p.node.ChainReadObj(ctx, act.act.Head)
+ // Get the miner state
+ mas, err := miner.Load(stor, &act.act)
if err != nil {
log.Warnw("failed to find miner actor state", "address", act.addr, "error", err)
continue
}
- if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
- return nil, err
- }
+ mi.state = mas
out = append(out, mi)
}
}
@@ -322,11 +319,6 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
for _, m := range miners {
m := m
grp.Go(func() error {
- minerSectors, err := adt.AsArray(p.ctxStore, m.state.Sectors)
- if err != nil {
- return err
- }
-
changes, err := p.getMinerPreCommitChanges(ctx, m)
if err != nil {
if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
@@ -399,10 +391,12 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
}
var preCommitExpired []uint64
for _, removed := range changes.Removed {
- var sector miner.SectorOnChainInfo
- if found, err := minerSectors.Get(uint64(removed.Info.SectorNumber), §or); err != nil {
+ // TODO: we can optimize this to not load the AMT every time, if necessary.
+ si, err := m.state.GetSector(removed.Info.SectorNumber)
+ if err != nil {
return err
- } else if !found {
+ }
+ if si == nil {
preCommitExpired = append(preCommitExpired, uint64(removed.Info.SectorNumber))
}
}
@@ -653,21 +647,12 @@ func (p *Processor) storeMinerSectorEvents(ctx context.Context, sectorEvents, pr
func (p *Processor) getMinerStateAt(ctx context.Context, maddr address.Address, tskey types.TipSetKey) (miner.State, error) {
prevActor, err := p.node.StateGetActor(ctx, maddr, tskey)
if err != nil {
- return miner.State{}, err
+ return nil, err
}
- var out miner.State
- // Get the miner state info
- astb, err := p.node.ChainReadObj(ctx, prevActor.Head)
- if err != nil {
- return miner.State{}, err
- }
- if err := out.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
- return miner.State{}, err
- }
- return out, nil
+ return miner.Load(store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)), prevActor)
}
-func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorInfo) (*state.MinerPreCommitChanges, error) {
+func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorInfo) (*miner.PreCommitChanges, error) {
pred := state.NewStatePredicates(p.node)
changed, val, err := pred.OnMinerActorChange(m.common.addr, pred.OnMinerPreCommitChange())(ctx, m.common.parentTsKey, m.common.tsKey)
if err != nil {
@@ -676,11 +661,11 @@ func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorIn
if !changed {
return nil, nil
}
- out := val.(*state.MinerPreCommitChanges)
+ out := val.(*miner.PreCommitChanges)
return out, nil
}
-func (p *Processor) getMinerSectorChanges(ctx context.Context, m minerActorInfo) (*state.MinerSectorChanges, error) {
+func (p *Processor) getMinerSectorChanges(ctx context.Context, m minerActorInfo) (*miner.SectorChanges, error) {
pred := state.NewStatePredicates(p.node)
changed, val, err := pred.OnMinerActorChange(m.common.addr, pred.OnMinerSectorChange())(ctx, m.common.parentTsKey, m.common.tsKey)
if err != nil {
@@ -689,188 +674,209 @@ func (p *Processor) getMinerSectorChanges(ctx context.Context, m minerActorInfo)
if !changed {
return nil, nil
}
- out := val.(*state.MinerSectorChanges)
+ out := val.(*miner.SectorChanges)
return out, nil
}
func (p *Processor) diffMinerPartitions(ctx context.Context, m minerActorInfo, events chan<- *MinerSectorsEvent) error {
- prevMiner, err := p.getMinerStateAt(ctx, m.common.addr, m.common.tsKey)
+ prevMiner, err := p.getMinerStateAt(ctx, m.common.addr, m.common.parentTsKey)
if err != nil {
return err
}
- dlIdx := prevMiner.CurrentDeadline
curMiner := m.state
-
- // load the old deadline
- prevDls, err := prevMiner.LoadDeadlines(p.ctxStore)
+ dc, err := prevMiner.DeadlinesChanged(curMiner)
if err != nil {
return err
}
- var prevDl miner.Deadline
- if err := p.ctxStore.Get(ctx, prevDls.Due[dlIdx], &prevDl); err != nil {
- return err
+ if !dc {
+ return nil
}
+ panic("TODO")
- prevPartitions, err := prevDl.PartitionsArray(p.ctxStore)
- if err != nil {
- return err
- }
+ // FIXME: This code doesn't work.
+ // 1. We need to diff all deadlines, not just the "current" deadline.
+ // 2. We need to handle the case where we _add_ a partition (i.e.,
+ // where len(newPartitions) != len(oldPartitions)).
+ /*
- // load the new deadline
- curDls, err := curMiner.LoadDeadlines(p.ctxStore)
- if err != nil {
- return err
- }
+ // NOTE: If we change the number of deadlines in an upgrade, this will
+ // break.
- var curDl miner.Deadline
- if err := p.ctxStore.Get(ctx, curDls.Due[dlIdx], &curDl); err != nil {
- return err
- }
+ // load the old deadline
+ prevDls, err := prevMiner.LoadDeadlines(p.ctxStore)
+ if err != nil {
+ return err
+ }
+ var prevDl miner.Deadline
+ if err := p.ctxStore.Get(ctx, prevDls.Due[dlIdx], &prevDl); err != nil {
+ return err
+ }
- curPartitions, err := curDl.PartitionsArray(p.ctxStore)
- if err != nil {
- return err
- }
+ prevPartitions, err := prevDl.PartitionsArray(p.ctxStore)
+ if err != nil {
+ return err
+ }
- // TODO this can be optimized by inspecting the miner state for partitions that have changed and only inspecting those.
- var prevPart miner.Partition
- if err := prevPartitions.ForEach(&prevPart, func(i int64) error {
- var curPart miner.Partition
- if found, err := curPartitions.Get(uint64(i), &curPart); err != nil {
- return err
- } else if !found {
- log.Fatal("I don't know what this means, are partitions ever removed?")
- }
- partitionDiff, err := p.diffPartition(prevPart, curPart)
- if err != nil {
- return err
- }
+ // load the new deadline
+ curDls, err := curMiner.LoadDeadlines(p.ctxStore)
+ if err != nil {
+ return err
+ }
- recovered, err := partitionDiff.Recovered.All(miner.SectorsMax)
- if err != nil {
- return err
- }
- events <- &MinerSectorsEvent{
- MinerID: m.common.addr,
- StateRoot: m.common.stateroot,
- SectorIDs: recovered,
- Event: SectorRecovered,
- }
- inRecovery, err := partitionDiff.InRecovery.All(miner.SectorsMax)
- if err != nil {
- return err
- }
- events <- &MinerSectorsEvent{
- MinerID: m.common.addr,
- StateRoot: m.common.stateroot,
- SectorIDs: inRecovery,
- Event: SectorRecovering,
- }
- faulted, err := partitionDiff.Faulted.All(miner.SectorsMax)
- if err != nil {
- return err
- }
- events <- &MinerSectorsEvent{
- MinerID: m.common.addr,
- StateRoot: m.common.stateroot,
- SectorIDs: faulted,
- Event: SectorFaulted,
- }
- terminated, err := partitionDiff.Terminated.All(miner.SectorsMax)
- if err != nil {
- return err
- }
- events <- &MinerSectorsEvent{
- MinerID: m.common.addr,
- StateRoot: m.common.stateroot,
- SectorIDs: terminated,
- Event: SectorTerminated,
- }
- expired, err := partitionDiff.Expired.All(miner.SectorsMax)
- if err != nil {
- return err
- }
- events <- &MinerSectorsEvent{
- MinerID: m.common.addr,
- StateRoot: m.common.stateroot,
- SectorIDs: expired,
- Event: SectorExpired,
- }
+ var curDl miner.Deadline
+ if err := p.ctxStore.Get(ctx, curDls.Due[dlIdx], &curDl); err != nil {
+ return err
+ }
+
+ curPartitions, err := curDl.PartitionsArray(p.ctxStore)
+ if err != nil {
+ return err
+ }
+
+ // TODO this can be optimized by inspecting the miner state for partitions that have changed and only inspecting those.
+ var prevPart miner.Partition
+ if err := prevPartitions.ForEach(&prevPart, func(i int64) error {
+ var curPart miner.Partition
+ if found, err := curPartitions.Get(uint64(i), &curPart); err != nil {
+ return err
+ } else if !found {
+ log.Fatal("I don't know what this means, are partitions ever removed?")
+ }
+ partitionDiff, err := p.diffPartition(prevPart, curPart)
+ if err != nil {
+ return err
+ }
+
+ recovered, err := partitionDiff.Recovered.All(miner.SectorsMax)
+ if err != nil {
+ return err
+ }
+ events <- &MinerSectorsEvent{
+ MinerID: m.common.addr,
+ StateRoot: m.common.stateroot,
+ SectorIDs: recovered,
+ Event: SectorRecovered,
+ }
+ inRecovery, err := partitionDiff.InRecovery.All(miner.SectorsMax)
+ if err != nil {
+ return err
+ }
+ events <- &MinerSectorsEvent{
+ MinerID: m.common.addr,
+ StateRoot: m.common.stateroot,
+ SectorIDs: inRecovery,
+ Event: SectorRecovering,
+ }
+ faulted, err := partitionDiff.Faulted.All(miner.SectorsMax)
+ if err != nil {
+ return err
+ }
+ events <- &MinerSectorsEvent{
+ MinerID: m.common.addr,
+ StateRoot: m.common.stateroot,
+ SectorIDs: faulted,
+ Event: SectorFaulted,
+ }
+ terminated, err := partitionDiff.Terminated.All(miner.SectorsMax)
+ if err != nil {
+ return err
+ }
+ events <- &MinerSectorsEvent{
+ MinerID: m.common.addr,
+ StateRoot: m.common.stateroot,
+ SectorIDs: terminated,
+ Event: SectorTerminated,
+ }
+ expired, err := partitionDiff.Expired.All(miner.SectorsMax)
+ if err != nil {
+ return err
+ }
+ events <- &MinerSectorsEvent{
+ MinerID: m.common.addr,
+ StateRoot: m.common.stateroot,
+ SectorIDs: expired,
+ Event: SectorExpired,
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
return nil
- }); err != nil {
- return err
- }
-
- return nil
+ */
}
func (p *Processor) diffPartition(prevPart, curPart miner.Partition) (*PartitionStatus, error) {
- // all the sectors that were in previous but not in current
- allRemovedSectors, err := bitfield.SubtractBitField(prevPart.Sectors, curPart.Sectors)
+ prevLiveSectors, err := prevPart.LiveSectors()
+ if err != nil {
+ return nil, err
+ }
+ curLiveSectors, err := curPart.LiveSectors()
if err != nil {
return nil, err
}
- // list of sectors that were terminated before their expiration.
- terminatedEarlyArr, err := adt.AsArray(p.ctxStore, curPart.EarlyTerminated)
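+ // sectors that were live in the previous partition state but are gone now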
+ removedSectors, err := bitfield.SubtractBitField(prevLiveSectors, curLiveSectors)
if err != nil {
return nil, err
}
- expired := bitfield.New()
- var bf abi.BitField
- if err := terminatedEarlyArr.ForEach(&bf, func(i int64) error {
- // expired = all removals - termination
- expirations, err := bitfield.SubtractBitField(allRemovedSectors, bf)
- if err != nil {
- return err
- }
- // merge with expired sectors from other epochs
- expired, err = bitfield.MergeBitFields(expirations, expired)
- if err != nil {
- return nil
- }
- return nil
- }); err != nil {
- return nil, err
- }
-
- // terminated = all removals - expired
- terminated, err := bitfield.SubtractBitField(allRemovedSectors, expired)
+ prevRecoveries, err := prevPart.RecoveringSectors()
if err != nil {
return nil, err
}
- // faults in current but not previous
- faults, err := bitfield.SubtractBitField(curPart.Recoveries, prevPart.Recoveries)
+ curRecoveries, err := curPart.RecoveringSectors()
if err != nil {
return nil, err
}
- // recoveries in current but not previous
- inRecovery, err := bitfield.SubtractBitField(curPart.Recoveries, prevPart.Recoveries)
+ newRecoveries, err := bitfield.SubtractBitField(curRecoveries, prevRecoveries)
+ if err != nil {
+ return nil, err
+ }
+
+ prevFaults, err := prevPart.FaultySectors()
+ if err != nil {
+ return nil, err
+ }
+
+ curFaults, err := curPart.FaultySectors()
+ if err != nil {
+ return nil, err
+ }
+
+ newFaults, err := bitfield.SubtractBitField(curFaults, prevFaults)
if err != nil {
return nil, err
}
// all current good sectors
- newActiveSectors, err := curPart.ActiveSectors()
+ curActiveSectors, err := curPart.ActiveSectors()
if err != nil {
return nil, err
}
// sectors that were previously faulty and are now active are considered recovered.
- recovered, err := bitfield.IntersectBitField(prevPart.Faults, newActiveSectors)
+ recovered, err := bitfield.IntersectBitField(prevFaults, curActiveSectors)
if err != nil {
return nil, err
}
+ // TODO: distinguish between "terminated" and "expired" sectors. The
+ // previous code here never had a chance of working in the first place,
+ // so I'm not going to try to replicate it right now.
+ //
+ // How? If the sector expires before it should (according to sector
+ // info) and it wasn't replaced by a pre-commit deleted in this change
+ // set, it was "early terminated".
+
return &PartitionStatus{
- Terminated: terminated,
- Expired: expired,
- Faulted: faults,
- InRecovery: inRecovery,
+ Terminated: bitfield.New(),
+ Expired: removedSectors,
+ Faulted: newFaults,
+ InRecovery: newRecoveries,
Recovered: recovered,
}, nil
}
@@ -1020,22 +1026,10 @@ func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
}
// load the power actor state at the tipset `ts`.
-func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
- powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
+func getPowerActorState(ctx context.Context, api api.FullNode, ts types.TipSetKey) (power.State, error) {
+ powerActor, err := api.StateGetActor(ctx, power.Address, ts)
if err != nil {
return nil, err
}
-
- powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
- if err != nil {
- return nil, err
- }
-
- var powerActorState power.State
- if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
- return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
- }
-
- s := cw_util.NewAPIIpldStore(ctx, api)
- return adt.AsMap(s, powerActorState.Claims)
+ return power.Load(cw_util.NewAPIIpldStore(ctx, api), powerActor)
}
diff --git a/cmd/lotus-chainwatch/processor/power.go b/cmd/lotus-chainwatch/processor/power.go
index 6fa03e943..726a46706 100644
--- a/cmd/lotus-chainwatch/processor/power.go
+++ b/cmd/lotus-chainwatch/processor/power.go
@@ -1,16 +1,14 @@
package processor
import (
- "bytes"
"context"
"time"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
)
type powerActorInfo struct {
@@ -22,10 +20,7 @@ type powerActorInfo struct {
totalQualityAdjustedBytesCommitted big.Int
totalPledgeCollateral big.Int
- newRawBytes big.Int
- newQualityAdjustedBytes big.Int
- newPledgeCollateral big.Int
- newQAPowerSmoothed *smoothing.FilterEstimate
+ qaPowerSmoothed builtin.FilterEstimate
minerCount int64
minerCountAboveMinimumPower int64
@@ -44,10 +39,6 @@ create table if not exists chain_power
constraint power_smoothing_estimates_pk
primary key,
- new_raw_bytes_power text not null,
- new_qa_bytes_power text not null,
- new_pledge_collateral text not null,
-
total_raw_bytes_power text not null,
total_raw_bytes_committed text not null,
total_qa_bytes_power text not null,
@@ -92,35 +83,50 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
var pw powerActorInfo
pw.common = act
- powerActor, err := p.node.StateGetActor(ctx, builtin.StoragePowerActorAddr, tipset)
+ powerActorState, err := getPowerActorState(ctx, p.node, tipset)
if err != nil {
return nil, xerrors.Errorf("get power state (@ %s): %w", pw.common.stateroot.String(), err)
}
- powerStateRaw, err := p.node.ChainReadObj(ctx, powerActor.Head)
+ totalPower, err := powerActorState.TotalPower()
if err != nil {
- return nil, xerrors.Errorf("read state obj (@ %s): %w", pw.common.stateroot.String(), err)
+ return nil, xerrors.Errorf("failed to compute total power: %w", err)
}
- var powerActorState power.State
- if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerStateRaw)); err != nil {
- return nil, xerrors.Errorf("unmarshal state (@ %s): %w", pw.common.stateroot.String(), err)
+ totalCommitted, err := powerActorState.TotalCommitted()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to compute total committed: %w", err)
}
- pw.totalRawBytes = powerActorState.TotalRawBytePower
- pw.totalRawBytesCommitted = powerActorState.TotalBytesCommitted
- pw.totalQualityAdjustedBytes = powerActorState.TotalQualityAdjPower
- pw.totalQualityAdjustedBytesCommitted = powerActorState.TotalQABytesCommitted
- pw.totalPledgeCollateral = powerActorState.TotalPledgeCollateral
+ totalLocked, err := powerActorState.TotalLocked()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to compute total locked: %w", err)
+ }
- pw.newRawBytes = powerActorState.ThisEpochRawBytePower
- pw.newQualityAdjustedBytes = powerActorState.ThisEpochQualityAdjPower
- pw.newPledgeCollateral = powerActorState.ThisEpochPledgeCollateral
- pw.newQAPowerSmoothed = powerActorState.ThisEpochQAPowerSmoothed
+ powerSmoothed, err := powerActorState.TotalPowerSmoothed()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to determine smoothed power: %w", err)
+ }
- pw.minerCount = powerActorState.MinerCount
- pw.minerCountAboveMinimumPower = powerActorState.MinerAboveMinPowerCount
- out = append(out, pw)
+ // NOTE: this doesn't set new* fields. Previously, we
+ // filled these using ThisEpoch* fields from the actor
+ // state, but these fields are effectively internal
+ // state and don't represent "new" power, as was
+ // assumed.
+
+ participatingMiners, totalMiners, err := powerActorState.MinerCounts()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to count miners: %w", err)
+ }
+
+ pw.totalRawBytes = totalPower.RawBytePower
+ pw.totalQualityAdjustedBytes = totalPower.QualityAdjPower
+ pw.totalRawBytesCommitted = totalCommitted.RawBytePower
+ pw.totalQualityAdjustedBytesCommitted = totalCommitted.QualityAdjPower
+ pw.totalPledgeCollateral = totalLocked
+ pw.qaPowerSmoothed = powerSmoothed
+ pw.minerCountAboveMinimumPower = int64(participatingMiners)
+ pw.minerCount = int64(totalMiners)
}
}
@@ -142,7 +148,7 @@ func (p *Processor) storePowerSmoothingEstimates(powerStates []powerActorInfo) e
return xerrors.Errorf("prep chain_power: %w", err)
}
- stmt, err := tx.Prepare(`copy cp (state_root, new_raw_bytes_power, new_qa_bytes_power, new_pledge_collateral, total_raw_bytes_power, total_raw_bytes_committed, total_qa_bytes_power, total_qa_bytes_committed, total_pledge_collateral, qa_smoothed_position_estimate, qa_smoothed_velocity_estimate, miner_count, minimum_consensus_miner_count) from stdin;`)
+ stmt, err := tx.Prepare(`copy cp (state_root, total_raw_bytes_power, total_raw_bytes_committed, total_qa_bytes_power, total_qa_bytes_committed, total_pledge_collateral, qa_smoothed_position_estimate, qa_smoothed_velocity_estimate, miner_count, minimum_consensus_miner_count) from stdin;`)
if err != nil {
return xerrors.Errorf("prepare tmp chain_power: %w", err)
}
@@ -150,9 +156,6 @@ func (p *Processor) storePowerSmoothingEstimates(powerStates []powerActorInfo) e
for _, ps := range powerStates {
if _, err := stmt.Exec(
ps.common.stateroot.String(),
- ps.newRawBytes.String(),
- ps.newQualityAdjustedBytes.String(),
- ps.newPledgeCollateral.String(),
ps.totalRawBytes.String(),
ps.totalRawBytesCommitted.String(),
@@ -160,8 +163,8 @@ func (p *Processor) storePowerSmoothingEstimates(powerStates []powerActorInfo) e
ps.totalQualityAdjustedBytesCommitted.String(),
ps.totalPledgeCollateral.String(),
- ps.newQAPowerSmoothed.PositionEstimate.String(),
- ps.newQAPowerSmoothed.VelocityEstimate.String(),
+ ps.qaPowerSmoothed.PositionEstimate.String(),
+ ps.qaPowerSmoothed.VelocityEstimate.String(),
ps.minerCount,
ps.minerCountAboveMinimumPower,
diff --git a/cmd/lotus-chainwatch/processor/processor.go b/cmd/lotus-chainwatch/processor/processor.go
index e6c2ffb94..bce2b9fb7 100644
--- a/cmd/lotus-chainwatch/processor/processor.go
+++ b/cmd/lotus-chainwatch/processor/processor.go
@@ -14,7 +14,7 @@ import (
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/lotus/api"
diff --git a/cmd/lotus-chainwatch/processor/reward.go b/cmd/lotus-chainwatch/processor/reward.go
index 7068c1a93..72a329c87 100644
--- a/cmd/lotus-chainwatch/processor/reward.go
+++ b/cmd/lotus-chainwatch/processor/reward.go
@@ -1,18 +1,19 @@
package processor
import (
- "bytes"
"context"
"time"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/types"
+
+ cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)
type rewardActorInfo struct {
@@ -21,16 +22,61 @@ type rewardActorInfo struct {
cumSumBaselinePower big.Int
cumSumRealizedPower big.Int
- effectiveNetworkTime int64
+ effectiveNetworkTime abi.ChainEpoch
effectiveBaselinePower big.Int
+ // NOTE: These variables are wrong. Talk to @ZX about fixing. These _do
+ // not_ represent "new" anything.
newBaselinePower big.Int
newBaseReward big.Int
- newSmoothingEstimate *smoothing.FilterEstimate
+ newSmoothingEstimate builtin.FilterEstimate
totalMinedReward big.Int
}
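+
+// set copies the tracked reward fields out of a version-agnostic reward.State,
+// wrapping each getter error with the state root for context.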
+func (rw *rewardActorInfo) set(s reward.State) (err error) {
+ rw.cumSumBaselinePower, err = s.CumsumBaseline()
+ if err != nil {
+ return xerrors.Errorf("getting cumsum baseline power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.cumSumRealizedPower, err = s.CumsumRealized()
+ if err != nil {
+ return xerrors.Errorf("getting cumsum realized power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.effectiveNetworkTime, err = s.EffectiveNetworkTime()
+ if err != nil {
+ return xerrors.Errorf("getting effective network time (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.effectiveBaselinePower, err = s.EffectiveBaselinePower()
+ if err != nil {
+ return xerrors.Errorf("getting effective baseline power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.totalMinedReward, err = s.TotalStoragePowerReward()
+ if err != nil {
+ return xerrors.Errorf("getting total mined (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.newBaselinePower, err = s.ThisEpochBaselinePower()
+ if err != nil {
+ return xerrors.Errorf("getting this epoch baseline power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.newBaseReward, err = s.ThisEpochReward()
+ if err != nil {
+ return xerrors.Errorf("getting this epoch baseline power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+
+ rw.newSmoothingEstimate, err = s.ThisEpochRewardSmoothed()
+ if err != nil {
+ return xerrors.Errorf("getting this epoch baseline power (@ %s): %w", rw.common.stateroot.String(), err)
+ }
+ return nil
+}
+
func (p *Processor) setupRewards() error {
tx, err := p.db.Begin()
if err != nil {
@@ -89,29 +135,19 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
rw.common = act
// get reward actor states at each tipset once for all updates
- rewardActor, err := p.node.StateGetActor(ctx, builtin.RewardActorAddr, tipset)
+ rewardActor, err := p.node.StateGetActor(ctx, reward.Address, tipset)
if err != nil {
return nil, xerrors.Errorf("get reward state (@ %s): %w", rw.common.stateroot.String(), err)
}
- rewardStateRaw, err := p.node.ChainReadObj(ctx, rewardActor.Head)
+ rewardActorState, err := reward.Load(cw_util.NewAPIIpldStore(ctx, p.node), rewardActor)
if err != nil {
return nil, xerrors.Errorf("read state obj (@ %s): %w", rw.common.stateroot.String(), err)
}
-
- var rewardActorState reward.State
- if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
- return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
+ if err := rw.set(rewardActorState); err != nil {
+ return nil, err
}
- rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
- rw.cumSumRealizedPower = rewardActorState.CumsumRealized
- rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
- rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
- rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
- rw.newBaseReward = rewardActorState.ThisEpochReward
- rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
- rw.totalMinedReward = rewardActorState.TotalMined
out = append(out, rw)
}
}
@@ -126,29 +162,19 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
rw.common.stateroot = tipset.ParentState()
rw.common.parentTsKey = tipset.Parents()
// get reward actor states at each tipset once for all updates
- rewardActor, err := p.node.StateGetActor(ctx, builtin.RewardActorAddr, tsKey)
+ rewardActor, err := p.node.StateGetActor(ctx, reward.Address, tsKey)
if err != nil {
return nil, err
}
- rewardStateRaw, err := p.node.ChainReadObj(ctx, rewardActor.Head)
+ rewardActorState, err := reward.Load(cw_util.NewAPIIpldStore(ctx, p.node), rewardActor)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("read state obj (@ %s): %w", rw.common.stateroot.String(), err)
}
- var rewardActorState reward.State
- if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
+ if err := rw.set(rewardActorState); err != nil {
return nil, err
}
-
- rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
- rw.cumSumRealizedPower = rewardActorState.CumsumRealized
- rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
- rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
- rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
- rw.newBaseReward = rewardActorState.ThisEpochReward
- rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
- rw.totalMinedReward = rewardActorState.TotalMined
out = append(out, rw)
}
@@ -180,7 +206,7 @@ func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardAct
rewardState.common.stateroot.String(),
rewardState.cumSumBaselinePower.String(),
rewardState.cumSumRealizedPower.String(),
- rewardState.effectiveNetworkTime,
+ uint64(rewardState.effectiveNetworkTime),
rewardState.effectiveBaselinePower.String(),
rewardState.newBaselinePower.String(),
rewardState.newBaseReward.String(),
diff --git a/cmd/lotus-chainwatch/syncer/sync.go b/cmd/lotus-chainwatch/syncer/sync.go
index 52a36fc9e..609b71088 100644
--- a/cmd/lotus-chainwatch/syncer/sync.go
+++ b/cmd/lotus-chainwatch/syncer/sync.go
@@ -136,7 +136,8 @@ create unique index if not exists block_cid_uindex
on blocks (cid,height);
create materialized view if not exists state_heights
- as select distinct height, parentstateroot from blocks;
+ as select min(b.height) height, b.parentstateroot
+ from blocks b group by b.parentstateroot;
create index if not exists state_heights_height_index
on state_heights (height);
diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go
new file mode 100644
index 000000000..0a6365dbd
--- /dev/null
+++ b/cmd/lotus-gateway/api.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/ipfs/go-cid"
+
+ "go.opencensus.io/trace"
+)
+
+const LookbackCap = time.Hour
+
+var (
+ ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap)
+)
+
+type GatewayAPI struct {
+ api api.FullNode
+}
+
+func (a *GatewayAPI) getTipsetTimestamp(ctx context.Context, tsk types.TipSetKey) (time.Time, error) {
+ if tsk.IsEmpty() {
+ return time.Now(), nil
+ }
+
+ ts, err := a.api.ChainGetTipSet(ctx, tsk)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ return time.Unix(int64(ts.Blocks()[0].Timestamp), 0), nil
+}
+
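+// checkTipset rejects requests that reference a tipset whose timestamp is older
+// than LookbackCap, keeping the public gateway from serving deep-history lookups.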
+func (a *GatewayAPI) checkTipset(ctx context.Context, ts types.TipSetKey) error {
+ when, err := a.getTipsetTimestamp(ctx, ts)
+ if err != nil {
+ return err
+ }
+
+ if time.Since(when) > LookbackCap {
+ return ErrLookbackTooLong
+ }
+
+ return nil
+}
+
+func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
+ ctx, span := trace.StartSpan(ctx, "StateGetActor")
+ defer span.End()
+
+ if err := a.checkTipset(ctx, ts); err != nil {
+ return nil, fmt.Errorf("bad tipset: %w", err)
+ }
+
+ return a.api.StateGetActor(ctx, actor, ts)
+}
+
+func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ ctx, span := trace.StartSpan(ctx, "ChainHead")
+ defer span.End()
+ // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify)
+
+ return a.api.ChainHead(ctx)
+}
+
+func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
+ ctx, span := trace.StartSpan(ctx, "ChainGetTipSet")
+ defer span.End()
+
+ if err := a.checkTipset(ctx, tsk); err != nil {
+ return nil, fmt.Errorf("bad tipset: %w", err)
+ }
+
+ // TODO: since we're limiting lookbacks, should just cache this (could really even cache the json response bytes)
+ return a.api.ChainGetTipSet(ctx, tsk)
+}
+
+func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
+ ctx, span := trace.StartSpan(ctx, "MpoolPush")
+ defer span.End()
+
+ // TODO: additional anti-spam checks
+
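+ // the gateway serves unauthenticated clients, so use the untrusted push variant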
+ return a.api.MpoolPushUntrusted(ctx, sm)
+}
diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go
new file mode 100644
index 000000000..c19599084
--- /dev/null
+++ b/cmd/lotus-gateway/main.go
@@ -0,0 +1,112 @@
+package main
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "os"
+
+ "github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/lotus/build"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/lib/lotuslog"
+ logging "github.com/ipfs/go-log"
+
+ "github.com/gorilla/mux"
+ "github.com/urfave/cli/v2"
+)
+
+var log = logging.Logger("gateway")
+
+func main() {
+ lotuslog.SetupLogLevels()
+
+ local := []*cli.Command{
+ runCmd,
+ }
+
+ app := &cli.App{
+ Name: "lotus-gateway",
+ Usage: "Public API server for lotus",
+ Version: build.UserVersion(),
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ EnvVars: []string{"LOTUS_PATH"},
+ Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
+ },
+ },
+
+ Commands: local,
+ }
+ app.Setup()
+
+ if err := app.Run(os.Args); err != nil {
+ log.Warnf("%+v", err)
+ return
+ }
+}
+
+var runCmd = &cli.Command{
+ Name: "run",
+ Usage: "Start api server",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "listen",
+ Usage: "host address and port the api server will listen on",
+ Value: "0.0.0.0:2346",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ log.Info("Starting lotus gateway")
+
+ ctx := lcli.ReqContext(cctx)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ address := cctx.String("listen")
+ mux := mux.NewRouter()
+
+ log.Info("Setting up API endpoint at " + address)
+
+ rpcServer := jsonrpc.NewServer()
+ rpcServer.Register("Filecoin", &GatewayAPI{api: api})
+
+ mux.Handle("/rpc/v0", rpcServer)
+ mux.PathPrefix("/").Handler(http.DefaultServeMux)
+
+ /*ah := &auth.Handler{
+ Verify: nodeApi.AuthVerify,
+ Next: mux.ServeHTTP,
+ }*/
+
+ srv := &http.Server{
+ Handler: mux,
+ BaseContext: func(listener net.Listener) context.Context {
+ return ctx
+ },
+ }
+
+ go func() {
+ <-ctx.Done()
+ log.Warn("Shutting down...")
+ if err := srv.Shutdown(context.TODO()); err != nil {
+ log.Errorf("shutting down RPC server failed: %s", err)
+ }
+ log.Warn("Graceful shutdown successful")
+ }()
+
+ nl, err := net.Listen("tcp", address)
+ if err != nil {
+ return err
+ }
+
+ return srv.Serve(nl)
+ },
+}
diff --git a/cmd/lotus-keygen/main.go b/cmd/lotus-keygen/main.go
index 8835a0928..4b971cf48 100644
--- a/cmd/lotus-keygen/main.go
+++ b/cmd/lotus-keygen/main.go
@@ -5,10 +5,10 @@ import (
"fmt"
"os"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/urfave/cli/v2"
)
diff --git a/cmd/lotus-pcr/main.go b/cmd/lotus-pcr/main.go
index 80732accf..5491e4af2 100644
--- a/cmd/lotus-pcr/main.go
+++ b/cmd/lotus-pcr/main.go
@@ -1,8 +1,10 @@
package main
import (
+ "bufio"
"bytes"
"context"
+ "encoding/csv"
"fmt"
"io/ioutil"
"net/http"
@@ -12,6 +14,14 @@ import (
"strconv"
"time"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
@@ -20,15 +30,16 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
-
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/tools/stats"
)
@@ -38,6 +49,8 @@ var log = logging.Logger("main")
func main() {
local := []*cli.Command{
runCmd,
+ recoverMinersCmd,
+ findMinersCmd,
versionCmd,
}
@@ -101,6 +114,186 @@ var versionCmd = &cli.Command{
},
}
+var findMinersCmd = &cli.Command{
+ Name: "find-miners",
+ Usage: "find miners with a desired minimum balance",
+ Description: `Find miners returns a list of miners and their balances that are below a
+ threshold value. By default only the miner actor available balance is considered but other
+ account balances can be included by enabling them through the flags.
+
+ Examples
+ Find all miners with an available balance below 100 FIL
+
+ lotus-pcr find-miners --threshold 100
+
+ Find all miners with a balance below zero, which includes the owner and worker balances
+
+ lotus-pcr find-miners --threshold 0 --owner --worker
+`,
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "no-sync",
+ EnvVars: []string{"LOTUS_PCR_NO_SYNC"},
+ Usage: "do not wait for chain sync to complete",
+ },
+ &cli.IntFlag{
+ Name: "threshold",
+ EnvVars: []string{"LOTUS_PCR_THRESHOLD"},
+ Usage: "balance below this limit will be printed",
+ Value: 0,
+ },
+ &cli.BoolFlag{
+ Name: "owner",
+ Usage: "include owner balance",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "worker",
+ Usage: "include worker balance",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "control",
+ Usage: "include control balance",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.Background()
+ api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer closer()
+
+ if !cctx.Bool("no-sync") {
+ if err := stats.WaitForSyncComplete(ctx, api); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ owner := cctx.Bool("owner")
+ worker := cctx.Bool("worker")
+ control := cctx.Bool("control")
+ threshold := uint64(cctx.Int("threshold"))
+
+ rf := &refunder{
+ api: api,
+ threshold: types.FromFil(threshold),
+ }
+
+ refundTipset, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ balanceRefund, err := rf.FindMiners(ctx, refundTipset, NewMinersRefund(), owner, worker, control)
+ if err != nil {
+ return err
+ }
+
+ for _, maddr := range balanceRefund.Miners() {
+ fmt.Printf("%s\t%s\n", maddr, types.FIL(balanceRefund.GetRefund(maddr)))
+ }
+
+ return nil
+ },
+}
+
+var recoverMinersCmd = &cli.Command{
+ Name: "recover-miners",
+ Usage: "Ensure all miners with a negative available balance have a FIL surplus across accounts",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ EnvVars: []string{"LOTUS_PCR_FROM"},
+ Usage: "wallet address to send refund from",
+ },
+ &cli.BoolFlag{
+ Name: "no-sync",
+ EnvVars: []string{"LOTUS_PCR_NO_SYNC"},
+ Usage: "do not wait for chain sync to complete",
+ },
+ &cli.BoolFlag{
+ Name: "dry-run",
+ EnvVars: []string{"LOTUS_PCR_DRY_RUN"},
+ Usage: "do not send any messages",
+ Value: false,
+ },
+ &cli.StringFlag{
+ Name: "output",
+ Usage: "dump data as a csv format to this file",
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-cutoff",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_CUTOFF"},
+ Usage: "maximum amount of FIL that can be sent to any one miner before refund percent is applied",
+ Value: 3000,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-bonus",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_BONUS"},
+ Usage: "additional FIL to send to each miner",
+ Value: 5,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-refund-percent",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_REFUND_PERCENT"},
+ Usage: "percent of refund to issue",
+ Value: 110,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.Background()
+ api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer closer()
+
+ from, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return xerrors.Errorf("parsing source address (provide correct --from flag!): %w", err)
+ }
+
+ if !cctx.Bool("no-sync") {
+ if err := stats.WaitForSyncComplete(ctx, api); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ dryRun := cctx.Bool("dry-run")
+ minerRecoveryRefundPercent := cctx.Int("miner-recovery-refund-percent")
+ minerRecoveryCutoff := uint64(cctx.Int("miner-recovery-cutoff"))
+ minerRecoveryBonus := uint64(cctx.Int("miner-recovery-bonus"))
+
+ rf := &refunder{
+ api: api,
+ wallet: from,
+ dryRun: dryRun,
+ minerRecoveryRefundPercent: minerRecoveryRefundPercent,
+ minerRecoveryCutoff: types.FromFil(minerRecoveryCutoff),
+ minerRecoveryBonus: types.FromFil(minerRecoveryBonus),
+ }
+
+ refundTipset, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ balanceRefund, err := rf.EnsureMinerMinimums(ctx, refundTipset, NewMinersRefund(), cctx.String("output"))
+ if err != nil {
+ return err
+ }
+
+ if err := rf.Refund(ctx, "refund to recover miner", refundTipset, balanceRefund, 0); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
+
var runCmd = &cli.Command{
Name: "run",
Usage: "Start message reimpursement",
@@ -116,16 +309,22 @@ var runCmd = &cli.Command{
Usage: "do not wait for chain sync to complete",
},
&cli.IntFlag{
- Name: "percent-extra",
- EnvVars: []string{"LOTUS_PCR_PERCENT_EXTRA"},
- Usage: "extra funds to send above the refund",
- Value: 3,
+ Name: "refund-percent",
+ EnvVars: []string{"LOTUS_PCR_REFUND_PERCENT"},
+ Usage: "percent of refund to issue",
+ Value: 103,
},
&cli.IntFlag{
Name: "max-message-queue",
EnvVars: []string{"LOTUS_PCR_MAX_MESSAGE_QUEUE"},
Usage: "set the maximum number of messages that can be queue in the mpool",
- Value: 3000,
+ Value: 300,
+ },
+ &cli.IntFlag{
+ Name: "aggregate-tipsets",
+ EnvVars: []string{"LOTUS_PCR_AGGREGATE_TIPSETS"},
+ Usage: "number of tipsets to process before sending messages",
+ Value: 1,
},
&cli.BoolFlag{
Name: "dry-run",
@@ -145,12 +344,66 @@ var runCmd = &cli.Command{
Usage: "process ProveCommitSector messages",
Value: true,
},
+ &cli.BoolFlag{
+ Name: "windowed-post",
+ EnvVars: []string{"LOTUS_PCR_WINDOWED_POST"},
+ Usage: "process SubmitWindowedPoSt messages and refund gas fees",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "storage-deals",
+ EnvVars: []string{"LOTUS_PCR_STORAGE_DEALS"},
+ Usage: "process PublishStorageDeals messages and refund gas fees",
+ Value: false,
+ },
&cli.IntFlag{
Name: "head-delay",
EnvVars: []string{"LOTUS_PCR_HEAD_DELAY"},
Usage: "the number of tipsets to delay message processing to smooth chain reorgs",
Value: int(build.MessageConfidence),
},
+ &cli.BoolFlag{
+ Name: "miner-recovery",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY"},
+ Usage: "run the miner recovery job",
+ Value: false,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-period",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_PERIOD"},
+ Usage: "interval between running miner recovery",
+ Value: 2880,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-cutoff",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_CUTOFF"},
+ Usage: "maximum amount of FIL that can be sent to any one miner before refund percent is applied",
+ Value: 3000,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-bonus",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_BONUS"},
+ Usage: "additional FIL to send to each miner",
+ Value: 5,
+ },
+ &cli.IntFlag{
+ Name: "miner-recovery-refund-percent",
+ EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_REFUND_PERCENT"},
+ Usage: "percent of refund to issue",
+ Value: 110,
+ },
+ &cli.StringFlag{
+ Name: "pre-fee-cap-max",
+ EnvVars: []string{"LOTUS_PCR_PRE_FEE_CAP_MAX"},
+ Usage: "messages with a fee cap larger than this will be skipped when processing pre commit messages",
+ Value: "0.000000001",
+ },
+ &cli.StringFlag{
+ Name: "prove-fee-cap-max",
+ EnvVars: []string{"LOTUS_PCR_PROVE_FEE_CAP_MAX"},
+ Usage: "messages with a prove cap larger than this will be skipped when processing pre commit messages",
+ Value: "0.000000001",
+ },
},
Action: func(cctx *cli.Context) error {
go func() {
@@ -189,31 +442,90 @@ var runCmd = &cli.Command{
log.Fatal(err)
}
- percentExtra := cctx.Int("percent-extra")
+ refundPercent := cctx.Int("refund-percent")
maxMessageQueue := cctx.Int("max-message-queue")
dryRun := cctx.Bool("dry-run")
preCommitEnabled := cctx.Bool("pre-commit")
proveCommitEnabled := cctx.Bool("prove-commit")
+ windowedPoStEnabled := cctx.Bool("windowed-post")
+ publishStorageDealsEnabled := cctx.Bool("storage-deals")
+ aggregateTipsets := cctx.Int("aggregate-tipsets")
+ minerRecoveryEnabled := cctx.Bool("miner-recovery")
+ minerRecoveryPeriod := abi.ChainEpoch(int64(cctx.Int("miner-recovery-period")))
+ minerRecoveryRefundPercent := cctx.Int("miner-recovery-refund-percent")
+ minerRecoveryCutoff := uint64(cctx.Int("miner-recovery-cutoff"))
+ minerRecoveryBonus := uint64(cctx.Int("miner-recovery-bonus"))
- rf := &refunder{
- api: api,
- wallet: from,
- percentExtra: percentExtra,
- dryRun: dryRun,
- preCommitEnabled: preCommitEnabled,
- proveCommitEnabled: proveCommitEnabled,
+ preFeeCapMax, err := types.ParseFIL(cctx.String("pre-fee-cap-max"))
+ if err != nil {
+ return err
}
+ proveFeeCapMax, err := types.ParseFIL(cctx.String("prove-fee-cap-max"))
+ if err != nil {
+ return err
+ }
+
+ rf := &refunder{
+ api: api,
+ wallet: from,
+ refundPercent: refundPercent,
+ minerRecoveryRefundPercent: minerRecoveryRefundPercent,
+ minerRecoveryCutoff: types.FromFil(minerRecoveryCutoff),
+ minerRecoveryBonus: types.FromFil(minerRecoveryBonus),
+ dryRun: dryRun,
+ preCommitEnabled: preCommitEnabled,
+ proveCommitEnabled: proveCommitEnabled,
+ windowedPoStEnabled: windowedPoStEnabled,
+ publishStorageDealsEnabled: publishStorageDealsEnabled,
+ preFeeCapMax: types.BigInt(preFeeCapMax),
+ proveFeeCapMax: types.BigInt(proveFeeCapMax),
+ }
+
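+ // Refunds accumulate across "aggregate-tipsets" rounds before a single batch of refund messages is sent.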
+ refunds := NewMinersRefund()
+ rounds := 0
+ nextMinerRecovery := r.MinerRecoveryHeight() + minerRecoveryPeriod
+
for tipset := range tipsetsCh {
- refunds, err := rf.ProcessTipset(ctx, tipset)
+ refunds, err = rf.ProcessTipset(ctx, tipset, refunds)
if err != nil {
return err
}
- if err := rf.Refund(ctx, tipset, refunds); err != nil {
+ refundTipset, err := api.ChainHead(ctx)
+ if err != nil {
return err
}
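+ // Run the miner recovery job against the current chain head once every miner-recovery-period epochs.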
+ if minerRecoveryEnabled && refundTipset.Height() >= nextMinerRecovery {
+ recoveryRefund, err := rf.EnsureMinerMinimums(ctx, refundTipset, NewMinersRefund(), "")
+ if err != nil {
+ return err
+ }
+
+ if err := rf.Refund(ctx, "refund to recover miners", refundTipset, recoveryRefund, 0); err != nil {
+ return err
+ }
+
+ if err := r.SetMinerRecoveryHeight(tipset.Height()); err != nil {
+ return err
+ }
+
+ nextMinerRecovery = r.MinerRecoveryHeight() + minerRecoveryPeriod
+ }
+
+ rounds = rounds + 1
+ if rounds < aggregateTipsets {
+ continue
+ }
+
+ if err := rf.Refund(ctx, "refund stats", refundTipset, refunds, rounds); err != nil {
+ return err
+ }
+
+ rounds = 0
+ refunds = NewMinersRefund()
+
if err := r.SetHeight(tipset.Height()); err != nil {
return err
}
@@ -247,13 +559,15 @@ var runCmd = &cli.Command{
}
type MinersRefund struct {
- refunds map[address.Address]types.BigInt
- count int
+ refunds map[address.Address]types.BigInt
+ count int
+ totalRefunds types.BigInt
}
func NewMinersRefund() *MinersRefund {
return &MinersRefund{
- refunds: make(map[address.Address]types.BigInt),
+ refunds: make(map[address.Address]types.BigInt),
+ totalRefunds: types.NewInt(0),
}
}
@@ -263,7 +577,7 @@ func (m *MinersRefund) Track(addr address.Address, value types.BigInt) {
}
m.count = m.count + 1
-
+ m.totalRefunds = types.BigAdd(m.totalRefunds, value)
m.refunds[addr] = types.BigAdd(m.refunds[addr], value)
}
@@ -271,6 +585,10 @@ func (m *MinersRefund) Count() int {
return m.count
}
+func (m *MinersRefund) TotalRefunds() types.BigInt {
+ return m.totalRefunds
+}
+
func (m *MinersRefund) Miners() []address.Address {
miners := make([]address.Address, 0, len(m.refunds))
for addr := range m.refunds {
@@ -288,24 +606,406 @@ type refunderNodeApi interface {
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error)
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
StateMinerInitialPledgeCollateral(ctx context.Context, addr address.Address, precommitInfo miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error)
+ StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
StateSectorPreCommitInfo(ctx context.Context, addr address.Address, sector abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
+ StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+ StateMinerSectors(ctx context.Context, addr address.Address, filter *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
+ StateMinerFaults(ctx context.Context, addr address.Address, tsk types.TipSetKey) (bitfield.BitField, error)
+ StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
+ StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error)
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error)
GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
WalletBalance(ctx context.Context, addr address.Address) (types.BigInt, error)
}
type refunder struct {
- api refunderNodeApi
- wallet address.Address
- percentExtra int
- dryRun bool
- preCommitEnabled bool
- proveCommitEnabled bool
+ api refunderNodeApi
+ wallet address.Address
+ refundPercent int
+ minerRecoveryRefundPercent int
+ minerRecoveryCutoff big.Int
+ minerRecoveryBonus big.Int
+ dryRun bool
+ preCommitEnabled bool
+ proveCommitEnabled bool
+ windowedPoStEnabled bool
+ publishStorageDealsEnabled bool
+ threshold big.Int
+
+ preFeeCapMax big.Int
+ proveFeeCapMax big.Int
}
-func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*MinersRefund, error) {
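+// FindMiners tracks a refund for every miner whose available balance plus the balances of the selected
+// owner, worker and control addresses falls below the refunder threshold.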
+func (r *refunder) FindMiners(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund, owner, worker, control bool) (*MinersRefund, error) {
+ miners, err := r.api.StateListMiners(ctx, tipset.Key())
+ if err != nil {
+ return nil, err
+ }
+
+ for _, maddr := range miners {
+ mact, err := r.api.StateGetActor(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ if !mact.Balance.GreaterThan(big.Zero()) {
+ continue
+ }
+
+ minerAvailableBalance, err := r.api.StateMinerAvailableBalance(ctx, maddr, tipset.Key())
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ // Look up and find all addresses associated with the miner
+ minerInfo, err := r.api.StateMinerInfo(ctx, maddr, tipset.Key())
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ allAddresses := []address.Address{}
+
+ if worker {
+ allAddresses = append(allAddresses, minerInfo.Worker)
+ }
+
+ if owner {
+ allAddresses = append(allAddresses, minerInfo.Owner)
+ }
+
+ if control {
+ allAddresses = append(allAddresses, minerInfo.ControlAddresses...)
+ }
+
+ // Sum the balances of all the addresses
+ addrSum := big.Zero()
+ addrCheck := make(map[address.Address]struct{}, len(allAddresses))
+ for _, addr := range allAddresses {
+ if _, found := addrCheck[addr]; !found {
+ balance, err := r.api.WalletBalance(ctx, addr)
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ addrSum = big.Add(addrSum, balance)
+ addrCheck[addr] = struct{}{}
+ }
+ }
+
+ totalAvailableBalance := big.Add(addrSum, minerAvailableBalance)
+
+ if totalAvailableBalance.GreaterThanEqual(r.threshold) {
+ continue
+ }
+
+ refunds.Track(maddr, totalAvailableBalance)
+
+ log.Debugw("processing miner", "miner", maddr, "sectors", "available_balance", totalAvailableBalance)
+ }
+
+ return refunds, nil
+}
+
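+// EnsureMinerMinimums tracks a refund for every miner that has faulted sectors and whose combined available
+// and address balances fall below roughly 0.1 FIL per faulted sector, optionally writing a CSV report to output.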
+func (r *refunder) EnsureMinerMinimums(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund, output string) (*MinersRefund, error) {
+ miners, err := r.api.StateListMiners(ctx, tipset.Key())
+ if err != nil {
+ return nil, err
+ }
+
+ w := ioutil.Discard
+ if len(output) != 0 {
+ f, err := os.Create(output)
+ if err != nil {
+ return nil, err
+ }
+
+ defer f.Close() // nolint:errcheck
+
+ bw := bufio.NewWriter(f)
+ defer bw.Flush() // nolint:errcheck
+ w = bw
+ }
+
+ csvOut := csv.NewWriter(w)
+ defer csvOut.Flush()
+ if err := csvOut.Write([]string{"MinerID", "FaultedSectors", "AvailableBalance", "ProposedRefund"}); err != nil {
+ return nil, err
+ }
+
+ for _, maddr := range miners {
+ mact, err := r.api.StateGetActor(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ if !mact.Balance.GreaterThan(big.Zero()) {
+ continue
+ }
+
+ minerAvailableBalance, err := r.api.StateMinerAvailableBalance(ctx, maddr, tipset.Key())
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ // Look up and find all addresses associated with the miner
+ minerInfo, err := r.api.StateMinerInfo(ctx, maddr, tipset.Key())
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ allAddresses := []address.Address{minerInfo.Worker, minerInfo.Owner}
+ allAddresses = append(allAddresses, minerInfo.ControlAddresses...)
+
+ // Sum the balances of all the addresses
+ addrSum := big.Zero()
+ addrCheck := make(map[address.Address]struct{}, len(allAddresses))
+ for _, addr := range allAddresses {
+ if _, found := addrCheck[addr]; !found {
+ balance, err := r.api.WalletBalance(ctx, addr)
+ if err != nil {
+ log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ addrSum = big.Add(addrSum, balance)
+ addrCheck[addr] = struct{}{}
+ }
+ }
+
+ faults, err := r.api.StateMinerFaults(ctx, maddr, tipset.Key())
+ if err != nil {
+ log.Errorw("failed to look up miner faults", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ faultsCount, err := faults.Count()
+ if err != nil {
+ log.Errorw("failed to get count of faults", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ if faultsCount == 0 {
+ log.Debugw("skipping miner with zero faults", "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
+ continue
+ }
+
+ totalAvailableBalance := big.Add(addrSum, minerAvailableBalance)
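+ // The cutoff targets roughly 0.1 FIL per faulted sector (integer division, so it steps in whole FIL).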
+ balanceCutoff := big.Mul(big.Div(big.NewIntUnsigned(faultsCount), big.NewInt(10)), big.NewIntUnsigned(build.FilecoinPrecision))
+
+ if totalAvailableBalance.GreaterThan(balanceCutoff) {
+ log.Debugw(
+ "skipping over miner with total available balance larger than refund",
+ "height", tipset.Height(),
+ "key", tipset.Key(),
+ "miner", maddr,
+ "available_balance", totalAvailableBalance,
+ "balance_cutoff", balanceCutoff,
+ "faults_count", faultsCount,
+ "available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "balance_cutoff_fil", big.Div(balanceCutoff, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ )
+ continue
+ }
+
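+ // Top the miner up to the cutoff, scale by the recovery refund percent, then add the flat bonus.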
+ refundValue := big.Sub(balanceCutoff, totalAvailableBalance)
+ if r.minerRecoveryRefundPercent > 0 {
+ refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.minerRecoveryRefundPercent)))
+ }
+
+ refundValue = big.Add(refundValue, r.minerRecoveryBonus)
+
+ if refundValue.GreaterThan(r.minerRecoveryCutoff) {
+ log.Infow(
+ "skipping over miner with refund greater than refund cutoff",
+ "height", tipset.Height(),
+ "key", tipset.Key(),
+ "miner", maddr,
+ "available_balance", totalAvailableBalance,
+ "balance_cutoff", balanceCutoff,
+ "faults_count", faultsCount,
+ "refund", refundValue,
+ "available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "balance_cutoff_fil", big.Div(balanceCutoff, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ )
+ continue
+ }
+
+ refunds.Track(maddr, refundValue)
+ record := []string{
+ maddr.String(),
+ fmt.Sprintf("%d", faultsCount),
+ big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).String(),
+ big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).String(),
+ }
+ if err := csvOut.Write(record); err != nil {
+ return nil, err
+ }
+
+ log.Debugw(
+ "processing miner",
+ "miner", maddr,
+ "faults_count", faultsCount,
+ "available_balance", totalAvailableBalance,
+ "refund", refundValue,
+ "available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ )
+ }
+
+ return refunds, nil
+}
+
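+// processTipsetStorageMarketActor computes a gas refund for a successful PublishStorageDeals message,
+// reporting whether the message qualifies, the method name and the refund value.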
+func (r *refunder) processTipsetStorageMarketActor(ctx context.Context, tipset *types.TipSet, msg api.Message, recp *types.MessageReceipt) (bool, string, types.BigInt, error) {
+
+ m := msg.Message
+ refundValue := types.NewInt(0)
+ var messageMethod string
+
+ switch m.Method {
+ case builtin0.MethodsMarket.PublishStorageDeals:
+ if !r.publishStorageDealsEnabled {
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ messageMethod = "PublishStorageDeals"
+
+ if recp.ExitCode != exitcode.Ok {
+ log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recp.ExitCode)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
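+ // Refund the base fee burn for the message: gas used multiplied by the tipset's parent base fee.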
+ refundValue = types.BigMul(types.NewInt(uint64(recp.GasUsed)), tipset.Blocks()[0].ParentBaseFee)
+ }
+
+ return true, messageMethod, refundValue, nil
+}
+
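+// processTipsetStorageMinerActor computes refunds for miner actor messages: gas fees for SubmitWindowedPoSt,
+// and initial pledge collateral for ProveCommitSector and PreCommitSector.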
+func (r *refunder) processTipsetStorageMinerActor(ctx context.Context, tipset *types.TipSet, msg api.Message, recp *types.MessageReceipt) (bool, string, types.BigInt, error) {
+
+ m := msg.Message
+ refundValue := types.NewInt(0)
+ var messageMethod string
+
+ switch m.Method {
+ case builtin0.MethodsMiner.SubmitWindowedPoSt:
+ if !r.windowedPoStEnabled {
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ messageMethod = "SubmitWindowedPoSt"
+
+ if recp.ExitCode != exitcode.Ok {
+ log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recp.ExitCode)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ refundValue = types.BigMul(types.NewInt(uint64(recp.GasUsed)), tipset.Blocks()[0].ParentBaseFee)
+ case builtin0.MethodsMiner.ProveCommitSector:
+ if !r.proveCommitEnabled {
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ messageMethod = "ProveCommitSector"
+
+ if recp.ExitCode != exitcode.Ok {
+ log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recp.ExitCode)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ if m.GasFeeCap.GreaterThan(r.proveFeeCapMax) {
+ log.Debugw("skipping high fee cap message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "gas_fee_cap", m.GasFeeCap, "fee_cap_max", r.proveFeeCapMax)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ var sn abi.SectorNumber
+
+ var proveCommitSector miner0.ProveCommitSectorParams
+ if err := proveCommitSector.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil {
+ log.Warnw("failed to decode provecommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ sn = proveCommitSector.SectorNumber
+
+ // We use the parent tipset key because precommit information is removed when ProveCommitSector is executed
+ precommitChainInfo, err := r.api.StateSectorPreCommitInfo(ctx, m.To, sn, tipset.Parents())
+ if err != nil {
+ log.Warnw("failed to get precommit info for sector", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", sn)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ precommitTipset, err := r.api.ChainGetTipSetByHeight(ctx, precommitChainInfo.PreCommitEpoch, tipset.Key())
+ if err != nil {
+ log.Warnf("failed to lookup precommit epoch", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", sn)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ collateral, err := r.api.StateMinerInitialPledgeCollateral(ctx, m.To, precommitChainInfo.Info, precommitTipset.Key())
+ if err != nil {
+ log.Warnw("failed to get initial pledge collateral", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", sn)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ collateral = big.Sub(collateral, precommitChainInfo.PreCommitDeposit)
+ if collateral.LessThan(big.Zero()) {
+ log.Debugw("skipping zero pledge collateral difference", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", sn)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ refundValue = collateral
+ if r.refundPercent > 0 {
+ refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.refundPercent)))
+ }
+ case builtin0.MethodsMiner.PreCommitSector:
+ if !r.preCommitEnabled {
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ messageMethod = "PreCommitSector"
+
+ if recp.ExitCode != exitcode.Ok {
+ log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recp.ExitCode)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ if m.GasFeeCap.GreaterThan(r.preFeeCapMax) {
+ log.Debugw("skipping high fee cap message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "gas_fee_cap", m.GasFeeCap, "fee_cap_max", r.preFeeCapMax)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ var precommitInfo miner.SectorPreCommitInfo
+ if err := precommitInfo.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil {
+ log.Warnw("failed to decode precommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ collateral, err := r.api.StateMinerInitialPledgeCollateral(ctx, m.To, precommitInfo, tipset.Key())
+ if err != nil {
+ log.Warnw("failed to calculate initial pledge collateral", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", precommitInfo.SectorNumber)
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ refundValue = collateral
+ if r.refundPercent > 0 {
+ refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.refundPercent)))
+ }
+ default:
+ return false, messageMethod, types.NewInt(0), nil
+ }
+
+ return true, messageMethod, refundValue, nil
+}
+
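+// ProcessTipset scans the messages of a tipset, accumulates qualifying refunds into the supplied MinersRefund
+// and returns it, logging per-tipset totals.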
+func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund) (*MinersRefund, error) {
cids := tipset.Cids()
if len(cids) == 0 {
log.Errorw("no cids in tipset", "height", tipset.Height(), "key", tipset.Key())
@@ -329,10 +1029,9 @@ func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*Mi
return nil, nil
}
- refunds := NewMinersRefund()
-
- refundValue := types.NewInt(0)
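+ // tipsetRefunds tracks this tipset's totals for logging only; refunds aggregates across tipsets.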
+ tipsetRefunds := NewMinersRefund()
for i, msg := range msgs {
+ refundValue := types.NewInt(0)
m := msg.Message
a, err := r.api.StateGetActor(ctx, m.To, tipset.Key())
@@ -341,98 +1040,56 @@ func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*Mi
continue
}
- if a.Code != builtin.StorageMinerActorCodeID {
- continue
- }
-
var messageMethod string
+ var processed bool
- switch m.Method {
- case builtin.MethodsMiner.ProveCommitSector:
- if !r.proveCommitEnabled {
- continue
- }
+ if m.To == market.Address {
+ processed, messageMethod, refundValue, err = r.processTipsetStorageMarketActor(ctx, tipset, msg, recps[i])
+ }
- messageMethod = "ProveCommitSector"
+ if builtin.IsStorageMinerActor(a.Code) {
+ processed, messageMethod, refundValue, err = r.processTipsetStorageMinerActor(ctx, tipset, msg, recps[i])
+ }
- if recps[i].ExitCode != exitcode.Ok {
- log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recps[i].ExitCode)
- continue
- }
-
- var proveCommitSector miner.ProveCommitSectorParams
- if err := proveCommitSector.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil {
- log.Warnw("failed to decode provecommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To)
- continue
- }
-
- // We use the parent tipset key because precommit information is removed when ProveCommitSector is executed
- precommitChainInfo, err := r.api.StateSectorPreCommitInfo(ctx, m.To, proveCommitSector.SectorNumber, tipset.Parents())
- if err != nil {
- log.Warnw("failed to get precommit info for sector", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", proveCommitSector.SectorNumber)
- continue
- }
-
- precommitTipset, err := r.api.ChainGetTipSetByHeight(ctx, precommitChainInfo.PreCommitEpoch, tipset.Key())
- if err != nil {
- log.Warnf("failed to lookup precommit epoch", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", proveCommitSector.SectorNumber)
- continue
- }
-
- collateral, err := r.api.StateMinerInitialPledgeCollateral(ctx, m.To, precommitChainInfo.Info, precommitTipset.Key())
- if err != nil {
- log.Warnw("failed to get initial pledge collateral", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", proveCommitSector.SectorNumber)
- }
-
- collateral = big.Sub(collateral, precommitChainInfo.PreCommitDeposit)
- if collateral.LessThan(big.Zero()) {
- log.Debugw("skipping zero pledge collateral difference", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", proveCommitSector.SectorNumber)
- continue
- }
-
- refundValue = collateral
- case builtin.MethodsMiner.PreCommitSector:
- if !r.preCommitEnabled {
- continue
- }
-
- messageMethod = "PreCommitSector"
-
- if recps[i].ExitCode != exitcode.Ok {
- log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recps[i].ExitCode)
- continue
- }
-
- var precommitInfo miner.SectorPreCommitInfo
- if err := precommitInfo.UnmarshalCBOR(bytes.NewBuffer(m.Params)); err != nil {
- log.Warnw("failed to decode precommit params", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To)
- continue
- }
-
- collateral, err := r.api.StateMinerInitialPledgeCollateral(ctx, m.To, precommitInfo, tipset.Key())
- if err != nil {
- log.Warnw("failed to calculate initial pledge collateral", "err", err, "method", messageMethod, "cid", msg.Cid, "miner", m.To, "sector_number", precommitInfo.SectorNumber)
- continue
- }
-
- refundValue = collateral
- default:
+ if err != nil {
+ log.Errorw("error while processing message", "cid", msg.Cid)
+ continue
+ }
+ if !processed {
continue
}
- if r.percentExtra > 0 {
- refundValue = types.BigAdd(refundValue, types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.percentExtra))))
- }
-
- log.Debugw("processing message", "method", messageMethod, "cid", msg.Cid, "from", m.From, "to", m.To, "value", m.Value, "gas_fee_cap", m.GasFeeCap, "gas_premium", m.GasPremium, "gas_used", recps[i].GasUsed, "refund", refundValue)
+ log.Debugw(
+ "processing message",
+ "method", messageMethod,
+ "cid", msg.Cid,
+ "from", m.From,
+ "to", m.To,
+ "value", m.Value,
+ "gas_fee_cap", m.GasFeeCap,
+ "gas_premium", m.GasPremium,
+ "gas_used", recps[i].GasUsed,
+ "refund", refundValue,
+ "refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ )
refunds.Track(m.From, refundValue)
+ tipsetRefunds.Track(m.From, refundValue)
}
+ log.Infow(
+ "tipset stats",
+ "height", tipset.Height(),
+ "key", tipset.Key(),
+ "total_refunds", tipsetRefunds.TotalRefunds(),
+ "total_refunds_fil", big.Div(tipsetRefunds.TotalRefunds(), big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "messages_processed", tipsetRefunds.Count(),
+ )
+
return refunds, nil
}
-func (r *refunder) Refund(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund) error {
+func (r *refunder) Refund(ctx context.Context, name string, tipset *types.TipSet, refunds *MinersRefund, rounds int) error {
if refunds.Count() == 0 {
log.Debugw("no messages to refund in tipset", "height", tipset.Height(), "key", tipset.Key())
return nil
@@ -490,13 +1147,24 @@ func (r *refunder) Refund(ctx context.Context, tipset *types.TipSet, refunds *Mi
refundSum = types.BigAdd(refundSum, msg.Value)
}
- log.Infow("tipset stats", "height", tipset.Height(), "key", tipset.Key(), "messages_sent", len(messages)-failures, "refund_sum", refundSum, "messages_failures", failures, "messages_processed", refunds.Count())
+ log.Infow(
+ name,
+ "tipsets_processed", rounds,
+ "height", tipset.Height(),
+ "key", tipset.Key(),
+ "messages_sent", len(messages)-failures,
+ "refund_sum", refundSum,
+ "refund_sum_fil", big.Div(refundSum, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
+ "messages_failures", failures,
+ "messages_processed", refunds.Count(),
+ )
return nil
}
type Repo struct {
- last abi.ChainEpoch
- path string
+ lastHeight abi.ChainEpoch
+ lastMinerRecoveryHeight abi.ChainEpoch
+ path string
}
func NewRepo(path string) (*Repo, error) {
@@ -506,8 +1174,9 @@ func NewRepo(path string) (*Repo, error) {
}
return &Repo{
- last: 0,
- path: path,
+ lastHeight: 0,
+ lastMinerRecoveryHeight: 0,
+ path: path,
}, nil
}
@@ -538,43 +1207,66 @@ func (r *Repo) init() error {
return nil
}
-func (r *Repo) Open() (err error) {
- if err = r.init(); err != nil {
- return
+func (r *Repo) Open() error {
+ if err := r.init(); err != nil {
+ return err
}
- var f *os.File
+ if err := r.loadHeight(); err != nil {
+ return err
+ }
- f, err = os.OpenFile(filepath.Join(r.path, "height"), os.O_RDWR|os.O_CREATE, 0644)
+ if err := r.loadMinerRecoveryHeight(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
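+// loadChainEpoch reads a chain epoch persisted as a decimal string in the given file.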
+func loadChainEpoch(fn string) (abi.ChainEpoch, error) {
+ f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
- return
+ return 0, err
}
defer func() {
err = f.Close()
}()
- var raw []byte
-
- raw, err = ioutil.ReadAll(f)
+ raw, err := ioutil.ReadAll(f)
if err != nil {
- return
+ return 0, err
}
height, err := strconv.Atoi(string(bytes.TrimSpace(raw)))
if err != nil {
- return
+ return 0, err
}
- r.last = abi.ChainEpoch(height)
- return
+ return abi.ChainEpoch(height), nil
+}
+
+func (r *Repo) loadHeight() error {
+ var err error
+ r.lastHeight, err = loadChainEpoch(filepath.Join(r.path, "height"))
+ return err
+}
+
+func (r *Repo) loadMinerRecoveryHeight() error {
+ var err error
+ r.lastMinerRecoveryHeight, err = loadChainEpoch(filepath.Join(r.path, "miner_recovery_height"))
+ return err
}
func (r *Repo) Height() abi.ChainEpoch {
- return r.last
+ return r.lastHeight
+}
+
+func (r *Repo) MinerRecoveryHeight() abi.ChainEpoch {
+ return r.lastMinerRecoveryHeight
}
func (r *Repo) SetHeight(last abi.ChainEpoch) (err error) {
- r.last = last
+ r.lastHeight = last
var f *os.File
f, err = os.OpenFile(filepath.Join(r.path, "height"), os.O_RDWR, 0644)
if err != nil {
@@ -585,7 +1277,26 @@ func (r *Repo) SetHeight(last abi.ChainEpoch) (err error) {
err = f.Close()
}()
- if _, err = fmt.Fprintf(f, "%d", r.last); err != nil {
+ if _, err = fmt.Fprintf(f, "%d", r.lastHeight); err != nil {
+ return
+ }
+
+ return
+}
+
+func (r *Repo) SetMinerRecoveryHeight(last abi.ChainEpoch) (err error) {
+ r.lastMinerRecoveryHeight = last
+ var f *os.File
+ f, err = os.OpenFile(filepath.Join(r.path, "miner_recovery_height"), os.O_RDWR, 0644)
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ err = f.Close()
+ }()
+
+ if _, err = fmt.Fprintf(f, "%d", r.lastMinerRecoveryHeight); err != nil {
return
}
diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go
index e6361d3cf..d2c57e680 100644
--- a/cmd/lotus-seal-worker/main.go
+++ b/cmd/lotus-seal-worker/main.go
@@ -45,6 +45,8 @@ const FlagWorkerRepo = "worker-repo"
const FlagWorkerRepoDeprecation = "workerrepo"
func main() {
+ build.RunningNodeType = build.NodeWorker
+
lotuslog.SetupLogLevels()
local := []*cli.Command{
@@ -107,6 +109,11 @@ var runCmd = &cli.Command{
Name: "no-local-storage",
Usage: "don't use storageminer repo for sector storage",
},
+ &cli.BoolFlag{
+ Name: "no-swap",
+ Usage: "don't use swap",
+ Value: false,
+ },
&cli.BoolFlag{
Name: "addpiece",
Usage: "enable addpiece",
@@ -187,8 +194,8 @@ var runCmd = &cli.Command{
if err != nil {
return err
}
- if v.APIVersion != build.APIVersion {
- return xerrors.Errorf("lotus-miner API version doesn't match: local: %s", api.Version{APIVersion: build.APIVersion})
+ if v.APIVersion != build.MinerAPIVersion {
+ return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: build.MinerAPIVersion})
}
log.Infof("Remote version %s", v)
@@ -344,6 +351,7 @@ var runCmd = &cli.Command{
LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
SealProof: spt,
TaskTypes: taskTypes,
+ NoSwap: cctx.Bool("no-swap"),
}, remote, localStore, nodeApi),
localStore: localStore,
ls: lr,
@@ -463,6 +471,7 @@ func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageM
"run",
fmt.Sprintf("--listen=%s", cctx.String("listen")),
fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")),
+ fmt.Sprintf("--no-swap=%t", cctx.Bool("no-swap")),
fmt.Sprintf("--addpiece=%t", cctx.Bool("addpiece")),
fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")),
fmt.Sprintf("--unseal=%t", cctx.Bool("unseal")),
diff --git a/cmd/lotus-seal-worker/rpc.go b/cmd/lotus-seal-worker/rpc.go
index 5380fe432..8aa9093c2 100644
--- a/cmd/lotus-seal-worker/rpc.go
+++ b/cmd/lotus-seal-worker/rpc.go
@@ -21,7 +21,7 @@ type worker struct {
}
func (w *worker) Version(context.Context) (build.Version, error) {
- return build.APIVersion, nil
+ return build.WorkerAPIVersion, nil
}
func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go
index f2bff4d6e..bbaea6969 100644
--- a/cmd/lotus-seed/genesis.go
+++ b/cmd/lotus-seed/genesis.go
@@ -15,8 +15,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/gen"
diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go
index 48691d5ec..d365f6493 100644
--- a/cmd/lotus-seed/main.go
+++ b/cmd/lotus-seed/main.go
@@ -15,8 +15,8 @@ import (
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
diff --git a/cmd/lotus-seed/seed/seed.go b/cmd/lotus-seed/seed/seed.go
index f892709f6..5e911991d 100644
--- a/cmd/lotus-seed/seed/seed.go
+++ b/cmd/lotus-seed/seed/seed.go
@@ -19,11 +19,11 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go
new file mode 100644
index 000000000..26450bdb8
--- /dev/null
+++ b/cmd/lotus-shed/balances.go
@@ -0,0 +1,422 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/docker/go-units"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+
+ "github.com/ipfs/go-cid"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ logging "github.com/ipfs/go-log/v2"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+type accountInfo struct {
+ Address address.Address
+ Balance types.FIL
+ Type string
+ Power abi.StoragePower
+ Worker address.Address
+ Owner address.Address
+ InitialPledge types.FIL
+ PreCommits types.FIL
+ LockedFunds types.FIL
+ Sectors uint64
+ VestingStart abi.ChainEpoch
+ VestingDuration abi.ChainEpoch
+ VestingAmount types.FIL
+}
+
+var auditsCmd = &cli.Command{
+ Name: "audits",
+ Description: "a collection of utilities for auditing the filecoin chain",
+ Subcommands: []*cli.Command{
+ chainBalanceCmd,
+ chainBalanceStateCmd,
+ chainPledgeCmd,
+ },
+}
+
+var chainBalanceCmd = &cli.Command{
+ Name: "chain-balances",
+ Description: "Produces a csv file of all account balances",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset to start from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ ts, err := lcli.LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ tsk := ts.Key()
+ actors, err := api.StateListActors(ctx, tsk)
+ if err != nil {
+ return err
+ }
+
+ var infos []accountInfo
+ for _, addr := range actors {
+ act, err := api.StateGetActor(ctx, addr, tsk)
+ if err != nil {
+ return err
+ }
+
+ ai := accountInfo{
+ Address: addr,
+ Balance: types.FIL(act.Balance),
+ Type: string(act.Code.Hash()[2:]),
+ }
+
+ if builtin.IsStorageMinerActor(act.Code) {
+ pow, err := api.StateMinerPower(ctx, addr, tsk)
+ if err != nil {
+ return xerrors.Errorf("failed to get power: %w", err)
+ }
+
+ ai.Power = pow.MinerPower.RawBytePower
+ info, err := api.StateMinerInfo(ctx, addr, tsk)
+ if err != nil {
+ return xerrors.Errorf("failed to get miner info: %w", err)
+ }
+ ai.Worker = info.Worker
+ ai.Owner = info.Owner
+
+ }
+ infos = append(infos, ai)
+ }
+
+ printAccountInfos(infos, false)
+
+ return nil
+ },
+}
+
+var chainBalanceStateCmd = &cli.Command{
+ Name: "stateroot-balances",
+ Description: "Produces a csv file of all account balances from a given stateroot",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ &cli.BoolFlag{
+ Name: "miner-info",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.TODO()
+
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass state root")
+ }
+
+ sroot, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("failed to parse input: %w", err)
+ }
+
+ fsrepo, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ lkrepo, err := fsrepo.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+
+ defer lkrepo.Close() //nolint:errcheck
+
+ ds, err := lkrepo.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ mds, err := lkrepo.Datastore("/metadata")
+ if err != nil {
+ return err
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
+
+ cst := cbor.NewCborStore(bs)
+ store := adt.WrapStore(ctx, cst)
+
+ sm := stmgr.NewStateManager(cs)
+
+ tree, err := state.LoadStateTree(cst, sroot)
+ if err != nil {
+ return err
+ }
+
+ minerInfo := cctx.Bool("miner-info")
+
+ var infos []accountInfo
+ err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+
+ ai := accountInfo{
+ Address: addr,
+ Balance: types.FIL(act.Balance),
+ Type: string(act.Code.Hash()[2:]),
+ Power: big.NewInt(0),
+ LockedFunds: types.FIL(big.NewInt(0)),
+ InitialPledge: types.FIL(big.NewInt(0)),
+ PreCommits: types.FIL(big.NewInt(0)),
+ VestingAmount: types.FIL(big.NewInt(0)),
+ }
+
+ if minerInfo && builtin.IsStorageMinerActor(act.Code) {
+ pow, _, _, err := stmgr.GetPowerRaw(ctx, sm, sroot, addr)
+ if err != nil {
+ return xerrors.Errorf("failed to get power: %w", err)
+ }
+
+ ai.Power = pow.RawBytePower
+
+ st, err := miner.Load(store, act)
+ if err != nil {
+ return xerrors.Errorf("failed to read miner state: %w", err)
+ }
+
+ liveSectorCount, err := st.NumLiveSectors()
+ if err != nil {
+ return xerrors.Errorf("failed to compute live sector count: %w", err)
+ }
+
+ lockedFunds, err := st.LockedFunds()
+ if err != nil {
+ return xerrors.Errorf("failed to compute locked funds: %w", err)
+ }
+
+ ai.InitialPledge = types.FIL(lockedFunds.InitialPledgeRequirement)
+ ai.LockedFunds = types.FIL(lockedFunds.VestingFunds)
+ ai.PreCommits = types.FIL(lockedFunds.PreCommitDeposits)
+ ai.Sectors = liveSectorCount
+
+ minfo, err := st.Info()
+ if err != nil {
+ return xerrors.Errorf("failed to get miner info: %w", err)
+ }
+
+ ai.Worker = minfo.Worker
+ ai.Owner = minfo.Owner
+ }
+
+ if builtin.IsMultisigActor(act.Code) {
+ mst, err := multisig.Load(store, act)
+ if err != nil {
+ return err
+ }
+
+ ai.VestingStart, err = mst.StartEpoch()
+ if err != nil {
+ return err
+ }
+
+ ib, err := mst.InitialBalance()
+ if err != nil {
+ return err
+ }
+
+ ai.VestingAmount = types.FIL(ib)
+
+ ai.VestingDuration, err = mst.UnlockDuration()
+ if err != nil {
+ return err
+ }
+
+ }
+
+ infos = append(infos, ai)
+ return nil
+ })
+ if err != nil {
+ return xerrors.Errorf("failed to loop over actors: %w", err)
+ }
+
+ printAccountInfos(infos, minerInfo)
+
+ return nil
+ },
+}
+
+func printAccountInfos(infos []accountInfo, minerInfo bool) {
+ if minerInfo {
+ fmt.Printf("Address,Balance,Type,Sectors,Worker,Owner,InitialPledge,Locked,PreCommits,VestingStart,VestingDuration,VestingAmount\n")
+ for _, acc := range infos {
+ fmt.Printf("%s,%s,%s,%d,%s,%s,%s,%s,%s,%d,%d,%s\n", acc.Address, acc.Balance.Unitless(), acc.Type, acc.Sectors, acc.Worker, acc.Owner, acc.InitialPledge.Unitless(), acc.LockedFunds.Unitless(), acc.PreCommits.Unitless(), acc.VestingStart, acc.VestingDuration, acc.VestingAmount.Unitless())
+ }
+ } else {
+ fmt.Printf("Address,Balance,Type\n")
+ for _, acc := range infos {
+ fmt.Printf("%s,%s,%s\n", acc.Address, acc.Balance.Unitless(), acc.Type)
+ }
+ }
+
+}
+
+var chainPledgeCmd = &cli.Command{
+ Name: "stateroot-pledge",
+ Description: "Calculate sector pledge numbers",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ },
+ ArgsUsage: "[stateroot epoch]",
+ Action: func(cctx *cli.Context) error {
+ logging.SetLogLevel("badger", "ERROR")
+ ctx := context.TODO()
+
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass state root")
+ }
+
+ sroot, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("failed to parse input: %w", err)
+ }
+
+ epoch, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("parsing epoch arg: %w", err)
+ }
+
+ fsrepo, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ lkrepo, err := fsrepo.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+
+ defer lkrepo.Close() //nolint:errcheck
+
+ ds, err := lkrepo.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ mds, err := lkrepo.Datastore("/metadata")
+ if err != nil {
+ return err
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
+
+ cst := cbor.NewCborStore(bs)
+ store := adt.WrapStore(ctx, cst)
+
+ sm := stmgr.NewStateManager(cs)
+
+ state, err := state.LoadStateTree(cst, sroot)
+ if err != nil {
+ return err
+ }
+
+ var (
+ powerSmoothed builtin.FilterEstimate
+ pledgeCollateral abi.TokenAmount
+ )
+ if act, err := state.GetActor(power.Address); err != nil {
+ return xerrors.Errorf("loading miner actor: %w", err)
+ } else if s, err := power.Load(store, act); err != nil {
+ return xerrors.Errorf("loading power actor state: %w", err)
+ } else if p, err := s.TotalPowerSmoothed(); err != nil {
+ return xerrors.Errorf("failed to determine total power: %w", err)
+ } else if c, err := s.TotalLocked(); err != nil {
+ return xerrors.Errorf("failed to determine pledge collateral: %w", err)
+ } else {
+ powerSmoothed = p
+ pledgeCollateral = c
+ }
+
+ circ, err := sm.GetCirculatingSupplyDetailed(ctx, abi.ChainEpoch(epoch), state)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("(real) circulating supply: ", types.FIL(circ.FilCirculating))
+ if circ.FilCirculating.LessThan(big.Zero()) {
+ circ.FilCirculating = big.Zero()
+ }
+
+ rewardActor, err := state.GetActor(reward.Address)
+ if err != nil {
+ return xerrors.Errorf("loading miner actor: %w", err)
+ }
+
+ rewardState, err := reward.Load(store, rewardActor)
+ if err != nil {
+ return xerrors.Errorf("loading reward actor state: %w", err)
+ }
+
+ fmt.Println("FilVested", types.FIL(circ.FilVested))
+ fmt.Println("FilMined", types.FIL(circ.FilMined))
+ fmt.Println("FilBurnt", types.FIL(circ.FilBurnt))
+ fmt.Println("FilLocked", types.FIL(circ.FilLocked))
+ fmt.Println("FilCirculating", types.FIL(circ.FilCirculating))
+
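+ // Sample sector weights: 32GiB and 64GiB sectors, plus 10x variants (the quality multiplier for fully verified-deal sectors).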
+ for _, sectorWeight := range []abi.StoragePower{
+ types.NewInt(32 << 30),
+ types.NewInt(64 << 30),
+ types.NewInt(32 << 30 * 10),
+ types.NewInt(64 << 30 * 10),
+ } {
+ initialPledge, err := rewardState.InitialPledgeForPower(
+ sectorWeight,
+ pledgeCollateral,
+ &powerSmoothed,
+ circ.FilCirculating,
+ )
+ if err != nil {
+ return xerrors.Errorf("calculating initial pledge: %w", err)
+ }
+
+ fmt.Println("IP ", units.HumanSize(float64(sectorWeight.Uint64())), types.FIL(initialPledge))
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/bitfield.go b/cmd/lotus-shed/bitfield.go
index 79ce214ee..442cbef48 100644
--- a/cmd/lotus-shed/bitfield.go
+++ b/cmd/lotus-shed/bitfield.go
@@ -4,6 +4,7 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
+ "io"
"io/ioutil"
"os"
@@ -28,6 +29,9 @@ var bitFieldCmd = &cli.Command{
bitFieldRunsCmd,
bitFieldStatCmd,
bitFieldDecodeCmd,
+ bitFieldIntersectCmd,
+ bitFieldEncodeCmd,
+ bitFieldSubCmd,
},
}
@@ -200,38 +204,9 @@ var bitFieldDecodeCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
- var val string
- if cctx.Args().Present() {
- val = cctx.Args().Get(0)
- } else {
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return err
- }
- val = string(b)
- }
-
- var dec []byte
- switch cctx.String("enc") {
- case "base64":
- d, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding base64 value: %w", err)
- }
- dec = d
- case "hex":
- d, err := hex.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding hex value: %w", err)
- }
- dec = d
- default:
- return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
- }
-
- rle, err := bitfield.NewFromBytes(dec)
+ rle, err := decode(cctx, 0)
if err != nil {
- return xerrors.Errorf("failed to parse bitfield: %w", err)
+ return err
}
vals, err := rle.All(100000000000)
@@ -243,3 +218,170 @@ var bitFieldDecodeCmd = &cli.Command{
return nil
},
}
+
+var bitFieldIntersectCmd = &cli.Command{
+ Name: "intersect",
+ Description: "intersect 2 bitfields and print the resulting bitfield as base64",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "enc",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ b, err := decode(cctx, 1)
+ if err != nil {
+ return err
+ }
+
+ a, err := decode(cctx, 0)
+ if err != nil {
+ return err
+ }
+
+ o, err := bitfield.IntersectBitField(a, b)
+ if err != nil {
+ return xerrors.Errorf("intersect: %w", err)
+ }
+
+ s, err := o.RunIterator()
+ if err != nil {
+ return err
+ }
+
+ bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+
+ return nil
+ },
+}
+
+var bitFieldSubCmd = &cli.Command{
+ Name: "sub",
+ Description: "subtract 2 bitfields and print the resulting bitfield as base64",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "enc",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ b, err := decode(cctx, 1)
+ if err != nil {
+ return err
+ }
+
+ a, err := decode(cctx, 0)
+ if err != nil {
+ return err
+ }
+
+ o, err := bitfield.SubtractBitField(a, b)
+ if err != nil {
+ return xerrors.Errorf("intersect: %w", err)
+ }
+
+ s, err := o.RunIterator()
+ if err != nil {
+ return err
+ }
+
+ bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+
+ return nil
+ },
+}
+
+var bitFieldEncodeCmd = &cli.Command{
+ Name: "encode",
+ Description: "encode a series of decimal numbers into a bitfield",
+ ArgsUsage: "[infile]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "enc",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ f, err := os.Open(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+ defer f.Close() // nolint
+
+ out := bitfield.New()
+ for {
+ var i uint64
+ _, err := fmt.Fscan(f, &i)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ out.Set(i)
+ }
+
+ s, err := out.RunIterator()
+ if err != nil {
+ return err
+ }
+
+ bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+
+ return nil
+ },
+}
+
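+// decode parses the a'th positional argument (or stdin when no arguments are given) as a bitfield,
+// using the encoding selected by the --enc flag.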
+func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
+ var val string
+ if cctx.Args().Present() {
+ if a >= cctx.NArg() {
+ return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
+ }
+ val = cctx.Args().Get(a)
+ } else {
+ if a > 0 {
+ return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
+ }
+ b, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ return bitfield.BitField{}, err
+ }
+ val = string(b)
+ }
+
+ var dec []byte
+ switch cctx.String("enc") {
+ case "base64":
+ d, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err)
+ }
+ dec = d
+ case "hex":
+ d, err := hex.DecodeString(val)
+ if err != nil {
+ return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err)
+ }
+ dec = d
+ default:
+ return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
+ }
+
+ return bitfield.NewFromBytes(dec)
+}
diff --git a/cmd/lotus-shed/consensus.go b/cmd/lotus-shed/consensus.go
new file mode 100644
index 000000000..38a8cd8ef
--- /dev/null
+++ b/cmd/lotus-shed/consensus.go
@@ -0,0 +1,286 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/client"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/multiformats/go-multiaddr"
+ "github.com/urfave/cli/v2"
+)
+
+var consensusCmd = &cli.Command{
+ Name: "consensus",
+ Usage: "tools for gathering information about consensus between nodes",
+ Flags: []cli.Flag{},
+ Subcommands: []*cli.Command{
+ consensusCheckCmd,
+ },
+}
+
+type consensusItem struct {
+ multiaddr multiaddr.Multiaddr
+ genesisTipset *types.TipSet
+ targetTipset *types.TipSet
+ headTipset *types.TipSet
+ peerID peer.ID
+ version api.Version
+ api api.FullNode
+}
+
+var consensusCheckCmd = &cli.Command{
+ Name: "check",
+ Usage: "verify if all nodes agree upon a common tipset for a given tipset height",
+ Description: `Consensus check verifies that all nodes share a common tipset for a given
+ height.
+
+ The height flag specifies a chain height to start a comparison from. There are two special
+ arguments for this flag. All other expected values should be chain tipset heights.
+
+ @common - Use the maximum common chain height between all nodes
+ @expected - Use the current time and the genesis timestamp to determine a height
+
+ Examples
+
+ Find the highest common tipset and look back 10 tipsets
+ lotus-shed consensus check --height @common --lookback 10
+
+ Calculate the expected tipset height and look back 10 tipsets
+ lotus-shed consensus check --height @expected --lookback 10
+
+ Check if nodes all share a common genesis
+ lotus-shed consensus check --height 0
+
+ Check that all nodes agree upon the tipset for 1 day post genesis
+ lotus-shed consensus check --height 2880 --lookback 0
+ `,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "height",
+ Value: "@common",
+ Usage: "height of tipset to start check from",
+ },
+ &cli.IntFlag{
+ Name: "lookback",
+ Value: int(build.MessageConfidence * 2),
+ Usage: "number of tipsets behind to look back when comparing nodes",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ filePath := cctx.Args().First()
+
+ var input *bufio.Reader
+ if cctx.Args().Len() == 0 {
+ input = bufio.NewReader(os.Stdin)
+ } else {
+ var err error
+ inputFile, err := os.Open(filePath)
+ if err != nil {
+ return err
+ }
+ defer inputFile.Close() //nolint:errcheck
+ input = bufio.NewReader(inputFile)
+ }
+
+ var nodes []*consensusItem
+ ctx := lcli.ReqContext(cctx)
+
+ for {
+ strma, errR := input.ReadString('\n')
+ strma = strings.TrimSpace(strma)
+
+ if len(strma) == 0 {
+ if errR == io.EOF {
+ break
+ }
+ continue
+ }
+
+ apima, err := multiaddr.NewMultiaddr(strma)
+ if err != nil {
+ return err
+ }
+ ainfo := lcli.APIInfo{Addr: apima.String()}
+ addr, err := ainfo.DialArgs()
+ if err != nil {
+ return err
+ }
+
+ api, closer, err := client.NewFullNodeRPC(cctx.Context, addr, nil)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ peerID, err := api.ID(ctx)
+ if err != nil {
+ return err
+ }
+
+ version, err := api.Version(ctx)
+ if err != nil {
+ return err
+ }
+
+ genesisTipset, err := api.ChainGetGenesis(ctx)
+ if err != nil {
+ return err
+ }
+
+ headTipset, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ nodes = append(nodes, &consensusItem{
+ genesisTipset: genesisTipset,
+ headTipset: headTipset,
+ multiaddr: apima,
+ api: api,
+ peerID: peerID,
+ version: version,
+ })
+
+ if errR != nil && errR != io.EOF {
+ return errR
+ }
+
+ if errR == io.EOF {
+ break
+ }
+ }
+
+ if len(nodes) == 0 {
+ return fmt.Errorf("no nodes")
+ }
+
+ genesisBuckets := make(map[types.TipSetKey][]*consensusItem)
+ for _, node := range nodes {
+ genesisBuckets[node.genesisTipset.Key()] = append(genesisBuckets[node.genesisTipset.Key()], node)
+
+ }
+
+ if len(genesisBuckets) != 1 {
+ for _, nodes := range genesisBuckets {
+ for _, node := range nodes {
+ log.Errorw(
+ "genesis do not match",
+ "genesis_tipset", node.genesisTipset.Key(),
+ "peer_id", node.peerID,
+ "version", node.version,
+ )
+ }
+ }
+
+ return fmt.Errorf("genesis does not match between all nodes")
+ }
+
+ target := abi.ChainEpoch(0)
+
+ switch cctx.String("height") {
+ case "@common":
+ minTipset := nodes[0].headTipset
+ for _, node := range nodes {
+ if node.headTipset.Height() < minTipset.Height() {
+ minTipset = node.headTipset
+ }
+ }
+
+ target = minTipset.Height()
+ case "@expected":
+ tnow := uint64(time.Now().Unix())
+ tgen := nodes[0].genesisTipset.MinTimestamp()
+
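+ // Expected height is the number of seconds since genesis divided by the block time.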
+ target = abi.ChainEpoch((tnow - tgen) / build.BlockDelaySecs)
+ default:
+ h, err := strconv.Atoi(strings.TrimSpace(cctx.String("height")))
+ if err != nil {
+ return fmt.Errorf("failed to parse string: %s", cctx.String("height"))
+ }
+
+ target = abi.ChainEpoch(h)
+ }
+
+ lookback := abi.ChainEpoch(cctx.Int("lookback"))
+ if lookback > target {
+ target = abi.ChainEpoch(0)
+ } else {
+ target = target - lookback
+ }
+
+ for _, node := range nodes {
+ targetTipset, err := node.api.ChainGetTipSetByHeight(ctx, target, types.EmptyTSK)
+ if err != nil {
+ log.Errorw("error checking target", "err", err)
+ node.targetTipset = nil
+ } else {
+ node.targetTipset = targetTipset
+ }
+
+ }
+ for _, node := range nodes {
+ log.Debugw(
+ "node info",
+ "peer_id", node.peerID,
+ "version", node.version,
+ "genesis_tipset", node.genesisTipset.Key(),
+ "head_tipset", node.headTipset.Key(),
+ "target_tipset", node.targetTipset.Key(),
+ )
+ }
+
+ targetBuckets := make(map[types.TipSetKey][]*consensusItem)
+ for _, node := range nodes {
+ if node.targetTipset == nil {
+ targetBuckets[types.EmptyTSK] = append(targetBuckets[types.EmptyTSK], node)
+ continue
+ }
+
+ targetBuckets[node.targetTipset.Key()] = append(targetBuckets[node.targetTipset.Key()], node)
+ }
+
+ if nodes, ok := targetBuckets[types.EmptyTSK]; ok {
+ for _, node := range nodes {
+ log.Errorw(
+ "targeted tipset not found",
+ "peer_id", node.peerID,
+ "version", node.version,
+ "genesis_tipset", node.genesisTipset.Key(),
+ "head_tipset", node.headTipset.Key(),
+ "target_tipset", node.targetTipset.Key(),
+ )
+ }
+
+ return fmt.Errorf("targeted tipset not found")
+ }
+
+ if len(targetBuckets) != 1 {
+ for _, nodes := range targetBuckets {
+ for _, node := range nodes {
+ log.Errorw(
+ "targeted tipset not found",
+ "peer_id", node.peerID,
+ "version", node.version,
+ "genesis_tipset", node.genesisTipset.Key(),
+ "head_tipset", node.headTipset.Key(),
+ "target_tipset", node.targetTipset.Key(),
+ )
+ }
+ }
+ return fmt.Errorf("nodes not in consensus at tipset height %d", target)
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go
new file mode 100644
index 000000000..c6bac6815
--- /dev/null
+++ b/cmd/lotus-shed/datastore.go
@@ -0,0 +1,290 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/docker/go-units"
+ "github.com/ipfs/go-datastore"
+ dsq "github.com/ipfs/go-datastore/query"
+ logging "github.com/ipfs/go-log"
+ "github.com/polydawn/refmt/cbor"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/lib/backupds"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var datastoreCmd = &cli.Command{
+ Name: "datastore",
+ Description: "access node datastores directly",
+ Subcommands: []*cli.Command{
+ datastoreBackupCmd,
+ datastoreListCmd,
+ datastoreGetCmd,
+ },
+}
+
+var datastoreListCmd = &cli.Command{
+ Name: "list",
+ Description: "list datastore keys",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "repo-type",
+ Usage: "node type (1 - full, 2 - storage, 3 - worker)",
+ Value: 1,
+ },
+ &cli.BoolFlag{
+ Name: "top-level",
+ Usage: "only print top-level keys",
+ },
+ &cli.StringFlag{
+ Name: "get-enc",
+ Usage: "print values [esc/hex/cbor]",
+ },
+ },
+ ArgsUsage: "[namespace prefix]",
+ Action: func(cctx *cli.Context) error {
+ logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.RepoType(cctx.Int("repo-type")))
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
+ if err != nil {
+ return err
+ }
+
+ genc := cctx.String("get-enc")
+
+ q, err := ds.Query(dsq.Query{
+ Prefix: datastore.NewKey(cctx.Args().Get(1)).String(),
+ KeysOnly: genc == "",
+ })
+ if err != nil {
+ return xerrors.Errorf("datastore query: %w", err)
+ }
+ defer q.Close() //nolint:errcheck
+
+ printKv := kvPrinter(cctx.Bool("top-level"), genc)
+
+ for res := range q.Next() {
+ if err := printKv(res.Key, res.Value); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ },
+}
+
+var datastoreGetCmd = &cli.Command{
+ Name: "get",
+	Description: "print the value stored at a given datastore key",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "repo-type",
+ Usage: "node type (1 - full, 2 - storage, 3 - worker)",
+ Value: 1,
+ },
+ &cli.StringFlag{
+ Name: "enc",
+ Usage: "encoding (esc/hex/cbor)",
+ Value: "esc",
+ },
+ },
+ ArgsUsage: "[namespace key]",
+ Action: func(cctx *cli.Context) error {
+		logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
+
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.RepoType(cctx.Int("repo-type")))
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
+ if err != nil {
+ return err
+ }
+
+ val, err := ds.Get(datastore.NewKey(cctx.Args().Get(1)))
+ if err != nil {
+ return xerrors.Errorf("get: %w", err)
+ }
+
+ return printVal(cctx.String("enc"), val)
+ },
+}
+
+var datastoreBackupCmd = &cli.Command{
+ Name: "backup",
+ Description: "manage datastore backups",
+ Subcommands: []*cli.Command{
+ datastoreBackupStatCmd,
+ datastoreBackupListCmd,
+ },
+}
+
+var datastoreBackupStatCmd = &cli.Command{
+ Name: "stat",
+ Description: "validate and print info about datastore backup",
+ ArgsUsage: "[file]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ f, err := os.Open(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("opening backup file: %w", err)
+ }
+ defer f.Close() // nolint:errcheck
+
+ var keys, kbytes, vbytes uint64
+ err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error {
+ keys++
+ kbytes += uint64(len(key.String()))
+ vbytes += uint64(len(value))
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Keys: ", keys)
+ fmt.Println("Key bytes: ", units.BytesSize(float64(kbytes)))
+ fmt.Println("Value bytes: ", units.BytesSize(float64(vbytes)))
+
+		return nil
+ },
+}
+
+var datastoreBackupListCmd = &cli.Command{
+ Name: "list",
+ Description: "list data in a backup",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "top-level",
+ Usage: "only print top-level keys",
+ },
+ &cli.StringFlag{
+ Name: "get-enc",
+ Usage: "print values [esc/hex/cbor]",
+ },
+ },
+ ArgsUsage: "[file]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ f, err := os.Open(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("opening backup file: %w", err)
+ }
+ defer f.Close() // nolint:errcheck
+
+ printKv := kvPrinter(cctx.Bool("top-level"), cctx.String("get-enc"))
+ err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error {
+ return printKv(key.String(), value)
+ })
+ if err != nil {
+ return err
+ }
+
+		return nil
+ },
+}
+
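+// kvPrinter returns a closure that prints datastore keys, deduplicating top-level
+// namespaces when toplevel is set and printing values in the encoding named by genc.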
+func kvPrinter(toplevel bool, genc string) func(sk string, value []byte) error {
+ seen := map[string]struct{}{}
+
+ return func(s string, value []byte) error {
+ if toplevel {
+ k := datastore.NewKey(datastore.NewKey(s).List()[0])
+ if k.Type() != "" {
+ s = k.Type()
+ } else {
+ s = k.String()
+ }
+
+ _, has := seen[s]
+ if has {
+ return nil
+ }
+ seen[s] = struct{}{}
+ }
+
+ s = fmt.Sprintf("%q", s)
+ s = strings.Trim(s, "\"")
+ fmt.Println(s)
+
+ if genc != "" {
+ fmt.Print("\t")
+ if err := printVal(genc, value); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
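+// printVal prints a raw datastore value as an escaped string, hex, or CBOR decoded to JSON.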
+func printVal(enc string, val []byte) error {
+ switch enc {
+ case "esc":
+ s := fmt.Sprintf("%q", string(val))
+ s = strings.Trim(s, "\"")
+ fmt.Println(s)
+ case "hex":
+ fmt.Printf("%x\n", val)
+ case "cbor":
+ var out interface{}
+ if err := cbor.Unmarshal(cbor.DecodeOptions{}, val, &out); err != nil {
+ return xerrors.Errorf("unmarshaling cbor: %w", err)
+ }
+ s, err := json.Marshal(&out)
+ if err != nil {
+ return xerrors.Errorf("remarshaling as json: %w", err)
+ }
+
+ fmt.Println(string(s))
+ default:
+ return xerrors.New("unknown encoding")
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go
new file mode 100644
index 000000000..8ded6bf4a
--- /dev/null
+++ b/cmd/lotus-shed/dealtracker.go
@@ -0,0 +1,325 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+ "net/http"
+ "sync"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+)
+
+type dealStatsServer struct {
+ api api.FullNode
+}
+
+// Requested by @jbenet
+// How many epochs back to look at for dealstats
+var epochLookback = abi.ChainEpoch(10)
+
+// these lists grow continuously with the network
+// TODO: need to switch this to an LRU of sorts, to ensure refreshes
+var knownFiltered = new(sync.Map)
+var resolvedWallets = new(sync.Map)
+
+func init() {
+ for _, a := range []string{
+ "t0100", // client for genesis miner
+ "t0101", // client for genesis miner
+ "t0102", // client for genesis miner
+ "t0112", // client for genesis miner
+ "t0113", // client for genesis miner
+ "t0114", // client for genesis miner
+ "t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet
+ "t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet
+ "t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet
+ "t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet
+ } {
+ a, err := address.NewFromString(a)
+ if err != nil {
+ panic(err)
+ }
+ knownFiltered.Store(a, true)
+ }
+}
+
+type dealCountResp struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
+}
+
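+// handleStorageDealCount reports the number of deals that pass the client filters at the lookback epoch.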
+func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) {
+
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
+ w.WriteHeader(500)
+ return
+ }
+
+ if err := json.NewEncoder(w).Encode(&dealCountResp{
+ Endpoint: "COUNT_DEALS",
+ Payload: int64(len(deals)),
+ Epoch: epoch,
+ }); err != nil {
+ log.Warnf("failed to write back deal count response: %s", err)
+ return
+ }
+}
+
+type dealAverageResp struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
+}
+
+func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) {
+
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
+ w.WriteHeader(500)
+ return
+ }
+
+ var totalBytes int64
+ for _, d := range deals {
+ totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
+ }
+
+	var average int64
+	if len(deals) > 0 { // guard against divide-by-zero when no deals pass the filters
+		average = totalBytes / int64(len(deals))
+	}
+
+	if err := json.NewEncoder(w).Encode(&dealAverageResp{
+		Endpoint: "AVERAGE_DEAL_SIZE",
+		Payload:  average,
+ Epoch: epoch,
+ }); err != nil {
+ log.Warnf("failed to write back deal average response: %s", err)
+ return
+ }
+}
+
+type dealTotalResp struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
+}
+
+func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) {
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
+ w.WriteHeader(500)
+ return
+ }
+
+ var totalBytes int64
+ for _, d := range deals {
+ totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
+ }
+
+ if err := json.NewEncoder(w).Encode(&dealTotalResp{
+ Endpoint: "DEAL_BYTES",
+ Payload: totalBytes,
+ Epoch: epoch,
+ }); err != nil {
+		log.Warnf("failed to write back deal total response: %s", err)
+ return
+ }
+
+}
+
+type clientStatsOutput struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload []*clientStats `json:"payload"`
+}
+
+type clientStats struct {
+ Client address.Address `json:"client"`
+ DataSize int64 `json:"data_size"`
+ NumCids int `json:"num_cids"`
+ NumDeals int `json:"num_deals"`
+ NumMiners int `json:"num_miners"`
+
+ cids map[cid.Cid]bool
+ providers map[address.Address]bool
+}
+
+func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) {
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
+ w.WriteHeader(500)
+ return
+ }
+
+ stats := make(map[address.Address]*clientStats)
+
+ for _, d := range deals {
+
+ st, ok := stats[d.deal.Proposal.Client]
+ if !ok {
+ st = &clientStats{
+ Client: d.resolvedWallet,
+ cids: make(map[cid.Cid]bool),
+ providers: make(map[address.Address]bool),
+ }
+ stats[d.deal.Proposal.Client] = st
+ }
+
+ st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded())
+ st.cids[d.deal.Proposal.PieceCID] = true
+ st.providers[d.deal.Proposal.Provider] = true
+ st.NumDeals++
+ }
+
+ out := clientStatsOutput{
+ Epoch: epoch,
+ Endpoint: "CLIENT_DEAL_STATS",
+ Payload: make([]*clientStats, 0, len(stats)),
+ }
+ for _, cs := range stats {
+ cs.NumCids = len(cs.cids)
+ cs.NumMiners = len(cs.providers)
+ out.Payload = append(out.Payload, cs)
+ }
+
+ if err := json.NewEncoder(w).Encode(out); err != nil {
+ log.Warnf("failed to write back client stats response: %s", err)
+ return
+ }
+}
+
+type dealInfo struct {
+ deal api.MarketDeal
+ resolvedWallet address.Address
+}
+
+// filteredDealList returns the current lookback epoch and the deals whose clients
+// are not on the filter list; on error it returns an epoch of 0.
+func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) {
+ ctx := context.Background()
+
+ head, err := dss.api.ChainHead(ctx)
+ if err != nil {
+ log.Warnf("failed to get chain head: %s", err)
+ return 0, nil
+ }
+
+ head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key())
+ if err != nil {
+ log.Warnf("failed to walk back %s epochs: %s", epochLookback, err)
+ return 0, nil
+ }
+
+ // Disabled as per @pooja's request
+ //
+ // // Exclude any address associated with a miner
+ // miners, err := dss.api.StateListMiners(ctx, head.Key())
+ // if err != nil {
+ // log.Warnf("failed to get miner list: %s", err)
+ // return 0, nil
+ // }
+ // for _, m := range miners {
+ // info, err := dss.api.StateMinerInfo(ctx, m, head.Key())
+ // if err != nil {
+ // log.Warnf("failed to get info for known miner '%s': %s", m, err)
+ // continue
+ // }
+
+ // knownFiltered.Store(info.Owner, true)
+ // knownFiltered.Store(info.Worker, true)
+ // for _, a := range info.ControlAddresses {
+ // knownFiltered.Store(a, true)
+ // }
+ // }
+
+ deals, err := dss.api.StateMarketDeals(ctx, head.Key())
+ if err != nil {
+ log.Warnf("failed to get market deals: %s", err)
+ return 0, nil
+ }
+
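+	// resolve each deal's client to a wallet address and drop deals from filtered clients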
+ ret := make(map[string]dealInfo, len(deals))
+ for dealKey, d := range deals {
+
+ // Counting no-longer-active deals as per Pooja's request
+ // // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
+ // if d.State.SectorStartEpoch < 0 {
+ // continue
+ // }
+
+ if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered {
+ continue
+ }
+
+ if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen {
+ w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key())
+ if err != nil {
+ log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err)
+ continue
+ } else {
+ resolvedWallets.Store(d.Proposal.Client, w)
+ }
+ }
+
+ w, _ := resolvedWallets.Load(d.Proposal.Client)
+ if _, isFiltered := knownFiltered.Load(w); isFiltered {
+ continue
+ }
+
+ ret[dealKey] = dealInfo{
+ deal: d,
+ resolvedWallet: w.(address.Address),
+ }
+ }
+
+ return int64(head.Height()), ret
+}
+
+var serveDealStatsCmd = &cli.Command{
+ Name: "serve-deal-stats",
+ Flags: []cli.Flag{},
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ dss := &dealStatsServer{api}
+
+ mux := &http.ServeMux{}
+ mux.HandleFunc("/api/storagedeal/count", dss.handleStorageDealCount)
+ mux.HandleFunc("/api/storagedeal/averagesize", dss.handleStorageDealAverageSize)
+ mux.HandleFunc("/api/storagedeal/totalreal", dss.handleStorageDealTotalReal)
+ mux.HandleFunc("/api/storagedeal/clientstats", dss.handleStorageClientStats)
+
+ s := &http.Server{
+ Addr: ":7272",
+ Handler: mux,
+ }
+
+ go func() {
+ <-ctx.Done()
+ if err := s.Shutdown(context.TODO()); err != nil {
+ log.Error(err)
+ }
+ }()
+
+ list, err := net.Listen("tcp", ":7272") // nolint
+ if err != nil {
+ panic(err)
+ }
+
+ log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String())
+
+ return s.Serve(list)
+ },
+}
diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go
new file mode 100644
index 000000000..c12cbd82d
--- /dev/null
+++ b/cmd/lotus-shed/export.go
@@ -0,0 +1,123 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var exportChainCmd = &cli.Command{
+ Name: "export",
+ Description: "Export chain from repo (requires node to be offline)",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "tipset to export from",
+ },
+ &cli.Int64Flag{
+ Name: "recent-stateroots",
+ },
+ &cli.BoolFlag{
+ Name: "full-state",
+ },
+ &cli.BoolFlag{
+ Name: "skip-old-msgs",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name to write export to"))
+ }
+
+ ctx := context.TODO()
+
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ fi, err := os.Create(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("opening the output file: %w", err)
+ }
+
+ defer fi.Close() //nolint:errcheck
+
+ ds, err := lr.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ mds, err := lr.Datastore("/metadata")
+ if err != nil {
+ return err
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ cs := store.NewChainStore(bs, mds, nil)
+ if err := cs.Load(); err != nil {
+ return err
+ }
+
+ nroots := abi.ChainEpoch(cctx.Int64("recent-stateroots"))
+ fullstate := cctx.Bool("full-state")
+ skipoldmsgs := cctx.Bool("skip-old-msgs")
+
+ var ts *types.TipSet
+ if tss := cctx.String("tipset"); tss != "" {
+ cids, err := lcli.ParseTipSetString(tss)
+ if err != nil {
+ return xerrors.Errorf("failed to parse tipset (%q): %w", tss, err)
+ }
+
+ tsk := types.NewTipSetKey(cids...)
+
+ selts, err := cs.LoadTipSet(tsk)
+ if err != nil {
+ return xerrors.Errorf("loading tipset: %w", err)
+ }
+ ts = selts
+ } else {
+ ts = cs.GetHeaviestTipSet()
+ }
+
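+		// with full-state, keep state roots for every epoch back to genesis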
+ if fullstate {
+ nroots = ts.Height() + 1
+ }
+
+ if err := cs.Export(ctx, ts, nroots, skipoldmsgs, fi); err != nil {
+ return xerrors.Errorf("export failed: %w", err)
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go
index 4ab6458a9..9a47d6561 100644
--- a/cmd/lotus-shed/genesis-verify.go
+++ b/cmd/lotus-shed/genesis-verify.go
@@ -6,6 +6,8 @@ import (
"os"
"sort"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
"github.com/fatih/color"
"github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -13,17 +15,18 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/account"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- saacc "github.com/filecoin-project/specs-actors/actors/builtin/account"
- saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- samsig "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
)
type addrInfo struct {
@@ -90,36 +93,50 @@ var genesisVerifyCmd = &cli.Command{
kminers := make(map[address.Address]minerInfo)
ctx := context.TODO()
+ store := adt.WrapStore(ctx, cst)
if err := stree.ForEach(func(addr address.Address, act *types.Actor) error {
- switch act.Code {
- case builtin.StorageMinerActorCodeID:
- var st saminer.State
- if err := cst.Get(ctx, act.Head, &st); err != nil {
- return err
+ switch {
+ case builtin.IsStorageMinerActor(act.Code):
+ _, err := miner.Load(store, act)
+ if err != nil {
+ return xerrors.Errorf("miner actor: %w", err)
+ }
+ // TODO: actually verify something here?
+ kminers[addr] = minerInfo{}
+ case builtin.IsMultisigActor(act.Code):
+ st, err := multisig.Load(store, act)
+ if err != nil {
+ return xerrors.Errorf("multisig actor: %w", err)
}
- kminers[addr] = minerInfo{}
- case builtin.MultisigActorCodeID:
- var st samsig.State
- if err := cst.Get(ctx, act.Head, &st); err != nil {
+ signers, err := st.Signers()
+ if err != nil {
+ return xerrors.Errorf("multisig actor: %w", err)
+ }
+ threshold, err := st.Threshold()
+ if err != nil {
return xerrors.Errorf("multisig actor: %w", err)
}
kmultisigs[addr] = msigInfo{
Balance: types.FIL(act.Balance),
- Signers: st.Signers,
- Threshold: st.NumApprovalsThreshold,
+ Signers: signers,
+ Threshold: threshold,
}
msigAddrs = append(msigAddrs, addr)
- case builtin.AccountActorCodeID:
- var st saacc.State
- if err := cst.Get(ctx, act.Head, &st); err != nil {
- log.Warn(xerrors.Errorf("account actor %s: %w", addr, err))
+ case builtin.IsAccountActor(act.Code):
+ st, err := account.Load(store, act)
+ if err != nil {
+ // TODO: magik6k: this _used_ to log instead of failing, why?
+ return xerrors.Errorf("account actor %s: %w", addr, err)
+ }
+ pkaddr, err := st.PubkeyAddress()
+ if err != nil {
+ return xerrors.Errorf("failed to get actor pk address %s: %w", addr, err)
}
-
kaccounts[addr] = addrInfo{
- Key: st.Address,
+ Key: pkaddr,
Balance: types.FIL(act.Balance.Copy()),
}
accAddrs = append(accAddrs, addr)
diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go
index 01343c4a3..9cbff953b 100644
--- a/cmd/lotus-shed/import-car.go
+++ b/cmd/lotus-shed/import-car.go
@@ -1,10 +1,13 @@
package main
import (
+ "encoding/hex"
"fmt"
"io"
"os"
+ block "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
"github.com/ipld/go-car"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@@ -81,3 +84,57 @@ var importCarCmd = &cli.Command{
}
},
}
+
+var importObjectCmd = &cli.Command{
+ Name: "import-obj",
+ Usage: "import a raw ipld object into your datastore",
+ Action: func(cctx *cli.Context) error {
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ ds, err := lr.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ c, err := cid.Decode(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ data, err := hex.DecodeString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ blk, err := block.NewBlockWithCid(data, c)
+ if err != nil {
+ return err
+ }
+
+ if err := bs.Put(blk); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/jwt.go b/cmd/lotus-shed/jwt.go
index d37359f71..7fa1a18dd 100644
--- a/cmd/lotus-shed/jwt.go
+++ b/cmd/lotus-shed/jwt.go
@@ -1,6 +1,7 @@
package main
import (
+ "bufio"
"crypto/rand"
"encoding/hex"
"encoding/json"
@@ -8,10 +9,12 @@ import (
"io"
"io/ioutil"
"os"
+ "strings"
"github.com/gbrlsnchs/jwt/v3"
"github.com/urfave/cli/v2"
+ "github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules"
@@ -24,6 +27,102 @@ var jwtCmd = &cli.Command{
having to run the lotus daemon.`,
Subcommands: []*cli.Command{
jwtNewCmd,
+ jwtTokenCmd,
+ },
+}
+
+var jwtTokenCmd = &cli.Command{
+ Name: "token",
+ Usage: "create a token for a given jwt secret",
+ ArgsUsage: "",
+	Description: `JWT tokens have four permission levels (read, write, sign, admin) that control
+	which API methods the holder of the token can invoke.
+
+ This command only works on jwt secrets that are base16 encoded files, such as those produced by the
+ sibling 'new' command.
+ `,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "output",
+ Value: "token",
+			Usage: "specify the name of the output token file",
+ },
+ &cli.BoolFlag{
+ Name: "read",
+ Value: false,
+ Usage: "add read permissions to the token",
+ },
+ &cli.BoolFlag{
+ Name: "write",
+ Value: false,
+ Usage: "add write permissions to the token",
+ },
+ &cli.BoolFlag{
+ Name: "sign",
+ Value: false,
+ Usage: "add sign permissions to the token",
+ },
+ &cli.BoolFlag{
+ Name: "admin",
+ Value: false,
+ Usage: "add admin permissions to the token",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+			return fmt.Errorf("please specify the path to the jwt secret file")
+ }
+
+ inputFile, err := os.Open(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+ defer inputFile.Close() //nolint:errcheck
+ input := bufio.NewReader(inputFile)
+
+ encoded, err := ioutil.ReadAll(input)
+ if err != nil {
+ return err
+ }
+
+ decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
+ if err != nil {
+ return err
+ }
+
+ var keyInfo types.KeyInfo
+ if err := json.Unmarshal(decoded, &keyInfo); err != nil {
+ return err
+ }
+
+ perms := []auth.Permission{}
+
+ if cctx.Bool("read") {
+ perms = append(perms, apistruct.PermRead)
+ }
+
+ if cctx.Bool("write") {
+ perms = append(perms, apistruct.PermWrite)
+ }
+
+ if cctx.Bool("sign") {
+ perms = append(perms, apistruct.PermSign)
+ }
+
+ if cctx.Bool("admin") {
+ perms = append(perms, apistruct.PermAdmin)
+ }
+
+ p := modules.JwtPayload{
+ Allow: perms,
+ }
+
+ token, err := jwt.Sign(&p, jwt.NewHS256(keyInfo.PrivateKey))
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(cctx.String("output"), token, 0600)
},
}
diff --git a/cmd/lotus-shed/keyinfo.go b/cmd/lotus-shed/keyinfo.go
index b2ff97e4e..09be67d1e 100644
--- a/cmd/lotus-shed/keyinfo.go
+++ b/cmd/lotus-shed/keyinfo.go
@@ -9,16 +9,22 @@ import (
"io"
"io/ioutil"
"os"
+ "path"
"strings"
"text/template"
"github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/multiformats/go-base32"
+
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/lp2p"
"github.com/filecoin-project/lotus/node/repo"
@@ -43,6 +49,90 @@ var keyinfoCmd = &cli.Command{
keyinfoNewCmd,
keyinfoInfoCmd,
keyinfoImportCmd,
+ keyinfoVerifyCmd,
+ },
+}
+
+var keyinfoVerifyCmd = &cli.Command{
+ Name: "verify",
+	Usage: "verify that the filename of a keystore object on disk matches its contents",
+	Description: `Keystore objects are named with base32-encoded strings, with wallet keys named after
+	the wallet address. This command checks that the naming of these keystore objects is correct`,
+ Action: func(cctx *cli.Context) error {
+ filePath := cctx.Args().First()
+ fileName := path.Base(filePath)
+
+ inputFile, err := os.Open(filePath)
+ if err != nil {
+ return err
+ }
+ defer inputFile.Close() //nolint:errcheck
+ input := bufio.NewReader(inputFile)
+
+ keyContent, err := ioutil.ReadAll(input)
+ if err != nil {
+ return err
+ }
+
+ var keyInfo types.KeyInfo
+ if err := json.Unmarshal(keyContent, &keyInfo); err != nil {
+ return err
+ }
+
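+		// keystore entries are named with base32-encoded strings; decode the filename and compare it against the name expected for this key type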
+ switch keyInfo.Type {
+ case lp2p.KTLibp2pHost:
+ name, err := base32.RawStdEncoding.DecodeString(fileName)
+ if err != nil {
+ return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
+ }
+
+ if string(name) != keyInfo.Type {
+ return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type)
+ }
+ case modules.KTJwtHmacSecret:
+ name, err := base32.RawStdEncoding.DecodeString(fileName)
+ if err != nil {
+ return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
+ }
+
+ if string(name) != modules.JWTSecretName {
+ return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type)
+ }
+ case wallet.KTSecp256k1, wallet.KTBLS:
+ keystore := wallet.NewMemKeyStore()
+ w, err := wallet.NewWallet(keystore)
+ if err != nil {
+ return err
+ }
+
+ if _, err := w.Import(&keyInfo); err != nil {
+ return err
+ }
+
+ list, err := keystore.List()
+ if err != nil {
+ return err
+ }
+
+ if len(list) != 1 {
+				return fmt.Errorf("unexpected number of keys: expected 1, found %d", len(list))
+ }
+
+ name, err := base32.RawStdEncoding.DecodeString(fileName)
+ if err != nil {
+ return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
+ }
+
+ if string(name) != list[0] {
+ return fmt.Errorf("%s of type %s; file is named for %s, but key is actually %s", fileName, keyInfo.Type, string(name), list[0])
+ }
+
+		default:
+			return fmt.Errorf("unknown key type %s", keyInfo.Type)
+ }
+
+ return nil
},
}
diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go
index 5438a31ef..4542551db 100644
--- a/cmd/lotus-shed/main.go
+++ b/cmd/lotus-shed/main.go
@@ -23,7 +23,9 @@ func main() {
noncefix,
bigIntParseCmd,
staterootCmd,
+ auditsCmd,
importCarCmd,
+ importObjectCmd,
commpToCidCmd,
fetchParamCmd,
proofsCmd,
@@ -31,6 +33,14 @@ func main() {
miscCmd,
mpoolCmd,
genesisVerifyCmd,
+ mathCmd,
+ mpoolStatsCmd,
+ exportChainCmd,
+ consensusCmd,
+ serveDealStatsCmd,
+ syncCmd,
+ stateTreePruneCmd,
+ datastoreCmd,
}
app := &cli.App{
@@ -45,6 +55,13 @@ func main() {
Hidden: true,
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
},
+ &cli.StringFlag{
+ Name: "log-level",
+ Value: "info",
+ },
+ },
+ Before: func(cctx *cli.Context) error {
+ return logging.SetLogLevel("lotus-shed", cctx.String("log-level"))
},
}
diff --git a/cmd/lotus-shed/math.go b/cmd/lotus-shed/math.go
new file mode 100644
index 000000000..434559f09
--- /dev/null
+++ b/cmd/lotus-shed/math.go
@@ -0,0 +1,103 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var mathCmd = &cli.Command{
+ Name: "math",
+	Usage: "utility commands for doing math on a list of numbers",
+ Subcommands: []*cli.Command{
+ mathSumCmd,
+ },
+}
+
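+// readLargeNumbers parses one big integer per line from the reader, skipping blank lines, until EOF.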
+func readLargeNumbers(i io.Reader) ([]types.BigInt, error) {
+ list := []types.BigInt{}
+ reader := bufio.NewReader(i)
+
+ exit := false
+ for {
+ if exit {
+ break
+ }
+
+ line, err := reader.ReadString('\n')
+ if err != nil && err != io.EOF {
+ break
+ }
+ if err == io.EOF {
+ exit = true
+ }
+
+ line = strings.Trim(line, "\n")
+
+ if len(line) == 0 {
+ continue
+ }
+
+ value, err := types.BigFromString(line)
+ if err != nil {
+ return []types.BigInt{}, fmt.Errorf("failed to parse line: %s", line)
+ }
+
+ list = append(list, value)
+ }
+
+ return list, nil
+}
+
+var mathSumCmd = &cli.Command{
+ Name: "sum",
+ Usage: "Sum numbers",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "avg",
+ Value: false,
+ Usage: "Print the average instead of the sum",
+ },
+ &cli.StringFlag{
+ Name: "format",
+ Value: "raw",
+			Usage: "format the number in a more readable way [fil,byte2,byte10]",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ list, err := readLargeNumbers(os.Stdin)
+ if err != nil {
+ return err
+ }
+
+ val := types.NewInt(0)
+ for _, value := range list {
+ val = types.BigAdd(val, value)
+ }
+
+ if cctx.Bool("avg") {
+ val = types.BigDiv(val, types.NewInt(uint64(len(list))))
+ }
+
+ switch cctx.String("format") {
+ case "byte2":
+ fmt.Printf("%s\n", types.SizeStr(val))
+ case "byte10":
+ fmt.Printf("%s\n", types.DeciStr(val))
+ case "fil":
+ fmt.Printf("%s\n", types.FIL(val))
+ case "raw":
+ fmt.Printf("%s\n", val)
+ default:
+			return fmt.Errorf("unknown format %q", cctx.String("format"))
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/mempool-stats.go b/cmd/lotus-shed/mempool-stats.go
new file mode 100644
index 000000000..d70cd4b71
--- /dev/null
+++ b/cmd/lotus-shed/mempool-stats.go
@@ -0,0 +1,275 @@
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "time"
+
+ "contrib.go.opencensus.io/exporter/prometheus"
+ "github.com/ipfs/go-cid"
+ logging "github.com/ipfs/go-log"
+ "github.com/urfave/cli/v2"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ "github.com/filecoin-project/go-address"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var (
+ MpoolAge = stats.Float64("mpoolage", "Age of messages in the mempool", stats.UnitSeconds)
+ MpoolSize = stats.Int64("mpoolsize", "Number of messages in mempool", stats.UnitDimensionless)
+ MpoolInboundRate = stats.Int64("inbound", "Counter for inbound messages", stats.UnitDimensionless)
+ BlockInclusionRate = stats.Int64("inclusion", "Counter for message included in blocks", stats.UnitDimensionless)
+ MsgWaitTime = stats.Float64("msg-wait-time", "Wait time of messages to make it into a block", stats.UnitSeconds)
+)
+
+var (
+ LeTag, _ = tag.NewKey("quantile")
+ MTTag, _ = tag.NewKey("msg_type")
+)
+
+var (
+ AgeView = &view.View{
+ Name: "mpool-age",
+ Measure: MpoolAge,
+ TagKeys: []tag.Key{LeTag, MTTag},
+ Aggregation: view.LastValue(),
+ }
+ SizeView = &view.View{
+ Name: "mpool-size",
+ Measure: MpoolSize,
+ TagKeys: []tag.Key{MTTag},
+ Aggregation: view.LastValue(),
+ }
+ InboundRate = &view.View{
+ Name: "msg-inbound",
+ Measure: MpoolInboundRate,
+ TagKeys: []tag.Key{MTTag},
+ Aggregation: view.Count(),
+ }
+ InclusionRate = &view.View{
+ Name: "msg-inclusion",
+ Measure: BlockInclusionRate,
+ TagKeys: []tag.Key{MTTag},
+ Aggregation: view.Count(),
+ }
+ MsgWait = &view.View{
+ Name: "msg-wait",
+ Measure: MsgWaitTime,
+ TagKeys: []tag.Key{MTTag},
+ Aggregation: view.Distribution(10, 30, 60, 120, 240, 600, 1800, 3600),
+ }
+)
+
+type msgInfo struct {
+ msg *types.SignedMessage
+ seen time.Time
+}
+
+var mpoolStatsCmd = &cli.Command{
+ Name: "mpool-stats",
+ Action: func(cctx *cli.Context) error {
+ logging.SetLogLevel("rpc", "ERROR")
+
+ if err := view.Register(AgeView, SizeView, InboundRate, InclusionRate, MsgWait); err != nil {
+ return err
+ }
+
+ expo, err := prometheus.NewExporter(prometheus.Options{
+ Namespace: "lotusmpool",
+ })
+ if err != nil {
+ return err
+ }
+
+ http.Handle("/debug/metrics", expo)
+
+ go func() {
+ if err := http.ListenAndServe(":10555", nil); err != nil {
+ panic(err)
+ }
+ }()
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ updates, err := api.MpoolSub(ctx)
+ if err != nil {
+ return err
+ }
+
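+		// cache actor-code lookups so repeated addresses don't trigger extra state queries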
+ mcache := make(map[address.Address]bool)
+ isMiner := func(addr address.Address) (bool, error) {
+ cache, ok := mcache[addr]
+ if ok {
+ return cache, nil
+ }
+
+ act, err := api.StateGetActor(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return false, err
+ }
+
+ ism := builtin.IsStorageMinerActor(act.Code)
+ mcache[addr] = ism
+ return ism, nil
+ }
+
+ wpostTracker := make(map[cid.Cid]*msgInfo)
+ tracker := make(map[cid.Cid]*msgInfo)
+ tick := time.Tick(time.Second)
+ for {
+ select {
+ case u, ok := <-updates:
+ if !ok {
+ return fmt.Errorf("connection with lotus node broke")
+ }
+ switch u.Type {
+ case lapi.MpoolAdd:
+ stats.Record(ctx, MpoolInboundRate.M(1))
+ tracker[u.Message.Cid()] = &msgInfo{
+ msg: u.Message,
+ seen: time.Now(),
+ }
+
+ if u.Message.Message.Method == builtin0.MethodsMiner.SubmitWindowedPoSt {
+
+ miner, err := isMiner(u.Message.Message.To)
+ if err != nil {
+						log.Warnf("failed to determine if message recipient is a miner: %s", err)
+ continue
+ }
+
+ if miner {
+ wpostTracker[u.Message.Cid()] = &msgInfo{
+ msg: u.Message,
+ seen: time.Now(),
+ }
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(MTTag, "wpost")}, MpoolInboundRate.M(1))
+ }
+ }
+
+ case lapi.MpoolRemove:
+ mi, ok := tracker[u.Message.Cid()]
+ if ok {
+ fmt.Printf("%s was in the mempool for %s (feecap=%s, prem=%s)\n", u.Message.Cid(), time.Since(mi.seen), u.Message.Message.GasFeeCap, u.Message.Message.GasPremium)
+ stats.Record(ctx, BlockInclusionRate.M(1))
+ stats.Record(ctx, MsgWaitTime.M(time.Since(mi.seen).Seconds()))
+ delete(tracker, u.Message.Cid())
+ }
+
+ wm, ok := wpostTracker[u.Message.Cid()]
+ if ok {
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(MTTag, "wpost")}, BlockInclusionRate.M(1))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(MTTag, "wpost")}, MsgWaitTime.M(time.Since(wm.seen).Seconds()))
+ delete(wpostTracker, u.Message.Cid())
+ }
+ default:
+ return fmt.Errorf("unrecognized mpool update state: %d", u.Type)
+ }
+ case <-tick:
+ var ages []time.Duration
+ if len(tracker) > 0 {
+ for _, v := range tracker {
+ age := time.Since(v.seen)
+ ages = append(ages, age)
+ }
+
+ st := ageStats(ages)
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "40")}, MpoolAge.M(st.Perc40.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "50")}, MpoolAge.M(st.Perc50.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "60")}, MpoolAge.M(st.Perc60.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "70")}, MpoolAge.M(st.Perc70.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "80")}, MpoolAge.M(st.Perc80.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "90")}, MpoolAge.M(st.Perc90.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "95")}, MpoolAge.M(st.Perc95.Seconds()))
+
+ stats.Record(ctx, MpoolSize.M(int64(len(tracker))))
+ fmt.Printf("%d messages in mempool for average of %s, (%s / %s / %s)\n", st.Count, st.Average, st.Perc50, st.Perc80, st.Perc95)
+ }
+
+ var wpages []time.Duration
+ if len(wpostTracker) > 0 {
+ for _, v := range wpostTracker {
+ age := time.Since(v.seen)
+ wpages = append(wpages, age)
+ }
+
+ st := ageStats(wpages)
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "40"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc40.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "50"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc50.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "60"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc60.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "70"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc70.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "80"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc80.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "90"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc90.Seconds()))
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(LeTag, "95"), tag.Upsert(MTTag, "wpost")}, MpoolAge.M(st.Perc95.Seconds()))
+
+ _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(MTTag, "wpost")}, MpoolSize.M(int64(len(wpostTracker))))
+ fmt.Printf("%d wpost messages in mempool for average of %s, (%s / %s / %s)\n", st.Count, st.Average, st.Perc50, st.Perc80, st.Perc95)
+ }
+ }
+ }
+ },
+}
+
+type ageStat struct {
+ Average time.Duration
+ Max time.Duration
+ Perc40 time.Duration
+ Perc50 time.Duration
+ Perc60 time.Duration
+ Perc70 time.Duration
+ Perc80 time.Duration
+ Perc90 time.Duration
+ Perc95 time.Duration
+ Count int
+}
+
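+// ageStats sorts the message ages and reports the count, average, max, and selected percentiles.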
+func ageStats(ages []time.Duration) *ageStat {
+ sort.Slice(ages, func(i, j int) bool {
+ return ages[i] < ages[j]
+ })
+
+ st := ageStat{
+ Count: len(ages),
+ }
+ var sum time.Duration
+ for _, a := range ages {
+ sum += a
+ if a > st.Max {
+ st.Max = a
+ }
+ }
+ st.Average = sum / time.Duration(len(ages))
+
+ p40 := (4 * len(ages)) / 10
+ p50 := len(ages) / 2
+ p60 := (6 * len(ages)) / 10
+ p70 := (7 * len(ages)) / 10
+ p80 := (4 * len(ages)) / 5
+ p90 := (9 * len(ages)) / 10
+ p95 := (19 * len(ages)) / 20
+
+ st.Perc40 = ages[p40]
+ st.Perc50 = ages[p50]
+ st.Perc60 = ages[p60]
+ st.Perc70 = ages[p70]
+ st.Perc80 = ages[p80]
+ st.Perc90 = ages[p90]
+ st.Perc95 = ages[p95]
+
+ return &st
+}
diff --git a/cmd/lotus-shed/nonce-fix.go b/cmd/lotus-shed/nonce-fix.go
index 4824a7bce..8102fd8a9 100644
--- a/cmd/lotus-shed/nonce-fix.go
+++ b/cmd/lotus-shed/nonce-fix.go
@@ -5,6 +5,8 @@ import (
"math"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/chain/types"
@@ -32,6 +34,10 @@ var noncefix = &cli.Command{
&cli.BoolFlag{
Name: "auto",
},
+ &cli.Int64Flag{
+ Name: "gas-fee-cap",
+ Usage: "specify gas fee cap for nonce filling messages",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
@@ -84,12 +90,25 @@ var noncefix = &cli.Command{
}
fmt.Printf("Creating %d filler messages (%d ~ %d)\n", end-start, start, end)
+ ts, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ feeCap := big.Mul(ts.Blocks()[0].ParentBaseFee, big.NewInt(2)) // default fee cap to 2 * parent base fee
+ if fcf := cctx.Int64("gas-fee-cap"); fcf != 0 {
+ feeCap = abi.NewTokenAmount(fcf)
+ }
+
for i := start; i < end; i++ {
msg := &types.Message{
- From: addr,
- To: addr,
- Value: types.NewInt(1),
- Nonce: i,
+ From: addr,
+ To: addr,
+ Value: types.NewInt(0),
+ Nonce: i,
+ GasLimit: 1000000,
+ GasFeeCap: feeCap,
+ GasPremium: abi.NewTokenAmount(5),
}
smsg, err := api.WalletSignMessage(ctx, addr, msg)
if err != nil {
diff --git a/cmd/lotus-shed/proofs.go b/cmd/lotus-shed/proofs.go
index f18dc93fb..2379d8599 100644
--- a/cmd/lotus-shed/proofs.go
+++ b/cmd/lotus-shed/proofs.go
@@ -4,11 +4,13 @@ import (
"encoding/hex"
"fmt"
+ saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/urfave/cli/v2"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
@@ -82,7 +84,7 @@ var verifySealProofCmd = &cli.Command{
snum := abi.SectorNumber(cctx.Uint64("sector-id"))
- ok, err := ffi.VerifySeal(abi.SealVerifyInfo{
+ ok, err := ffi.VerifySeal(saproof.SealVerifyInfo{
SectorID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: snum,
diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go
new file mode 100644
index 000000000..79158c3a3
--- /dev/null
+++ b/cmd/lotus-shed/pruning.go
@@ -0,0 +1,289 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/node/repo"
+ "github.com/ipfs/bbloom"
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+type cidSet interface {
+ Add(cid.Cid)
+ Has(cid.Cid) bool
+ HasRaw([]byte) bool
+ Len() int
+}
+
+type bloomSet struct {
+ bloom *bbloom.Bloom
+}
+
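+// newBloomSet builds a probabilistic cid set backed by a bloom filter sized for the expected element count.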
+func newBloomSet(size int64) (*bloomSet, error) {
+ b, err := bbloom.New(float64(size), 3)
+ if err != nil {
+ return nil, err
+ }
+
+ return &bloomSet{bloom: b}, nil
+}
+
+func (bs *bloomSet) Add(c cid.Cid) {
+ bs.bloom.Add(c.Hash())
+
+}
+
+func (bs *bloomSet) Has(c cid.Cid) bool {
+ return bs.bloom.Has(c.Hash())
+}
+
+func (bs *bloomSet) HasRaw(b []byte) bool {
+ return bs.bloom.Has(b)
+}
+
+func (bs *bloomSet) Len() int {
+ return int(bs.bloom.ElementsAdded())
+}
+
+type mapSet struct {
+ m map[string]struct{}
+}
+
+func newMapSet() *mapSet {
+ return &mapSet{m: make(map[string]struct{})}
+}
+
+func (bs *mapSet) Add(c cid.Cid) {
+ bs.m[string(c.Hash())] = struct{}{}
+}
+
+func (bs *mapSet) Has(c cid.Cid) bool {
+ _, ok := bs.m[string(c.Hash())]
+ return ok
+}
+
+func (bs *mapSet) HasRaw(b []byte) bool {
+ _, ok := bs.m[string(b)]
+ return ok
+}
+
+func (bs *mapSet) Len() int {
+ return len(bs.m)
+}
+
+var stateTreePruneCmd = &cli.Command{
+ Name: "state-prune",
+ Description: "Deletes old state root data from local chainstore",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ &cli.Int64Flag{
+ Name: "keep-from-lookback",
+ Usage: "keep stateroots at or newer than the current height minus this lookback",
+ Value: 1800, // 2 x finality
+ },
+ &cli.IntFlag{
+ Name: "delete-up-to",
+			Usage: "delete up to the given number of objects (used to run a faster 'partial' prune)",
+ },
+ &cli.BoolFlag{
+ Name: "use-bloom-set",
+ Usage: "use a bloom filter for the 'good' set instead of a map, reduces memory usage but may not clean up as much",
+ },
+ &cli.BoolFlag{
+ Name: "dry-run",
+ Usage: "only enumerate the good set, don't do any deletions",
+ },
+ &cli.BoolFlag{
+ Name: "only-ds-gc",
+ Usage: "Only run datastore GC",
+ },
+ &cli.IntFlag{
+ Name: "gc-count",
+ Usage: "number of times to run gc",
+ Value: 20,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.TODO()
+
+ fsrepo, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ lkrepo, err := fsrepo.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+
+ defer lkrepo.Close() //nolint:errcheck
+
+ ds, err := lkrepo.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ defer ds.Close() //nolint:errcheck
+
+ mds, err := lkrepo.Datastore("/metadata")
+ if err != nil {
+ return err
+ }
+ defer mds.Close() //nolint:errcheck
+
+ if cctx.Bool("only-ds-gc") {
+ gcds, ok := ds.(datastore.GCDatastore)
+ if ok {
+ fmt.Println("running datastore gc....")
+ for i := 0; i < cctx.Int("gc-count"); i++ {
+ if err := gcds.CollectGarbage(); err != nil {
+ return xerrors.Errorf("datastore GC failed: %w", err)
+ }
+ }
+ fmt.Println("gc complete!")
+ return nil
+ }
+			return fmt.Errorf("datastore doesn't support gc")
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
+ if err := cs.Load(); err != nil {
+ return fmt.Errorf("loading chainstore: %w", err)
+ }
+
+ var goodSet cidSet
+ if cctx.Bool("use-bloom-set") {
+ bset, err := newBloomSet(10000000)
+ if err != nil {
+ return err
+ }
+ goodSet = bset
+ } else {
+ goodSet = newMapSet()
+ }
+
+ ts := cs.GetHeaviestTipSet()
+
+ rrLb := abi.ChainEpoch(cctx.Int64("keep-from-lookback"))
+
+ if err := cs.WalkSnapshot(ctx, ts, rrLb, true, func(c cid.Cid) error {
+ if goodSet.Len()%20 == 0 {
+ fmt.Printf("\renumerating keep set: %d ", goodSet.Len())
+ }
+ goodSet.Add(c)
+ return nil
+ }); err != nil {
+ return fmt.Errorf("snapshot walk failed: %w", err)
+ }
+
+ fmt.Println()
+ fmt.Printf("Successfully marked keep set! (%d objects)\n", goodSet.Len())
+
+ if cctx.Bool("dry-run") {
+ return nil
+ }
+
+ var b datastore.Batch
+ var batchCount int
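+		// markForRemoval queues a block key for deletion, committing the batch roughly every 100 entries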
+ markForRemoval := func(c cid.Cid) error {
+ if b == nil {
+ nb, err := ds.Batch()
+ if err != nil {
+ return fmt.Errorf("opening batch: %w", err)
+ }
+
+ b = nb
+ }
+ batchCount++
+
+ if err := b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil {
+ return err
+ }
+
+ if batchCount > 100 {
+ if err := b.Commit(); err != nil {
+ return xerrors.Errorf("failed to commit batch deletes: %w", err)
+ }
+ b = nil
+ batchCount = 0
+ }
+ return nil
+ }
+
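+		// scan every block key in the chain datastore and delete anything missing from the keep set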
+ res, err := ds.Query(query.Query{KeysOnly: true})
+ if err != nil {
+ return xerrors.Errorf("failed to query datastore: %w", err)
+ }
+
+ dupTo := cctx.Int("delete-up-to")
+
+ var deleteCount int
+ var goodHits int
+ for {
+ v, ok := res.NextSync()
+ if !ok {
+ break
+ }
+
+ bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):]))
+ if err != nil {
+ return xerrors.Errorf("failed to parse key: %w", err)
+ }
+
+ if goodSet.HasRaw(bk) {
+ goodHits++
+ continue
+ }
+
+ nc := cid.NewCidV1(cid.Raw, bk)
+
+ deleteCount++
+ if err := markForRemoval(nc); err != nil {
+ return fmt.Errorf("failed to remove cid %s: %w", nc, err)
+ }
+
+ if deleteCount%20 == 0 {
+ fmt.Printf("\rdeleting %d objects (good hits: %d)... ", deleteCount, goodHits)
+ }
+
+ if dupTo != 0 && deleteCount > dupTo {
+ break
+ }
+ }
+
+ if b != nil {
+ if err := b.Commit(); err != nil {
+ return xerrors.Errorf("failed to commit final batch delete: %w", err)
+ }
+ }
+
+ gcds, ok := ds.(datastore.GCDatastore)
+ if ok {
+ fmt.Println("running datastore gc....")
+ for i := 0; i < cctx.Int("gc-count"); i++ {
+ if err := gcds.CollectGarbage(); err != nil {
+ return xerrors.Errorf("datastore GC failed: %w", err)
+ }
+ }
+ fmt.Println("gc complete!")
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go
index c02e0202a..023f782bd 100644
--- a/cmd/lotus-shed/stateroot-stats.go
+++ b/cmd/lotus-shed/stateroot-stats.go
@@ -182,6 +182,11 @@ var staterootStatCmd = &cli.Command{
return infos[i].Stat.Size > infos[j].Stat.Size
})
+ var totalActorsSize uint64
+ for _, info := range infos {
+ totalActorsSize += info.Stat.Size
+ }
+
outcap := 10
if cctx.Args().Len() > outcap {
outcap = cctx.Args().Len()
@@ -190,6 +195,15 @@ var staterootStatCmd = &cli.Command{
outcap = len(infos)
}
+ totalStat, err := api.ChainStatObj(ctx, ts.ParentState(), cid.Undef)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Total state tree size: ", totalStat.Size)
+ fmt.Println("Sum of actor state size: ", totalActorsSize)
+ fmt.Println("State tree structure size: ", totalStat.Size-totalActorsSize)
+
fmt.Print("Addr\tType\tSize\n")
for _, inf := range infos[:outcap] {
cmh, err := multihash.Decode(inf.Actor.Code.Hash())
diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go
new file mode 100644
index 000000000..bfe7cc8b7
--- /dev/null
+++ b/cmd/lotus-shed/sync.go
@@ -0,0 +1,64 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/urfave/cli/v2"
+)
+
+var syncCmd = &cli.Command{
+ Name: "sync",
+ Usage: "tools for diagnosing sync issues",
+ Flags: []cli.Flag{},
+ Subcommands: []*cli.Command{
+ syncValidateCmd,
+ },
+}
+
+var syncValidateCmd = &cli.Command{
+ Name: "validate",
+ Usage: "checks whether a provided tipset is valid",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Args().Len() < 1 {
+ fmt.Println("usage: ...")
+ fmt.Println("At least one block cid must be provided")
+ return nil
+ }
+
+ args := cctx.Args().Slice()
+
+ var tscids []cid.Cid
+ for _, s := range args {
+ c, err := cid.Decode(s)
+ if err != nil {
+ return fmt.Errorf("block cid was invalid: %s", err)
+ }
+ tscids = append(tscids, c)
+ }
+
+ tsk := types.NewTipSetKey(tscids...)
+
+ valid, err := api.SyncValidateTipset(ctx, tsk)
+ if err != nil {
+ fmt.Println("Tipset is invalid: ", err)
+ }
+
+ if valid {
+ fmt.Println("Tipset is valid")
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go
index 6e24dc0b6..860498302 100644
--- a/cmd/lotus-shed/verifreg.go
+++ b/cmd/lotus-shed/verifreg.go
@@ -3,17 +3,22 @@ package main
import (
"fmt"
+ "github.com/filecoin-project/go-state-types/big"
+
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -34,29 +39,31 @@ var verifRegCmd = &cli.Command{
}
var verifRegAddVerifierCmd = &cli.Command{
- Name: "add-verifier",
- Usage: "make a given account a verifier",
+ Name: "add-verifier",
+ Usage: "make a given account a verifier",
+	ArgsUsage: "<sender> <verifier> <allowance>",
Action: func(cctx *cli.Context) error {
- fromk, err := address.NewFromString("t3qfoulel6fy6gn3hjmbhpdpf6fs5aqjb5fkurhtwvgssizq4jey5nw4ptq5up6h7jk7frdvvobv52qzmgjinq")
+ if cctx.Args().Len() != 3 {
+ return fmt.Errorf("must specify three arguments: sender, verifier, and allowance")
+ }
+
+ sender, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
- if cctx.Args().Len() != 2 {
- return fmt.Errorf("must specify two arguments: address and allowance")
- }
-
- target, err := address.NewFromString(cctx.Args().Get(0))
+ verifier, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
- allowance, err := types.BigFromString(cctx.Args().Get(1))
+ allowance, err := types.BigFromString(cctx.Args().Get(2))
if err != nil {
return err
}
- params, err := actors.SerializeParams(&verifreg.AddVerifierParams{Address: target, Allowance: allowance})
+ // TODO: ActorUpgrade: Abstract
+ params, err := actors.SerializeParams(&verifreg0.AddVerifierParams{Address: verifier, Allowance: allowance})
if err != nil {
return err
}
@@ -68,21 +75,19 @@ var verifRegAddVerifierCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- msg := &types.Message{
- To: builtin.VerifiedRegistryActorAddr,
- From: fromk,
- Method: builtin.MethodsVerifiedRegistry.AddVerifier,
- Params: params,
- }
-
- smsg, err := api.MpoolPushMessage(ctx, msg, nil)
+ vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
if err != nil {
return err
}
- fmt.Printf("message sent, now waiting on cid: %s\n", smsg.Cid())
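+		// the AddVerifier message must be proposed through the verified-registry root key multisig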
+ smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(builtin0.MethodsVerifiedRegistry.AddVerifier), params)
+ if err != nil {
+ return err
+ }
- mwait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ fmt.Printf("message sent, now waiting on cid: %s\n", smsg)
+
+ mwait, err := api.StateWaitMsg(ctx, smsg, build.MessageConfidence)
if err != nil {
return err
}
@@ -91,6 +96,7 @@ var verifRegAddVerifierCmd = &cli.Command{
return fmt.Errorf("failed to add verifier: %d", mwait.Receipt.ExitCode)
}
+ //TODO: Internal msg might still have failed
return nil
},
@@ -130,7 +136,7 @@ var verifRegVerifyClientCmd = &cli.Command{
return err
}
- params, err := actors.SerializeParams(&verifreg.AddVerifiedClientParams{Address: target, Allowance: allowance})
+ params, err := actors.SerializeParams(&verifreg0.AddVerifiedClientParams{Address: target, Allowance: allowance})
if err != nil {
return err
}
@@ -143,9 +149,9 @@ var verifRegVerifyClientCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
msg := &types.Message{
- To: builtin.VerifiedRegistryActorAddr,
+ To: verifreg.Address,
From: fromk,
- Method: builtin.MethodsVerifiedRegistry.AddVerifiedClient,
+ Method: builtin0.MethodsVerifiedRegistry.AddVerifiedClient,
Params: params,
}
@@ -180,7 +186,7 @@ var verifRegListVerifiersCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- act, err := api.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK)
+ act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
if err != nil {
return err
}
@@ -188,31 +194,14 @@ var verifRegListVerifiersCmd = &cli.Command{
apibs := apibstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
- var st verifreg.State
- if err := store.Get(ctx, act.Head, &st); err != nil {
- return err
- }
-
- vh, err := adt.AsMap(store, st.Verifiers)
+ st, err := verifreg.Load(store, act)
if err != nil {
return err
}
-
- var dcap verifreg.DataCap
- if err := vh.ForEach(&dcap, func(k string) error {
- addr, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return err
- }
-
- fmt.Printf("%s: %s\n", addr, dcap)
-
- return nil
- }); err != nil {
+ return st.ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error {
+ _, err := fmt.Printf("%s: %s\n", addr, dcap)
return err
- }
-
- return nil
+ })
},
}
@@ -227,7 +216,7 @@ var verifRegListClientsCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- act, err := api.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK)
+ act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
if err != nil {
return err
}
@@ -235,31 +224,14 @@ var verifRegListClientsCmd = &cli.Command{
apibs := apibstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
- var st verifreg.State
- if err := store.Get(ctx, act.Head, &st); err != nil {
- return err
- }
-
- vh, err := adt.AsMap(store, st.VerifiedClients)
+ st, err := verifreg.Load(store, act)
if err != nil {
return err
}
-
- var dcap verifreg.DataCap
- if err := vh.ForEach(&dcap, func(k string) error {
- addr, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return err
- }
-
- fmt.Printf("%s: %s\n", addr, dcap)
-
- return nil
- }); err != nil {
+ return st.ForEachClient(func(addr address.Address, dcap abi.StoragePower) error {
+ _, err := fmt.Printf("%s: %s\n", addr, dcap)
return err
- }
-
- return nil
+ })
},
}
@@ -317,7 +289,17 @@ var verifRegCheckVerifierCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- act, err := api.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK)
+ head, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ vid, err := api.StateLookupID(ctx, vaddr, head.Key())
+ if err != nil {
+ return err
+ }
+
+ act, err := api.StateGetActor(ctx, verifreg.Address, head.Key())
if err != nil {
return err
}
@@ -325,20 +307,16 @@ var verifRegCheckVerifierCmd = &cli.Command{
apibs := apibstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
- var st verifreg.State
- if err := store.Get(ctx, act.Head, &st); err != nil {
- return err
- }
-
- vh, err := adt.AsMap(store, st.Verifiers)
+ st, err := verifreg.Load(store, act)
if err != nil {
return err
}
- var dcap verifreg.DataCap
- if found, err := vh.Get(adt.AddrKey(vaddr), &dcap); err != nil {
+ found, dcap, err := st.VerifierDataCap(vid)
+ if err != nil {
return err
- } else if !found {
+ }
+ if !found {
return fmt.Errorf("not found")
}
diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go
index e69a338ab..fa320289e 100644
--- a/cmd/lotus-storage-miner/actor.go
+++ b/cmd/lotus-storage-miner/actor.go
@@ -5,6 +5,9 @@ import (
"os"
"strings"
+ "github.com/filecoin-project/lotus/build"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
"github.com/fatih/color"
"github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
@@ -12,10 +15,11 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types"
@@ -31,6 +35,7 @@ var actorCmd = &cli.Command{
actorSetAddrsCmd,
actorWithdrawCmd,
actorSetPeeridCmd,
+ actorSetOwnerCmd,
actorControl,
},
}
@@ -87,7 +92,7 @@ var actorSetAddrsCmd = &cli.Command{
return err
}
- params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
+ params, err := actors.SerializeParams(&miner0.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
if err != nil {
return err
}
@@ -152,7 +157,7 @@ var actorSetPeeridCmd = &cli.Command{
return err
}
- params, err := actors.SerializeParams(&miner.ChangePeerIDParams{NewID: abi.PeerID(pid)})
+ params, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(pid)})
if err != nil {
return err
}
@@ -225,7 +230,7 @@ var actorWithdrawCmd = &cli.Command{
}
}
- params, err := actors.SerializeParams(&miner.WithdrawBalanceParams{
+ params, err := actors.SerializeParams(&miner0.WithdrawBalanceParams{
AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor
})
if err != nil {
@@ -450,7 +455,7 @@ var actorControlSet = &cli.Command{
return nil
}
- cwp := &miner.ChangeWorkerAddressParams{
+ cwp := &miner0.ChangeWorkerAddressParams{
NewWorker: mi.Worker,
NewControlAddrs: toSet,
}
@@ -477,3 +482,117 @@ var actorControlSet = &cli.Command{
return nil
},
}
+
+var actorSetOwnerCmd = &cli.Command{
+ Name: "set-owner",
+ Usage: "Set owner address",
+ ArgsUsage: "[address]",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "Actually send transaction performing the action",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Bool("really-do-it") {
+ fmt.Println("Pass --really-do-it to actually execute this action")
+ return nil
+ }
+
+ if !cctx.Args().Present() {
+			return fmt.Errorf("must pass address of the new owner")
+ }
+
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ api, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ maddr, err := nodeApi.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ sp, err := actors.SerializeParams(&newAddr)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, err := api.MpoolPushMessage(ctx, &types.Message{
+ From: mi.Owner,
+ To: maddr,
+ Method: builtin2.MethodsMiner.ChangeOwnerAddress,
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Println("Propose Message CID:", smsg.Cid())
+
+ // wait for it to get mined into a block
+ wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+			fmt.Println("Propose owner change failed!")
+			return xerrors.Errorf("propose owner change failed: exit code %d", wait.Receipt.ExitCode)
+ }
+
+ smsg, err = api.MpoolPushMessage(ctx, &types.Message{
+ From: newAddr,
+ To: maddr,
+ Method: builtin2.MethodsMiner.ChangeOwnerAddress,
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Println("Approve Message CID:", smsg.Cid())
+
+ // wait for it to get mined into a block
+ wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+			fmt.Println("Approve owner change failed!")
+			return xerrors.Errorf("approve owner change failed: exit code %d", wait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
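
The propose and approve steps in `set-owner` repeat the same push → wait → check-exit-code sequence. A hypothetical helper consolidating it might look like the sketch below (not part of this diff; `lapi` is the lotus api package alias used elsewhere in this patch):

package main

import (
	"context"
	"fmt"

	"golang.org/x/xerrors"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
)

// pushAndWait pushes a message, waits for it to land with the usual
// confidence, and fails if the receipt reports a non-zero exit code.
func pushAndWait(ctx context.Context, api lapi.FullNode, msg *types.Message) error {
	smsg, err := api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		return xerrors.Errorf("mpool push: %w", err)
	}
	fmt.Println("Message CID:", smsg.Cid())

	wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
	if err != nil {
		return err
	}
	if wait.Receipt.ExitCode != 0 {
		return xerrors.Errorf("message execution failed: exit code %d", wait.Receipt.ExitCode)
	}
	return nil
}
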
diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-storage-miner/allinfo_test.go
new file mode 100644
index 000000000..8f744c4b3
--- /dev/null
+++ b/cmd/lotus-storage-miner/allinfo_test.go
@@ -0,0 +1,78 @@
+package main
+
+import (
+ "flag"
+ "testing"
+ "time"
+
+ "github.com/filecoin-project/lotus/node"
+
+ logging "github.com/ipfs/go-log/v2"
+ "github.com/stretchr/testify/require"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/api/test"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/lib/lotuslog"
+ "github.com/filecoin-project/lotus/node/repo"
+ builder "github.com/filecoin-project/lotus/node/test"
+)
+
+func TestMinerAllInfo(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ _ = logging.SetLogLevel("*", "INFO")
+
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
+
+ _test = true
+
+ lotuslog.SetupLogLevels()
+ logging.SetLogLevel("miner", "ERROR")
+ logging.SetLogLevel("chainstore", "ERROR")
+ logging.SetLogLevel("chain", "ERROR")
+ logging.SetLogLevel("sub", "ERROR")
+ logging.SetLogLevel("storageminer", "ERROR")
+
+ oldDelay := policy.GetPreCommitChallengeDelay()
+ policy.SetPreCommitChallengeDelay(5)
+ t.Cleanup(func() {
+ policy.SetPreCommitChallengeDelay(oldDelay)
+ })
+
+ var n []test.TestNode
+ var sn []test.TestStorageNode
+
+ run := func(t *testing.T) {
+ app := cli.NewApp()
+ app.Metadata = map[string]interface{}{
+ "repoType": repo.StorageMiner,
+ "testnode-full": n[0],
+ "testnode-storage": sn[0],
+ }
+ build.RunningNodeType = build.NodeMiner
+
+ cctx := cli.NewContext(app, flag.NewFlagSet("", flag.ContinueOnError), nil)
+
+ require.NoError(t, infoAllCmd.Action(cctx))
+ }
+
+ bp := func(t *testing.T, nFull int, storage []test.StorageMiner, opts ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
+ n, sn = builder.Builder(t, nFull, storage, opts...)
+
+ t.Run("pre-info-all", run)
+
+ return n, sn
+ }
+
+ test.TestDealFlow(t, bp, time.Second, false, false)
+
+ t.Run("post-info-all", run)
+}
diff --git a/cmd/lotus-storage-miner/backup.go b/cmd/lotus-storage-miner/backup.go
new file mode 100644
index 000000000..cf8c9f912
--- /dev/null
+++ b/cmd/lotus-storage-miner/backup.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var backupCmd = lcli.BackupCmd(FlagMinerRepo, repo.StorageMiner, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) {
+ return lcli.GetStorageMinerAPI(cctx)
+})
diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go
index dbb93c972..213d62e6e 100644
--- a/cmd/lotus-storage-miner/info.go
+++ b/cmd/lotus-storage-miner/info.go
@@ -1,7 +1,6 @@
package main
import (
- "bytes"
"context"
"fmt"
"sort"
@@ -14,15 +13,14 @@ import (
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/blockstore"
@@ -35,6 +33,12 @@ var infoCmd = &cli.Command{
Subcommands: []*cli.Command{
infoAllCmd,
},
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "hide-sectors-info",
+ Usage: "hide sectors info",
+ },
+ },
Action: infoCmdAct,
}
@@ -55,11 +59,6 @@ func infoCmdAct(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
- head, err := api.ChainHead(ctx)
- if err != nil {
- return xerrors.Errorf("getting chain head: %w", err)
- }
-
maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
if err != nil {
return err
@@ -69,15 +68,11 @@ func infoCmdAct(cctx *cli.Context) error {
if err != nil {
return err
}
- var mas miner.State
- {
- rmas, err := api.ChainReadObj(ctx, mact.Head)
- if err != nil {
- return err
- }
- if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
- return err
- }
+
+ tbs := bufbstore.NewTieredBstore(apibstore.NewAPIBlockstore(api), blockstore.NewTemporary())
+ mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
+ if err != nil {
+ return err
}
fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
@@ -112,31 +107,24 @@ func infoCmdAct(cctx *cli.Context) error {
if err != nil {
return err
}
- faults, err := api.StateMinerFaults(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return err
- }
- nfaults, err := faults.Count()
- if err != nil {
- return xerrors.Errorf("counting faults: %w", err)
- }
-
- fmt.Printf("\tCommitted: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Sectors), types.NewInt(uint64(mi.SectorSize)))))
+ proving := secCounts.Active + secCounts.Faulty
+ nfaults := secCounts.Faulty
+ fmt.Printf("\tCommitted: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Live), types.NewInt(uint64(mi.SectorSize)))))
if nfaults == 0 {
- fmt.Printf("\tProving: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Active), types.NewInt(uint64(mi.SectorSize)))))
+ fmt.Printf("\tProving: %s\n", types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))))
} else {
var faultyPercentage float64
- if secCounts.Sectors != 0 {
- faultyPercentage = float64(10000*nfaults/secCounts.Sectors) / 100.
+ if secCounts.Live != 0 {
+ faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100.
}
fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n",
- types.SizeStr(types.BigMul(types.NewInt(secCounts.Sectors), types.NewInt(uint64(mi.SectorSize)))),
+ types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))),
types.SizeStr(types.BigMul(types.NewInt(nfaults), types.NewInt(uint64(mi.SectorSize)))),
faultyPercentage)
}
- if pow.MinerPower.RawBytePower.LessThan(power.ConsensusMinerMinPower) {
+ if !pow.HasMinPower {
fmt.Print("Below minimum power threshold, no blocks will be won")
} else {
expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000
@@ -180,17 +168,21 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes))))
fmt.Println()
- tbs := bufbstore.NewTieredBstore(apibstore.NewAPIBlockstore(api), blockstore.NewTemporary())
- _, err = mas.UnlockVestedFunds(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), head.Height())
+ // NOTE: there's no need to unlock anything here. Funds only
+ // vest on deadline boundaries, and they're unlocked by cron.
+ lockedFunds, err := mas.LockedFunds()
if err != nil {
- return xerrors.Errorf("calculating vested funds: %w", err)
+ return xerrors.Errorf("getting locked funds: %w", err)
+ }
+ availBalance, err := mas.AvailableBalance(mact.Balance)
+ if err != nil {
+ return xerrors.Errorf("getting available balance: %w", err)
}
-
fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance)))
- fmt.Printf("\tPreCommit: %s\n", types.FIL(mas.PreCommitDeposits))
- fmt.Printf("\tPledge: %s\n", types.FIL(mas.InitialPledgeRequirement))
- fmt.Printf("\tLocked: %s\n", types.FIL(mas.LockedFunds))
- color.Green("\tAvailable: %s", types.FIL(mas.GetAvailableBalance(mact.Balance)))
+ fmt.Printf("\tPreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits))
+ fmt.Printf("\tPledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement))
+ fmt.Printf("\tVesting: %s\n", types.FIL(lockedFunds.VestingFunds))
+ color.Green("\tAvailable: %s", types.FIL(availBalance))
wb, err := api.WalletBalance(ctx, mi.Worker)
if err != nil {
return xerrors.Errorf("getting worker balance: %w", err)
@@ -213,10 +205,12 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Printf("Expected Seal Duration: %s\n\n", sealdur)
- fmt.Println("Sectors:")
- err = sectorsInfo(ctx, nodeApi)
- if err != nil {
- return err
+ if !cctx.Bool("hide-sectors-info") {
+ fmt.Println("Sectors:")
+ err = sectorsInfo(ctx, nodeApi)
+ if err != nil {
+ return err
+ }
}
// TODO: grab actr state / info
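
As the new comment explains, funds only vest on deadline boundaries and are unlocked by cron, so `info` reads locked funds straight from state rather than simulating vesting. Roughly, the printed figures relate as in the sketch below, which reuses the `lockedFunds` and `mact` values loaded above (the `AvailableBalance` accessor remains the authoritative number):

locked := types.BigAdd(lockedFunds.PreCommitDeposits,
	types.BigAdd(lockedFunds.InitialPledgeRequirement, lockedFunds.VestingFunds))
approxAvail := types.BigSub(mact.Balance, locked) // ≈ the Available line printed above
_ = approxAvail
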
diff --git a/cmd/lotus-storage-miner/info_all.go b/cmd/lotus-storage-miner/info_all.go
index 265ba78a4..517553028 100644
--- a/cmd/lotus-storage-miner/info_all.go
+++ b/cmd/lotus-storage-miner/info_all.go
@@ -10,6 +10,8 @@ import (
lcli "github.com/filecoin-project/lotus/cli"
)
+var _test = false
+
var infoAllCmd = &cli.Command{
Name: "all",
Usage: "dump all related miner info",
@@ -150,9 +152,11 @@ var infoAllCmd = &cli.Command{
}
}
- fmt.Println("\n#: Goroutines")
- if err := lcli.PprofGoroutines.Action(cctx); err != nil {
- return err
+ if !_test {
+ fmt.Println("\n#: Goroutines")
+ if err := lcli.PprofGoroutines.Action(cctx); err != nil {
+ return err
+ }
}
return nil
diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go
index 01569d5e9..9218faa77 100644
--- a/cmd/lotus-storage-miner/init.go
+++ b/cmd/lotus-storage-miner/init.go
@@ -12,7 +12,7 @@ import (
"path/filepath"
"strconv"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/docker/go-units"
"github.com/google/uuid"
@@ -26,15 +26,16 @@ import (
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
paramfetch "github.com/filecoin-project/go-paramfetch"
+ "github.com/filecoin-project/go-state-types/abi"
+ crypto2 "github.com/filecoin-project/go-state-types/crypto"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- crypto2 "github.com/filecoin-project/specs-actors/actors/crypto"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -44,6 +45,7 @@ import (
lcli "github.com/filecoin-project/lotus/cli"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/genesis"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -113,6 +115,9 @@ var initCmd = &cli.Command{
Usage: "select which address to send actor creation message from",
},
},
+ Subcommands: []*cli.Command{
+ initRestoreCmd,
+ },
Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner")
@@ -179,8 +184,8 @@ var initCmd = &cli.Command{
return err
}
- if !v.APIVersion.EqMajorMinor(build.APIVersion) {
- return xerrors.Errorf("Remote API version didn't match (local %s, remote %s)", build.APIVersion, v.APIVersion)
+ if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) {
+ return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion)
}
log.Info("Initializing repo")
@@ -371,7 +376,7 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size])
}
-func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market.DealProposal) (abi.DealID, error) {
+func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market0.DealProposal) (abi.DealID, error) {
// TODO: find a better way
// (this is only used by genesis miners)
@@ -459,6 +464,12 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
return err
}
+ if jrnl, err := journal.OpenFSJournal(lr, journal.DefaultDisabledEvents); err == nil {
+ journal.J = jrnl
+ } else {
+ return fmt.Errorf("failed to open filesystem journal: %w", err)
+ }
+
m := miner.NewMiner(api, epp, a, slashfilter.New(mds))
{
if err := m.Start(ctx); err != nil {
@@ -559,7 +570,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.
return xerrors.Errorf("getWorkerAddr returned bad address: %w", err)
}
- enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
+ enc, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
if err != nil {
return err
}
@@ -567,7 +578,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.
msg := &types.Message{
To: addr,
From: mi.Worker,
- Method: builtin.MethodsMiner.ChangePeerID,
+ Method: builtin0.MethodsMiner.ChangePeerID,
Params: enc,
Value: types.NewInt(0),
GasPremium: gasPrice,
@@ -626,7 +637,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
return address.Undef, err
}
- params, err := actors.SerializeParams(&power.CreateMinerParams{
+ params, err := actors.SerializeParams(&power0.CreateMinerParams{
Owner: owner,
Worker: worker,
SealProofType: spt,
@@ -646,11 +657,11 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
}
createStorageMinerMsg := &types.Message{
- To: builtin.StoragePowerActorAddr,
+ To: builtin0.StoragePowerActorAddr,
From: sender,
Value: big.Zero(),
- Method: builtin.MethodsPower.CreateMiner,
+ Method: builtin0.MethodsPower.CreateMiner,
Params: params,
GasLimit: 0,
@@ -674,7 +685,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
return address.Undef, xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode)
}
- var retval power.CreateMinerReturn
+ var retval power0.CreateMinerReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil {
return address.Undef, err
}
diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go
new file mode 100644
index 000000000..bdbb99fe0
--- /dev/null
+++ b/cmd/lotus-storage-miner/init_restore.go
@@ -0,0 +1,274 @@
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+
+ "github.com/docker/go-units"
+ "github.com/ipfs/go-datastore"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/mitchellh/go-homedir"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+ "gopkg.in/cheggaaa/pb.v1"
+
+ "github.com/filecoin-project/go-address"
+ paramfetch "github.com/filecoin-project/go-paramfetch"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+ "github.com/filecoin-project/lotus/lib/backupds"
+ "github.com/filecoin-project/lotus/node/config"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var initRestoreCmd = &cli.Command{
+ Name: "restore",
+ Usage: "Initialize a lotus miner repo from a backup",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "nosync",
+ Usage: "don't check full-node sync status",
+ },
+ &cli.StringFlag{
+ Name: "config",
+ Usage: "config file (config.toml)",
+ },
+ &cli.StringFlag{
+ Name: "storage-config",
+ Usage: "storage paths config (storage.json)",
+ },
+ },
+ ArgsUsage: "[backupFile]",
+ Action: func(cctx *cli.Context) error {
+ log.Info("Initializing lotus miner using a backup")
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ log.Info("Trying to connect to full node RPC")
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ log.Info("Checking full node version")
+
+ ctx := lcli.ReqContext(cctx)
+
+ v, err := api.Version(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) {
+ return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion)
+ }
+
+ if !cctx.Bool("nosync") {
+ if err := lcli.SyncWait(ctx, api); err != nil {
+ return xerrors.Errorf("sync wait: %w", err)
+ }
+ }
+
+ bf, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("expand backup file path: %w", err)
+ }
+
+ st, err := os.Stat(bf)
+ if err != nil {
+ return xerrors.Errorf("stat backup file (%s): %w", bf, err)
+ }
+
+ f, err := os.Open(bf)
+ if err != nil {
+ return xerrors.Errorf("opening backup file: %w", err)
+ }
+ defer f.Close() // nolint:errcheck
+
+ log.Info("Checking if repo exists")
+
+ repoPath := cctx.String(FlagMinerRepo)
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return err
+ }
+
+ ok, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
+ }
+
+ log.Info("Initializing repo")
+
+ if err := r.Init(repo.StorageMiner); err != nil {
+ return err
+ }
+
+ lr, err := r.Lock(repo.StorageMiner)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ if cctx.IsSet("config") {
+ log.Info("Restoring config")
+
+ cf, err := homedir.Expand(cctx.String("config"))
+ if err != nil {
+ return xerrors.Errorf("expanding config path: %w", err)
+ }
+
+ _, err = os.Stat(cf)
+ if err != nil {
+ return xerrors.Errorf("stat config file (%s): %w", cf, err)
+ }
+
+ var cerr error
+ err = lr.SetConfig(func(raw interface{}) {
+ rcfg, ok := raw.(*config.StorageMiner)
+ if !ok {
+ cerr = xerrors.New("expected miner config")
+ return
+ }
+
+ ff, err := config.FromFile(cf, rcfg)
+ if err != nil {
+ cerr = xerrors.Errorf("loading config: %w", err)
+ return
+ }
+
+ *rcfg = *ff.(*config.StorageMiner)
+ })
+ if cerr != nil {
+ return cerr
+ }
+ if err != nil {
+ return xerrors.Errorf("setting config: %w", err)
+ }
+
+ } else {
+ log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
+ }
+
+ if cctx.IsSet("storage-config") {
+ log.Info("Restoring storage path config")
+
+ cf, err := homedir.Expand(cctx.String("storage-config"))
+ if err != nil {
+ return xerrors.Errorf("expanding storage config path: %w", err)
+ }
+
+ cfb, err := ioutil.ReadFile(cf)
+ if err != nil {
+ return xerrors.Errorf("reading storage config: %w", err)
+ }
+
+ var cerr error
+ err = lr.SetStorage(func(scfg *stores.StorageConfig) {
+ cerr = json.Unmarshal(cfb, scfg)
+ })
+ if cerr != nil {
+ return xerrors.Errorf("unmarshalling storage config: %w", cerr)
+ }
+ if err != nil {
+ return xerrors.Errorf("setting storage config: %w", err)
+ }
+ } else {
+ log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
+ }
+
+ log.Info("Restoring metadata backup")
+
+ mds, err := lr.Datastore("/metadata")
+ if err != nil {
+ return err
+ }
+
+ bar := pb.New64(st.Size())
+ br := bar.NewProxyReader(f)
+ bar.ShowTimeLeft = true
+ bar.ShowPercent = true
+ bar.ShowSpeed = true
+ bar.Units = pb.U_BYTES
+
+ bar.Start()
+ err = backupds.RestoreInto(br, mds)
+ bar.Finish()
+
+ if err != nil {
+ return xerrors.Errorf("restoring metadata: %w", err)
+ }
+
+ log.Info("Checking actor metadata")
+
+ abytes, err := mds.Get(datastore.NewKey("miner-address"))
+ if err != nil {
+ return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
+ }
+
+ maddr, err := address.NewFromBytes(abytes)
+ if err != nil {
+ return xerrors.Errorf("parsing actor address: %w", err)
+ }
+
+ log.Info("ACTOR ADDRESS: ", maddr.String())
+
+ mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
+
+ log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))
+
+ wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("resolving worker key: %w", err)
+ }
+
+ has, err := api.WalletHas(ctx, wk)
+ if err != nil {
+ return xerrors.Errorf("checking worker address: %w", err)
+ }
+
+ if !has {
+ return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
+ }
+
+ log.Info("Checking proof parameters")
+
+ if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil {
+ return xerrors.Errorf("fetching proof parameters: %w", err)
+ }
+
+ log.Info("Initializing libp2p identity")
+
+ p2pSk, err := makeHostKey(lr)
+ if err != nil {
+ return xerrors.Errorf("make host key: %w", err)
+ }
+
+ peerid, err := peer.IDFromPrivateKey(p2pSk)
+ if err != nil {
+ return xerrors.Errorf("peer ID from private key: %w", err)
+ }
+
+ log.Info("Configuring miner actor")
+
+ if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-storage-miner/main.go
index cc704f891..671f75cf0 100644
--- a/cmd/lotus-storage-miner/main.go
+++ b/cmd/lotus-storage-miner/main.go
@@ -26,6 +26,8 @@ const FlagMinerRepo = "miner-repo"
const FlagMinerRepoDeprecation = "storagerepo"
func main() {
+ build.RunningNodeType = build.NodeMiner
+
lotuslog.SetupLogLevels()
local := []*cli.Command{
@@ -33,6 +35,7 @@ func main() {
runCmd,
stopCmd,
configCmd,
+ backupCmd,
lcli.WithCategory("chain", actorCmd),
lcli.WithCategory("chain", infoCmd),
lcli.WithCategory("market", storageDealsCmd),
diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go
index 39671f74c..bb1ebd9ec 100644
--- a/cmd/lotus-storage-miner/market.go
+++ b/cmd/lotus-storage-miner/market.go
@@ -20,7 +20,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
@@ -154,21 +154,15 @@ var setAskCmd = &cli.Command{
Name: "set-ask",
Usage: "Configure the miner's ask",
Flags: []cli.Flag{
- &cli.Uint64Flag{
+ &cli.StringFlag{
Name: "price",
- Usage: "Set the price of the ask for unverified deals (specified as FIL / GiB / Epoch) to `PRICE`",
- Required: true,
- },
- &cli.Uint64Flag{
- Name: "verified-price",
- Usage: "Set the price of the ask for verified deals (specified as FIL / GiB / Epoch) to `PRICE`",
+ Usage: "Set the price of the ask for unverified deals (specified as FIL / GiB / Epoch) to `PRICE`.",
Required: true,
},
&cli.StringFlag{
- Name: "duration",
- Usage: "Set duration of ask (a quantity of time after which the ask expires) `DURATION`",
- DefaultText: "720h0m0s",
- Value: "720h0m0s",
+ Name: "verified-price",
+ Usage: "Set the price of the ask for verified deals (specified as FIL / GiB / Epoch) to `PRICE`",
+ Required: true,
},
&cli.StringFlag{
Name: "min-piece-size",
@@ -191,10 +185,17 @@ var setAskCmd = &cli.Command{
}
defer closer()
- pri := types.NewInt(cctx.Uint64("price"))
- vpri := types.NewInt(cctx.Uint64("verified-price"))
+ pri, err := types.ParseFIL(cctx.String("price"))
+ if err != nil {
+ return err
+ }
- dur, err := time.ParseDuration(cctx.String("duration"))
+ vpri, err := types.ParseFIL(cctx.String("verified-price"))
+ if err != nil {
+ return err
+ }
+
+ dur, err := time.ParseDuration("720h0m0s")
if err != nil {
return xerrors.Errorf("cannot parse duration: %w", err)
}
@@ -235,7 +236,7 @@ var setAskCmd = &cli.Command{
return xerrors.Errorf("max piece size (w/bit-padding) %s cannot exceed miner sector size %s", types.SizeStr(types.NewInt(uint64(max))), types.SizeStr(types.NewInt(uint64(smax))))
}
- return api.MarketSetAsk(ctx, pri, vpri, abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max))
+ return api.MarketSetAsk(ctx, types.BigInt(pri), types.BigInt(vpri), abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max))
},
}
@@ -287,7 +288,7 @@ var getAskCmd = &cli.Command{
rem = (time.Second * time.Duration(int64(dlt)*int64(build.BlockDelaySecs))).String()
}
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\t%d\n", ask.Price, ask.VerifiedPrice, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo)
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\t%d\n", types.FIL(ask.Price), types.FIL(ask.VerifiedPrice), types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo)
return w.Flush()
},
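
`set-ask` now takes `--price` and `--verified-price` as human-readable FIL strings instead of raw attoFIL integers, and the ask duration is fixed at 720h. A minimal sketch of the new parsing and the conversion `MarketSetAsk` receives (the price value is illustrative only):

pri, err := types.ParseFIL("0.0000000005") // illustrative FIL / GiB / epoch
if err != nil {
	return err
}
attoPrice := types.BigInt(pri) // attoFIL BigInt passed to MarketSetAsk
_ = attoPrice
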
diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go
index 502edb57b..377b81d32 100644
--- a/cmd/lotus-storage-miner/proving.go
+++ b/cmd/lotus-storage-miner/proving.go
@@ -1,20 +1,18 @@
package main
import (
- "bytes"
"fmt"
"os"
+ "strconv"
"text/tabwriter"
- "time"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-
- "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -25,6 +23,7 @@ var provingCmd = &cli.Command{
Subcommands: []*cli.Command{
provingInfoCmd,
provingDeadlinesCmd,
+ provingDeadlineInfoCmd,
provingFaultsCmd,
},
}
@@ -49,54 +48,41 @@ var provingFaultsCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
+ stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))
+
maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
if err != nil {
return err
}
- var mas miner.State
- {
- mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return err
- }
- rmas, err := api.ChainReadObj(ctx, mact.Head)
- if err != nil {
- return err
- }
- if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
- return err
- }
+ mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ mas, err := miner.Load(stor, mact)
+ if err != nil {
+ return err
}
fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
- head, err := api.ChainHead(ctx)
- if err != nil {
- return xerrors.Errorf("getting chain head: %w", err)
- }
- deadlines, err := api.StateMinerDeadlines(ctx, maddr, head.Key())
- if err != nil {
- return xerrors.Errorf("getting miner deadlines: %w", err)
- }
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors")
- for dlIdx := range deadlines {
- partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
- if err != nil {
- return xerrors.Errorf("loading partitions for deadline %d: %w", dlIdx, err)
- }
-
- for partIdx, partition := range partitions {
- faulty, err := partition.Faults.All(10000000)
+ err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
+ return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
+ faults, err := part.FaultySectors()
if err != nil {
return err
}
-
- for _, num := range faulty {
+ return faults.ForEach(func(num uint64) error {
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num)
- }
- }
+ return nil
+ })
+ })
+ })
+ if err != nil {
+ return err
}
return tw.Flush()
},
@@ -132,67 +118,63 @@ var provingInfoCmd = &cli.Command{
return xerrors.Errorf("getting chain head: %w", err)
}
+ mact, err := api.StateGetActor(ctx, maddr, head.Key())
+ if err != nil {
+ return err
+ }
+
+ stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))
+
+ mas, err := miner.Load(stor, mact)
+ if err != nil {
+ return err
+ }
+
cd, err := api.StateMinerProvingDeadline(ctx, maddr, head.Key())
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
- deadlines, err := api.StateMinerDeadlines(ctx, maddr, head.Key())
- if err != nil {
- return xerrors.Errorf("getting miner deadlines: %w", err)
- }
-
fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
- var mas miner.State
- {
- mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return err
- }
- rmas, err := api.ChainReadObj(ctx, mact.Head)
- if err != nil {
- return err
- }
- if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
- return err
- }
- }
-
- parts := map[uint64][]*miner.Partition{}
- for dlIdx := range deadlines {
- part, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
- if err != nil {
- return xerrors.Errorf("getting miner partition: %w", err)
- }
-
- parts[uint64(dlIdx)] = part
- }
-
proving := uint64(0)
faults := uint64(0)
recovering := uint64(0)
+ curDeadlineSectors := uint64(0)
- for _, partitions := range parts {
- for _, partition := range partitions {
- sc, err := partition.Sectors.Count()
- if err != nil {
- return xerrors.Errorf("count partition sectors: %w", err)
+ if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
+ return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
+ if bf, err := part.LiveSectors(); err != nil {
+ return err
+ } else if count, err := bf.Count(); err != nil {
+ return err
+ } else {
+ proving += count
+ if dlIdx == cd.Index {
+ curDeadlineSectors += count
+ }
}
- proving += sc
- fc, err := partition.Faults.Count()
- if err != nil {
- return xerrors.Errorf("count partition faults: %w", err)
+ if bf, err := part.FaultySectors(); err != nil {
+ return err
+ } else if count, err := bf.Count(); err != nil {
+ return err
+ } else {
+ faults += count
}
- faults += fc
- rc, err := partition.Recoveries.Count()
- if err != nil {
- return xerrors.Errorf("count partition recoveries: %w", err)
+ if bf, err := part.RecoveringSectors(); err != nil {
+ return err
+ } else if count, err := bf.Count(); err != nil {
+ return err
+ } else {
+ recovering += count
}
- recovering += rc
- }
+
+ return nil
+ })
+ }); err != nil {
+ return xerrors.Errorf("walking miner deadlines and partitions: %w", err)
}
var faultPerc float64
@@ -202,49 +184,23 @@ var provingInfoCmd = &cli.Command{
fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)
- fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%miner.WPoStProvingPeriod)
- fmt.Printf("Proving Period Start: %s\n", epochTime(cd.CurrentEpoch, cd.PeriodStart))
- fmt.Printf("Next Period Start: %s\n\n", epochTime(cd.CurrentEpoch, cd.PeriodStart+miner.WPoStProvingPeriod))
+ fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod)
+ fmt.Printf("Proving Period Start: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart))
+ fmt.Printf("Next Period Start: %s\n\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod))
fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
fmt.Printf("Recovering: %d\n", recovering)
fmt.Printf("Deadline Index: %d\n", cd.Index)
-
- if cd.Index < miner.WPoStPeriodDeadlines {
- curDeadlineSectors := uint64(0)
- for _, partition := range parts[cd.Index] {
- sc, err := partition.Sectors.Count()
- if err != nil {
- return xerrors.Errorf("counting current deadline sectors: %w", err)
- }
- curDeadlineSectors += sc
- }
-
- fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
- }
-
- fmt.Printf("Deadline Open: %s\n", epochTime(cd.CurrentEpoch, cd.Open))
- fmt.Printf("Deadline Close: %s\n", epochTime(cd.CurrentEpoch, cd.Close))
- fmt.Printf("Deadline Challenge: %s\n", epochTime(cd.CurrentEpoch, cd.Challenge))
- fmt.Printf("Deadline FaultCutoff: %s\n", epochTime(cd.CurrentEpoch, cd.FaultCutoff))
+ fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
+ fmt.Printf("Deadline Open: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Open))
+ fmt.Printf("Deadline Close: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Close))
+ fmt.Printf("Deadline Challenge: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Challenge))
+ fmt.Printf("Deadline FaultCutoff: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.FaultCutoff))
return nil
},
}
-func epochTime(curr, e abi.ChainEpoch) string {
- switch {
- case curr > e:
- return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
- case curr == e:
- return fmt.Sprintf("%d (now)", e)
- case curr < e:
- return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
- }
-
- panic("math broke")
-}
-
var provingDeadlinesCmd = &cli.Command{
Name: "deadlines",
Usage: "View the current proving period deadlines information",
@@ -280,21 +236,6 @@ var provingDeadlinesCmd = &cli.Command{
return xerrors.Errorf("getting deadlines: %w", err)
}
- var mas miner.State
- {
- mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return err
- }
- rmas, err := api.ChainReadObj(ctx, mact.Head)
- if err != nil {
- return err
- }
- if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
- return err
- }
- }
-
fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
@@ -315,14 +256,14 @@ var provingDeadlinesCmd = &cli.Command{
faults := uint64(0)
for _, partition := range partitions {
- sc, err := partition.Sectors.Count()
+ sc, err := partition.AllSectors.Count()
if err != nil {
return err
}
sectors += sc
- fc, err := partition.Faults.Count()
+ fc, err := partition.FaultySectors.Count()
if err != nil {
return err
}
@@ -340,3 +281,93 @@ var provingDeadlinesCmd = &cli.Command{
return tw.Flush()
},
}
+
+var provingDeadlineInfoCmd = &cli.Command{
+ Name: "deadline",
+	Usage:     "View the current proving period deadline information by its index",
+	ArgsUsage: "<deadlineIdx>",
+ Action: func(cctx *cli.Context) error {
+
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("must pass deadline index")
+ }
+
+ dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("could not parse deadline index: %w", err)
+ }
+
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ api, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ if err != nil {
+ return err
+ }
+
+ deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting deadlines: %w", err)
+ }
+
+ di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting deadlines: %w", err)
+ }
+
+ partitions, err := api.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
+ }
+
+ provenPartitions, err := deadlines[dlIdx].PostSubmissions.Count()
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Deadline Index: %d\n", dlIdx)
+ fmt.Printf("Partitions: %d\n", len(partitions))
+ fmt.Printf("Proven Partitions: %d\n", provenPartitions)
+ fmt.Printf("Current: %t\n\n", di.Index == dlIdx)
+
+ for pIdx, partition := range partitions {
+ sectorCount, err := partition.AllSectors.Count()
+ if err != nil {
+ return err
+ }
+
+ sectorNumbers, err := partition.AllSectors.All(sectorCount)
+ if err != nil {
+ return err
+ }
+
+ faultsCount, err := partition.FaultySectors.Count()
+ if err != nil {
+ return err
+ }
+
+ fn, err := partition.FaultySectors.All(faultsCount)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Partition Index: %d\n", pIdx)
+ fmt.Printf("Sectors: %d\n", sectorCount)
+ fmt.Printf("Sector Numbers: %v\n", sectorNumbers)
+ fmt.Printf("Faults: %d\n", faultsCount)
+ fmt.Printf("Faulty Sectors: %d\n", fn)
+ }
+ return nil
+ },
+}
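
The proving commands now walk miner state through the version-agnostic wrapper in chain/actors/builtin/miner rather than unmarshalling specs-actors state directly. A compact sketch of the pattern, assuming the same `api`, `stor`, and `maddr` values used above:

mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
	return err
}
mas, err := miner.Load(stor, mact)
if err != nil {
	return err
}
var faulty uint64
err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
	return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
		bf, err := part.FaultySectors()
		if err != nil {
			return err
		}
		c, err := bf.Count()
		if err != nil {
			return err
		}
		faulty += c
		return nil
	})
})
if err != nil {
	return err
}
fmt.Printf("total faulty sectors: %d\n", faulty)
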
diff --git a/cmd/lotus-storage-miner/retrieval-deals.go b/cmd/lotus-storage-miner/retrieval-deals.go
index df194978d..03d397852 100644
--- a/cmd/lotus-storage-miner/retrieval-deals.go
+++ b/cmd/lotus-storage-miner/retrieval-deals.go
@@ -5,9 +5,12 @@ import (
"os"
"text/tabwriter"
+ "github.com/docker/go-units"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/urfave/cli/v2"
+ "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -17,6 +20,8 @@ var retrievalDealsCmd = &cli.Command{
Subcommands: []*cli.Command{
retrievalDealSelectionCmd,
retrievalDealsListCmd,
+ retrievalSetAskCmd,
+ retrievalGetAskCmd,
},
}
@@ -154,3 +159,112 @@ var retrievalDealsListCmd = &cli.Command{
return w.Flush()
},
}
+
+var retrievalSetAskCmd = &cli.Command{
+ Name: "set-ask",
+ Usage: "Configure the provider's retrieval ask",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "price",
+ Usage: "Set the price of the ask for retrievals (FIL/GiB)",
+ },
+ &cli.StringFlag{
+ Name: "unseal-price",
+ Usage: "Set the price to unseal",
+ },
+ &cli.StringFlag{
+ Name: "payment-interval",
+ Usage: "Set the payment interval (in bytes) for retrieval",
+ DefaultText: "1MiB",
+ },
+ &cli.StringFlag{
+ Name: "payment-interval-increase",
+ Usage: "Set the payment interval increase (in bytes) for retrieval",
+ DefaultText: "1MiB",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.DaemonContext(cctx)
+
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ask, err := api.MarketGetRetrievalAsk(ctx)
+ if err != nil {
+ return err
+ }
+
+ if cctx.IsSet("price") {
+ v, err := types.ParseFIL(cctx.String("price"))
+ if err != nil {
+ return err
+ }
+ ask.PricePerByte = types.BigDiv(types.BigInt(v), types.NewInt(1<<30))
+ }
+
+ if cctx.IsSet("unseal-price") {
+ v, err := types.ParseFIL(cctx.String("unseal-price"))
+ if err != nil {
+ return err
+ }
+ ask.UnsealPrice = abi.TokenAmount(v)
+ }
+
+ if cctx.IsSet("payment-interval") {
+ v, err := units.RAMInBytes(cctx.String("payment-interval"))
+ if err != nil {
+ return err
+ }
+ ask.PaymentInterval = uint64(v)
+ }
+
+ if cctx.IsSet("payment-interval-increase") {
+ v, err := units.RAMInBytes(cctx.String("payment-interval-increase"))
+ if err != nil {
+ return err
+ }
+ ask.PaymentIntervalIncrease = uint64(v)
+ }
+
+ return api.MarketSetRetrievalAsk(ctx, ask)
+ },
+}
+
+var retrievalGetAskCmd = &cli.Command{
+ Name: "get-ask",
+ Usage: "Get the provider's current retrieval ask",
+ Flags: []cli.Flag{},
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.DaemonContext(cctx)
+
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ask, err := api.MarketGetRetrievalAsk(ctx)
+ if err != nil {
+ return err
+ }
+
+ w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
+ fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n")
+ if ask == nil {
+ fmt.Fprintf(w, "\n")
+ return w.Flush()
+ }
+
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
+ types.FIL(ask.PricePerByte),
+ types.FIL(ask.UnsealPrice),
+ units.BytesSize(float64(ask.PaymentInterval)),
+ units.BytesSize(float64(ask.PaymentIntervalIncrease)),
+ )
+ return w.Flush()
+
+ },
+}
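
The retrieval ask stores a per-byte price, while `--price` is given in FIL per GiB, hence the division by 2^30 above. A small sketch of that conversion, assuming the `ask` fetched via `MarketGetRetrievalAsk` (the value is illustrative):

perGiB, err := types.ParseFIL("0.01") // illustrative: 0.01 FIL per GiB
if err != nil {
	return err
}
ask.PricePerByte = types.BigDiv(types.BigInt(perGiB), types.NewInt(1<<30)) // attoFIL per byte
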
diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go
index 7c88b74c4..98a9cfaba 100644
--- a/cmd/lotus-storage-miner/run.go
+++ b/cmd/lotus-storage-miner/run.go
@@ -33,7 +33,7 @@ var runCmd = &cli.Command{
Usage: "Start a lotus miner process",
Flags: []cli.Flag{
&cli.StringFlag{
- Name: "api",
+ Name: "miner-api",
Usage: "2345",
},
&cli.BoolFlag{
@@ -61,7 +61,7 @@ var runCmd = &cli.Command{
nodeApi, ncloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
- return err
+ return xerrors.Errorf("getting full node api: %w", err)
}
defer ncloser()
ctx := lcli.DaemonContext(cctx)
@@ -77,8 +77,8 @@ var runCmd = &cli.Command{
}
}
- if v.APIVersion != build.APIVersion {
- return xerrors.Errorf("lotus-daemon API version doesn't match: local: %s", api.Version{APIVersion: build.APIVersion})
+ if v.APIVersion != build.FullAPIVersion {
+ return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.Version{APIVersion: build.FullAPIVersion})
}
log.Info("Checking full node sync status")
@@ -112,29 +112,29 @@ var runCmd = &cli.Command{
node.Online(),
node.Repo(r),
- node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
+ node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") },
node.Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
- return multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" + cctx.String("api"))
+ return multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" + cctx.String("miner-api"))
})),
node.Override(new(api.FullNode), nodeApi),
)
if err != nil {
- return err
+ return xerrors.Errorf("creating node: %w", err)
}
endpoint, err := r.APIEndpoint()
if err != nil {
- return err
+ return xerrors.Errorf("getting API endpoint: %w", err)
}
// Bootstrap with full node
remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
if err != nil {
- return err
+ return xerrors.Errorf("getting full node libp2p address: %w", err)
}
if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
- return err
+ return xerrors.Errorf("connecting to full node (libp2p): %w", err)
}
log.Infof("Remote version %s", v)
@@ -163,8 +163,10 @@ var runCmd = &cli.Command{
sigChan := make(chan os.Signal, 2)
go func() {
select {
- case <-sigChan:
+ case sig := <-sigChan:
+ log.Warnw("received shutdown", "signal", sig)
case <-shutdownChan:
+ log.Warn("received shutdown")
}
log.Warn("Shutting down...")
diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go
index 8bd8ff6a5..b50f4a86d 100644
--- a/cmd/lotus-storage-miner/sectors.go
+++ b/cmd/lotus-storage-miner/sectors.go
@@ -5,19 +5,25 @@ import (
"os"
"sort"
"strconv"
- "text/tabwriter"
"time"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-
+ "github.com/docker/go-units"
+ "github.com/fatih/color"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/lib/tablewriter"
+
lcli "github.com/filecoin-project/lotus/cli"
+ sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)
var sectorsCmd = &cli.Command{
@@ -136,7 +142,24 @@ var sectorsStatusCmd = &cli.Command{
var sectorsListCmd = &cli.Command{
Name: "list",
Usage: "List sectors",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "show-removed",
+ Usage: "show removed sectors",
+ },
+ &cli.BoolFlag{
+ Name: "color",
+ Aliases: []string{"c"},
+ Value: true,
+ },
+ &cli.BoolFlag{
+ Name: "fast",
+ Usage: "don't show on-chain info for better performance",
+ },
+ },
Action: func(cctx *cli.Context) error {
+ color.NoColor = !cctx.Bool("color")
+
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
@@ -161,53 +184,114 @@ var sectorsListCmd = &cli.Command{
return err
}
- activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
+ head, err := fullApi.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, head.Key())
if err != nil {
return err
}
activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
for _, info := range activeSet {
- activeIDs[info.ID] = struct{}{}
+ activeIDs[info.SectorNumber] = struct{}{}
}
- sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, true, types.EmptyTSK)
+ sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, head.Key())
if err != nil {
return err
}
commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
for _, info := range sset {
- commitedIDs[info.ID] = struct{}{}
+ commitedIDs[info.SectorNumber] = struct{}{}
}
sort.Slice(list, func(i, j int) bool {
return list[i] < list[j]
})
- w := tabwriter.NewWriter(os.Stdout, 8, 4, 1, ' ', 0)
+ tw := tablewriter.New(
+ tablewriter.Col("ID"),
+ tablewriter.Col("State"),
+ tablewriter.Col("OnChain"),
+ tablewriter.Col("Active"),
+ tablewriter.Col("Expiration"),
+ tablewriter.Col("Deals"),
+ tablewriter.Col("DealWeight"),
+ tablewriter.NewLineCol("Error"),
+ tablewriter.NewLineCol("EarlyExpiration"))
+
+ fast := cctx.Bool("fast")
for _, s := range list {
- st, err := nodeApi.SectorsStatus(ctx, s, false)
+ st, err := nodeApi.SectorsStatus(ctx, s, !fast)
if err != nil {
- fmt.Fprintf(w, "%d:\tError: %s\n", s, err)
+ tw.Write(map[string]interface{}{
+ "ID": s,
+ "Error": err,
+ })
continue
}
- _, inSSet := commitedIDs[s]
- _, inASet := activeIDs[s]
+ if cctx.Bool("show-removed") || st.State != api.SectorState(sealing.Removed) {
+ _, inSSet := commitedIDs[s]
+ _, inASet := activeIDs[s]
- fmt.Fprintf(w, "%d: %s\tsSet: %s\tactive: %s\ttktH: %d\tseedH: %d\tdeals: %v\t toUpgrade:%t\n",
- s,
- st.State,
- yesno(inSSet),
- yesno(inASet),
- st.Ticket.Epoch,
- st.Seed.Epoch,
- st.Deals,
- st.ToUpgrade,
- )
+ dw := .0
+ if st.Expiration-st.Activation > 0 {
+ dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
+ }
+
+ var deals int
+ for _, deal := range st.Deals {
+ if deal != 0 {
+ deals++
+ }
+ }
+
+ exp := st.Expiration
+ if st.OnTime > 0 && st.OnTime < exp {
+ exp = st.OnTime // Can be different when the sector was CC upgraded
+ }
+
+ m := map[string]interface{}{
+ "ID": s,
+ "State": color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State),
+ "OnChain": yesno(inSSet),
+ "Active": yesno(inASet),
+ }
+
+ if deals > 0 {
+ m["Deals"] = color.GreenString("%d", deals)
+ } else {
+ m["Deals"] = color.BlueString("CC")
+ if st.ToUpgrade {
+ m["Deals"] = color.CyanString("CC(upgrade)")
+ }
+ }
+
+ if !fast {
+ if !inSSet {
+ m["Expiration"] = "n/a"
+ } else {
+ m["Expiration"] = lcli.EpochTime(head.Height(), exp)
+
+ if !fast && deals > 0 {
+ m["DealWeight"] = units.BytesSize(dw)
+ }
+
+ if st.Early > 0 {
+ m["EarlyExpiration"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
+ }
+ }
+ }
+
+ tw.Write(m)
+ }
}
- return w.Flush()
+ return tw.Flush(os.Stdout)
},
}
@@ -379,7 +463,7 @@ var sectorsCapacityCollateralCmd = &cli.Command{
Expiration: abi.ChainEpoch(cctx.Uint64("expiration")),
}
if pci.Expiration == 0 {
- pci.Expiration = miner.MaxSectorExpirationExtension
+ pci.Expiration = miner0.MaxSectorExpirationExtension
}
pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK)
if err != nil {
@@ -393,8 +477,9 @@ var sectorsCapacityCollateralCmd = &cli.Command{
}
var sectorsUpdateCmd = &cli.Command{
- Name: "update-state",
- Usage: "ADVANCED: manually update the state of a sector, this may aid in error recovery",
+ Name: "update-state",
+ Usage: "ADVANCED: manually update the state of a sector, this may aid in error recovery",
+	ArgsUsage: "<sectorNum> <newState>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
@@ -420,13 +505,22 @@ var sectorsUpdateCmd = &cli.Command{
return xerrors.Errorf("could not parse sector number: %w", err)
}
+ newState := cctx.Args().Get(1)
+ if _, ok := sealing.ExistSectorStateList[sealing.SectorState(newState)]; !ok {
+ fmt.Printf(" \"%s\" is not a valid state. Possible states for sectors are: \n", newState)
+ for state := range sealing.ExistSectorStateList {
+ fmt.Printf("%s\n", string(state))
+ }
+ return nil
+ }
+
return nodeApi.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1)))
},
}
func yesno(b bool) string {
if b {
- return "YES"
+ return color.GreenString("YES")
}
- return "NO"
+ return color.RedString("NO")
}
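
`sectors list` now renders through lib/tablewriter instead of text/tabwriter, writing one map per row. A minimal sketch of that API as used above (column names and values here are illustrative):

tw := tablewriter.New(
	tablewriter.Col("ID"),
	tablewriter.Col("State"),
	tablewriter.NewLineCol("Error"), // printed on its own line when present
)
tw.Write(map[string]interface{}{
	"ID":    42,
	"State": "Proving",
})
if err := tw.Flush(os.Stdout); err != nil {
	return err
}
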
diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go
index 7fadcf83f..77792f32a 100644
--- a/cmd/lotus-storage-miner/storage.go
+++ b/cmd/lotus-storage-miner/storage.go
@@ -18,9 +18,9 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -31,6 +31,10 @@ const metaFile = "sectorstore.json"
var storageCmd = &cli.Command{
Name: "storage",
Usage: "manage sector storage",
+ Description: `Sectors can be stored across many filesystem paths. These
+commands provide ways to manage the storage the miner will use to store sectors
+long term for proving (referenced as 'store') as well as how sectors will be
+stored while moving through the sealing pipeline (referenced as 'seal').`,
Subcommands: []*cli.Command{
storageAttachCmd,
storageListCmd,
@@ -41,6 +45,25 @@ var storageCmd = &cli.Command{
var storageAttachCmd = &cli.Command{
Name: "attach",
Usage: "attach local storage path",
+ Description: `Storage can be attached to the miner using this command. The storage volume
+list is kept locally by the miner in $LOTUS_MINER_PATH/storage.json. We do not
+recommend modifying this file by hand without a good understanding of the
+storage system.
+
+Each storage volume contains a configuration file which describes the
+capabilities of the volume. When the '--init' flag is provided, this file will
+be created using the additional flags.
+
+Weight
+A high weight value means data will be more likely to be stored in this path
+
+Seal
+Data for the sealing process will be stored here
+
+Store
+Finalized sectors that will be moved here for long term storage and be proven
+over time
+ `,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "init",
@@ -370,7 +393,7 @@ var storageFindCmd = &cli.Command{
}
fmt.Printf("In %s (%s)\n", info.id, types[:len(types)-2])
- fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanSeal)
+ fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore)
if localPath, ok := local[info.id]; ok {
fmt.Printf("\tLocal (%s)\n", localPath)
} else {
diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go
new file mode 100644
index 000000000..aec0000c9
--- /dev/null
+++ b/cmd/lotus/backup.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var backupCmd = lcli.BackupCmd("repo", repo.FullNode, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) {
+ return lcli.GetFullNodeAPI(cctx)
+})
diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go
index e0fee6564..a0f754a60 100644
--- a/cmd/lotus/daemon.go
+++ b/cmd/lotus/daemon.go
@@ -3,11 +3,14 @@
package main
import (
+ "bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
+ "net/http"
"os"
"runtime/pprof"
"strings"
@@ -23,6 +26,7 @@ import (
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
+ "gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -100,11 +104,11 @@ var DaemonCmd = &cli.Command{
},
&cli.StringFlag{
Name: "import-chain",
- Usage: "on first run, load chain from given file and validate",
+ Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
- Usage: "import chain state from a given chain export file",
+ Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "halt-after-import",
@@ -123,6 +127,10 @@ var DaemonCmd = &cli.Command{
Usage: "manage open file limit",
Value: true,
},
+ &cli.StringFlag{
+ Name: "config",
+ Usage: "specify path of config file to use",
+ },
},
Action: func(cctx *cli.Context) error {
err := runmetrics.Enable(runmetrics.RunMetricOptions{
@@ -176,6 +184,10 @@ var DaemonCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
+ if cctx.String("config") != "" {
+ r.SetConfigPath(cctx.String("config"))
+ }
+
if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
@@ -206,11 +218,6 @@ var DaemonCmd = &cli.Command{
issnapshot = true
}
- chainfile, err := homedir.Expand(chainfile)
- if err != nil {
- return err
- }
-
if err := ImportChain(r, chainfile, issnapshot); err != nil {
return err
}
@@ -326,12 +333,42 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return nil
}
-func ImportChain(r repo.Repo, fname string, snapshot bool) error {
- fi, err := os.Open(fname)
- if err != nil {
- return err
+func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
+ var rd io.Reader
+ var l int64
+ if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
+ resp, err := http.Get(fname) //nolint:gosec
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusOK {
+ return xerrors.Errorf("non-200 response: %d", resp.StatusCode)
+ }
+
+ rd = resp.Body
+ l = resp.ContentLength
+ } else {
+ fname, err = homedir.Expand(fname)
+ if err != nil {
+ return err
+ }
+
+ fi, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer fi.Close() //nolint:errcheck
+
+ st, err := os.Stat(fname)
+ if err != nil {
+ return err
+ }
+
+ rd = fi
+ l = st.Size()
}
- defer fi.Close() //nolint:errcheck
lr, err := r.Lock(repo.FullNode)
if err != nil {
@@ -353,8 +390,21 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) error {
cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
- log.Info("importing chain from file...")
- ts, err := cst.Import(fi)
+ log.Infof("importing chain from %s...", fname)
+
+ bufr := bufio.NewReaderSize(rd, 1<<20)
+
+ bar := pb.New64(l)
+ br := bar.NewProxyReader(bufr)
+ bar.ShowTimeLeft = true
+ bar.ShowPercent = true
+ bar.ShowSpeed = true
+ bar.Units = pb.U_BYTES
+
+ bar.Start()
+ ts, err := cst.Import(br)
+ bar.Finish()
+
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
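A rough, self-contained sketch of the progress-reporting pattern the new ImportChain uses (a buffered reader wrapped by a gopkg.in/cheggaaa/pb.v1 proxy reader). It assumes a local file whose size is known up front; the file name is hypothetical.

    package main

    import (
        "bufio"
        "io"
        "io/ioutil"
        "os"

        "gopkg.in/cheggaaa/pb.v1"
    )

    // copyWithProgress wraps any reader in a 1 MiB buffered reader plus a
    // byte-count progress bar, then drains it into dst.
    func copyWithProgress(dst io.Writer, src io.Reader, size int64) error {
        bufr := bufio.NewReaderSize(src, 1<<20)

        bar := pb.New64(size)
        bar.Units = pb.U_BYTES
        bar.ShowTimeLeft = true
        bar.ShowPercent = true
        bar.ShowSpeed = true

        bar.Start()
        defer bar.Finish()

        _, err := io.Copy(dst, bar.NewProxyReader(bufr))
        return err
    }

    func main() {
        f, err := os.Open("chain-export.car") // hypothetical export file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        st, err := f.Stat()
        if err != nil {
            panic(err)
        }
        if err := copyWithProgress(ioutil.Discard, f, st.Size()); err != nil {
            panic(err)
        }
    }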
diff --git a/cmd/lotus/debug_advance.go b/cmd/lotus/debug_advance.go
index 699182472..4e74a995f 100644
--- a/cmd/lotus/debug_advance.go
+++ b/cmd/lotus/debug_advance.go
@@ -7,12 +7,12 @@ import (
"time"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/crypto"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"golang.org/x/xerrors"
"github.com/urfave/cli/v2"
@@ -46,6 +46,7 @@ func init() {
return xerrors.Errorf("StateMinerWorker: %w", err)
}
+ // XXX: This can't be right
rand, err := api.ChainGetRandomnessFromTickets(ctx, head.Key(), crypto.DomainSeparationTag_TicketProduction, head.Height(), addr.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness: %w", err)
diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go
index 1e2c7faec..eb97045ee 100644
--- a/cmd/lotus/main.go
+++ b/cmd/lotus/main.go
@@ -16,10 +16,13 @@ import (
var AdvanceBlockCmd *cli.Command
func main() {
+ build.RunningNodeType = build.NodeFull
+
lotuslog.SetupLogLevels()
local := []*cli.Command{
DaemonCmd,
+ backupCmd,
}
if AdvanceBlockCmd != nil {
local = append(local, AdvanceBlockCmd)
diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go
index fbe05e938..9718deb3a 100644
--- a/cmd/lotus/rpc.go
+++ b/cmd/lotus/rpc.go
@@ -66,8 +66,10 @@ func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shut
shutdownDone := make(chan struct{})
go func() {
select {
- case <-sigCh:
+ case sig := <-sigCh:
+ log.Warnw("received shutdown", "signal", sig)
case <-shutdownCh:
+ log.Warn("received shutdown")
}
log.Warn("Shutting down...")
diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go
new file mode 100644
index 000000000..9ec6f9e2b
--- /dev/null
+++ b/cmd/tvx/exec.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/fatih/color"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/conformance"
+
+ "github.com/filecoin-project/test-vectors/schema"
+)
+
+var execFlags struct {
+ file string
+}
+
+var execCmd = &cli.Command{
+ Name: "exec",
+ Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or a ndjson stdin stream",
+ Action: runExecLotus,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "file",
+ Usage: "input file; if not supplied, the vector will be read from stdin",
+ TakesFile: true,
+ Destination: &execFlags.file,
+ },
+ },
+}
+
+func runExecLotus(_ *cli.Context) error {
+ if file := execFlags.file; file != "" {
+ // we have a single test vector supplied as a file.
+ file, err := os.Open(file)
+ if err != nil {
+ return fmt.Errorf("failed to open test vector: %w", err)
+ }
+
+ var (
+ dec = json.NewDecoder(file)
+ tv schema.TestVector
+ )
+
+ if err = dec.Decode(&tv); err != nil {
+ return fmt.Errorf("failed to decode test vector: %w", err)
+ }
+
+ return executeTestVector(tv)
+ }
+
+ for dec := json.NewDecoder(os.Stdin); ; {
+ var tv schema.TestVector
+ switch err := dec.Decode(&tv); err {
+ case nil:
+ if err = executeTestVector(tv); err != nil {
+ return err
+ }
+ case io.EOF:
+ // we're done.
+ return nil
+ default:
+ // something bad happened.
+ return err
+ }
+ }
+}
+
+func executeTestVector(tv schema.TestVector) error {
+ log.Println("executing test vector:", tv.Meta.ID)
+ r := new(conformance.LogReporter)
+ switch class := tv.Class; class {
+ case "message":
+ conformance.ExecuteMessageVector(r, &tv)
+ case "tipset":
+ conformance.ExecuteTipsetVector(r, &tv)
+ default:
+ return fmt.Errorf("test vector class %s not supported", class)
+ }
+
+ if r.Failed() {
+ log.Println(color.HiRedString("❌ test vector failed"))
+ } else {
+ log.Println(color.GreenString("✅ test vector succeeded"))
+ }
+
+ return nil
+}
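The stdin branch above relies on the fact that an encoding/json Decoder can be invoked repeatedly over a newline-delimited stream until it returns io.EOF. A self-contained sketch of that loop, with a throwaway struct standing in for schema.TestVector:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    type vectorStub struct {
        ID string `json:"id"`
    }

    func main() {
        // Hypothetical ndjson input; in `tvx exec` this is os.Stdin.
        in := strings.NewReader(`{"id":"one"}` + "\n" + `{"id":"two"}` + "\n")

        for dec := json.NewDecoder(in); ; {
            var v vectorStub
            switch err := dec.Decode(&v); err {
            case nil:
                fmt.Println("decoded vector:", v.ID)
            case io.EOF:
                return // stream exhausted; we're done.
            default:
                panic(err) // malformed input.
            }
        }
    }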
diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go
new file mode 100644
index 000000000..b0ed574df
--- /dev/null
+++ b/cmd/tvx/extract.go
@@ -0,0 +1,511 @@
+package main
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+
+ "github.com/fatih/color"
+
+ "github.com/filecoin-project/lotus/api"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/conformance"
+
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+)
+
+const (
+ PrecursorSelectAll = "all"
+ PrecursorSelectSender = "sender"
+)
+
+type extractOpts struct {
+ id string
+ block string
+ class string
+ cid string
+ file string
+ retain string
+ precursor string
+}
+
+var extractFlags extractOpts
+
+var extractCmd = &cli.Command{
+ Name: "extract",
+ Description: "generate a test vector by extracting it from a live chain",
+ Action: runExtract,
+ Flags: []cli.Flag{
+ &repoFlag,
+ &cli.StringFlag{
+ Name: "class",
+ Usage: "class of vector to extract; other required flags depend on the; values: 'message'",
+ Value: "message",
+ Destination: &extractFlags.class,
+ },
+ &cli.StringFlag{
+ Name: "id",
+ Usage: "identifier to name this test vector with",
+ Value: "(undefined)",
+ Destination: &extractFlags.id,
+ },
+ &cli.StringFlag{
+ Name: "block",
+ Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning",
+ Destination: &extractFlags.block,
+ },
+ &cli.StringFlag{
+ Name: "cid",
+ Usage: "message CID to generate test vector from",
+ Required: true,
+ Destination: &extractFlags.cid,
+ },
+ &cli.StringFlag{
+ Name: "out",
+ Aliases: []string{"o"},
+ Usage: "file to write test vector to",
+ Destination: &extractFlags.file,
+ },
+ &cli.StringFlag{
+ Name: "state-retain",
+ Usage: "state retention policy; values: 'accessed-cids', 'accessed-actors'",
+ Value: "accessed-cids",
+ Destination: &extractFlags.retain,
+ },
+ &cli.StringFlag{
+ Name: "precursor-select",
+ Usage: "precursors to apply; values: 'all', 'sender'; 'all' selects all preceding" +
+ "messages in the canonicalised tipset, 'sender' selects only preceding messages from the same" +
+ "sender. Usually, 'sender' is a good tradeoff and gives you sufficient accuracy. If the receipt sanity" +
+ "check fails due to gas reasons, switch to 'all', as previous messages in the tipset may have" +
+ "affected state in a disruptive way",
+ Value: "sender",
+ Destination: &extractFlags.precursor,
+ },
+ },
+}
+
+func runExtract(c *cli.Context) error {
+ // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering",
+ // which stashes write operations in a BufferedBlockstore
+ // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21)
+ // such that they're not written until the VM is actually flushed.
+ //
+ // For some reason, the standard behaviour was not working for me (raulk),
+ // and disabling it (such that the state transformations are written immediately
+ // to the blockstore) worked.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
+
+ ctx := context.Background()
+
+ // Make the API client.
+ fapi, closer, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ return doExtract(ctx, fapi, extractFlags)
+}
+
+func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error {
+ mcid, err := cid.Decode(opts.cid)
+ if err != nil {
+ return err
+ }
+
+ msg, execTs, incTs, err := resolveFromChain(ctx, fapi, mcid, opts.block)
+ if err != nil {
+ return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
+ }
+
+ // get the circulating supply before the message was executed.
+ circSupplyDetail, err := fapi.StateCirculatingSupply(ctx, incTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed while fetching circulating supply: %w", err)
+ }
+
+ circSupply := circSupplyDetail.FilCirculating
+
+ log.Printf("message was executed in tipset: %s", execTs.Key())
+ log.Printf("message was included in tipset: %s", incTs.Key())
+ log.Printf("circulating supply at inclusion tipset: %d", circSupply)
+ log.Printf("finding precursor messages using mode: %s", opts.precursor)
+
+ // Fetch messages in canonical order from inclusion tipset.
+ msgs, err := fapi.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid())
+ if err != nil {
+ return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err)
+ }
+
+ related, found, err := findMsgAndPrecursors(opts.precursor, msg, msgs)
+ if err != nil {
+ return fmt.Errorf("failed while finding message and precursors: %w", err)
+ }
+
+ if !found {
+ return fmt.Errorf("message not found; precursors found: %d", len(related))
+ }
+
+ var (
+ precursors = related[:len(related)-1]
+ precursorsCids []cid.Cid
+ )
+
+ for _, p := range precursors {
+ precursorsCids = append(precursorsCids, p.Cid())
+ }
+
+ log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids))
+
+ var (
+ // create a read-through store that uses ChainGetObject to fetch unknown CIDs.
+ pst = NewProxyingStores(ctx, fapi)
+ g = NewSurgeon(ctx, fapi, pst)
+ )
+
+ driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
+ DisableVMFlush: true,
+ })
+
+ // this is the root of the state tree we start with.
+ root := incTs.ParentState()
+ log.Printf("base state tree root CID: %s", root)
+
+ basefee := incTs.Blocks()[0].ParentBaseFee
+ log.Printf("basefee: %s", basefee)
+
+ // on top of that state tree, we apply all precursors.
+ log.Printf("number of precursors to apply: %d", len(precursors))
+ for i, m := range precursors {
+ log.Printf("applying precursor %d, cid: %s", i, m.Cid())
+ _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: root,
+ Epoch: execTs.Height(),
+ Message: m,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ // recorded randomness will be discarded.
+ Rand: conformance.NewRecordingRand(new(conformance.LogReporter), fapi),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute precursor message: %w", err)
+ }
+ }
+
+ var (
+ preroot cid.Cid
+ postroot cid.Cid
+ applyret *vm.ApplyRet
+ carWriter func(w io.Writer) error
+ retention = opts.retain
+
+ // recordingRand will record randomness so we can embed it in the test vector.
+ recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), fapi)
+ )
+
+ log.Printf("using state retention strategy: %s", retention)
+ switch retention {
+ case "accessed-cids":
+ tbs, ok := pst.Blockstore.(TracingBlockstore)
+ if !ok {
+ return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
+ }
+
+ tbs.StartTracing()
+
+ preroot = root
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ Rand: recordingRand,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ accessed := tbs.FinishTracing()
+ carWriter = func(w io.Writer) error {
+ return g.WriteCARIncluding(w, accessed, preroot, postroot)
+ }
+
+ case "accessed-actors":
+ log.Printf("calculating accessed actors")
+ // get actors accessed by message.
+ retain, err := g.GetAccessedActors(ctx, fapi, mcid)
+ if err != nil {
+ return fmt.Errorf("failed to calculate accessed actors: %w", err)
+ }
+ // also append the reward actor and the burnt funds actor.
+ retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address)
+ log.Printf("calculated accessed actors: %v", retain)
+
+ // get the masked state tree from the root.
+ preroot, err = g.GetMaskedStateTree(root, retain)
+ if err != nil {
+ return err
+ }
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ Rand: recordingRand,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ carWriter = func(w io.Writer) error {
+ return g.WriteCAR(w, preroot, postroot)
+ }
+
+ default:
+ return fmt.Errorf("unknown state retention option: %s", retention)
+ }
+
+ log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot)
+ log.Println("performing sanity check on receipt")
+
+ // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯
+ // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2
+ // This code is lenient and skips receipt comparison in case of a nil receipt.
+ rec, err := fapi.StateGetReceipt(ctx, mcid, execTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed to find receipt on chain: %w", err)
+ }
+ log.Printf("found receipt: %+v", rec)
+
+ // generate the schema receipt; use the on-chain receipt if we got one, otherwise fall back to the locally computed result.
+ var receipt *schema.Receipt
+ if rec != nil {
+ receipt = &schema.Receipt{
+ ExitCode: int64(rec.ExitCode),
+ ReturnValue: rec.Return,
+ GasUsed: rec.GasUsed,
+ }
+ reporter := new(conformance.LogReporter)
+ conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed")
+ if reporter.Failed() {
+ log.Println(color.RedString("receipt sanity check failed; aborting"))
+ return fmt.Errorf("vector generation aborted")
+ }
+ log.Println(color.GreenString("receipt sanity check succeeded"))
+ } else {
+ receipt = &schema.Receipt{
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ }
+ log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus"))
+ }
+
+ log.Println("generating vector")
+ msgBytes, err := msg.Serialize()
+ if err != nil {
+ return err
+ }
+
+ var (
+ out = new(bytes.Buffer)
+ gw = gzip.NewWriter(out)
+ )
+ if err := carWriter(gw); err != nil {
+ return err
+ }
+ if err = gw.Flush(); err != nil {
+ return err
+ }
+ if err = gw.Close(); err != nil {
+ return err
+ }
+
+ version, err := fapi.Version(ctx)
+ if err != nil {
+ return err
+ }
+
+ ntwkName, err := fapi.StateNetworkName(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Write out the test vector.
+ vector := schema.TestVector{
+ Class: schema.ClassMessage,
+ Meta: &schema.Metadata{
+ ID: opts.id,
+ // TODO need to replace schema.GenerationData with a more flexible
+ // data structure that makes no assumption about the traceability
+ // data that's being recorded; a flexible map[string]string
+ // would do.
+ Gen: []schema.GenerationData{
+ {Source: fmt.Sprintf("network:%s", ntwkName)},
+ {Source: fmt.Sprintf("message:%s", msg.Cid().String())},
+ {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())},
+ {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())},
+ {Source: "github.com/filecoin-project/lotus", Version: version.String()}},
+ },
+ Randomness: recordingRand.Recorded(),
+ CAR: out.Bytes(),
+ Pre: &schema.Preconditions{
+ Epoch: int64(execTs.Height()),
+ CircSupply: circSupply.Int,
+ BaseFee: basefee.Int,
+ StateTree: &schema.StateTree{
+ RootCID: preroot,
+ },
+ },
+ ApplyMessages: []schema.Message{{Bytes: msgBytes}},
+ Post: &schema.Postconditions{
+ StateTree: &schema.StateTree{
+ RootCID: postroot,
+ },
+ Receipts: []*schema.Receipt{
+ {
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ },
+ },
+ },
+ }
+
+ output := io.WriteCloser(os.Stdout)
+ if file := opts.file; file != "" {
+ dir := filepath.Dir(file)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return fmt.Errorf("unable to create directory %s: %w", dir, err)
+ }
+ output, err = os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer output.Close() //nolint:errcheck
+ defer log.Printf("wrote test vector to file: %s", file)
+ }
+
+ enc := json.NewEncoder(output)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(&vector); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// resolveFromChain queries the chain for the provided message, using the block CID to
+// speed up the query, if provided
+func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
+ // Extract the full message.
+ msg, err = api.ChainGetMessage(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("found message with CID %s: %+v", mcid, msg)
+
+ if block == "" {
+ log.Printf("locating message in blockchain")
+
+ // Locate the message.
+ msgInfo, err := api.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
+ }
+
+ log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)
+
+ execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet)
+ return msg, execTs, incTs, err
+ }
+
+ bcid, err := cid.Decode(block)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid)
+
+ blk, err := api.ChainGetBlock(ctx, bcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get block: %w", err)
+ }
+
+ // types.EmptyTSK hints to use the HEAD.
+ execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err)
+ }
+
+ // walk back from the execTs instead of HEAD, to save time.
+ incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key())
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err)
+ }
+
+ return msg, execTs, incTs, nil
+}
+
+// fetchThisAndPrevTipset returns the full tipset identified by the key, as well
+// as the previous tipset. In the context of vector generation, the target
+// tipset is the one where a message was executed, and the previous tipset is
+// the one where the message was included.
+func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
+ // get the tipset on which this message was "executed" on.
+ // https://github.com/filecoin-project/lotus/issues/2847
+ targetTs, err = api.ChainGetTipSet(ctx, target)
+ if err != nil {
+ return nil, nil, err
+ }
+ // get the previous tipset, on which this message was mined,
+ // i.e. included on-chain.
+ prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents())
+ if err != nil {
+ return nil, nil, err
+ }
+ return targetTs, prevTs, nil
+}
+
+// findMsgAndPrecursors ranges through the canonical messages slice, locating
+// the target message and returning precursors in accordance with the supplied
+// mode.
+func findMsgAndPrecursors(mode string, target *types.Message, msgs []api.Message) (related []*types.Message, found bool, err error) {
+ // Range through canonicalised messages, selecting only the precursors based
+ // on selection mode.
+ for _, other := range msgs {
+ switch {
+ case mode == PrecursorSelectAll:
+ fallthrough
+ case mode == PrecursorSelectSender && other.Message.From == target.From:
+ related = append(related, other.Message)
+ }
+
+ // this message is the target; we're done.
+ if other.Cid == target.Cid() {
+ return related, true, nil
+ }
+ }
+
+ // this could happen because a block contained related messages, but not
+ // the target (that is, messages with a lower nonce, but ultimately not the
+ // target).
+ return related, false, nil
+}
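The CAR field written by doExtract above is gzip-compressed before being embedded in the vector. A consumer-side sketch of undoing that step (the payload here is placeholder bytes, not a real CAR archive):

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io/ioutil"
    )

    // decompressCAR reverses the gzip step applied at vector generation time.
    func decompressCAR(car []byte) ([]byte, error) {
        gr, err := gzip.NewReader(bytes.NewReader(car))
        if err != nil {
            return nil, err
        }
        defer gr.Close() //nolint:errcheck
        return ioutil.ReadAll(gr)
    }

    func main() {
        // Round-trip demo: compress placeholder bytes the same way doExtract does.
        var buf bytes.Buffer
        gw := gzip.NewWriter(&buf)
        _, _ = gw.Write([]byte("not-a-real-car"))
        _ = gw.Close()

        raw, err := decompressCAR(buf.Bytes())
        if err != nil {
            panic(err)
        }
        fmt.Printf("recovered %d bytes\n", len(raw))
    }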
diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go
new file mode 100644
index 000000000..fe0ce6a6c
--- /dev/null
+++ b/cmd/tvx/extract_many.go
@@ -0,0 +1,243 @@
+package main
+
+import (
+ "context"
+ "encoding/csv"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/hashicorp/go-multierror"
+ "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-multihash"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var extractManyFlags struct {
+ in string
+ outdir string
+ batchId string
+}
+
+var extractManyCmd = &cli.Command{
+ Name: "extract-many",
+ Description: `generate many test vectors by repeatedly calling tvx extract, using a csv file as input.
+
+ The CSV file must have a format just like the following:
+
+ message_cid,receiver_code,method_num,exit_code,height,block_cid,seq
+ bafy2bzacedvuvgpsnwq7i7kltfap6hnp7fdmzf6lr4w34zycjrthb3v7k6zi6,fil/1/account,0,0,67972,bafy2bzacebthpxzlk7zhlkz3jfzl4qw7mdoswcxlf3rkof3b4mbxfj3qzfk7w,1
+ bafy2bzacedwicofymn4imgny2hhbmcm4o5bikwnv3qqgohyx73fbtopiqlro6,fil/1/account,0,0,67860,bafy2bzacebj7beoxyzll522o6o76mt7von4psn3tlvunokhv4zhpwmfpipgti,2
+ ...
+
+ The first row MUST be a header row. At the bare minimum, those seven fields
+ must appear, in the order specified. Extra fields are accepted, but always
+ after these compulsory seven.
+`,
+ Action: runExtractMany,
+ Flags: []cli.Flag{
+ &repoFlag,
+ &cli.StringFlag{
+ Name: "batch-id",
+ Usage: "batch id; a four-digit left-zero-padded sequential number (e.g. 0041)",
+ Required: true,
+ Destination: &extractManyFlags.batchId,
+ },
+ &cli.StringFlag{
+ Name: "in",
+ Usage: "path to input file (csv)",
+ Destination: &extractManyFlags.in,
+ },
+ &cli.StringFlag{
+ Name: "outdir",
+ Usage: "output directory",
+ Destination: &extractManyFlags.outdir,
+ },
+ },
+}
+
+func runExtractMany(c *cli.Context) error {
+ // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering",
+ // which stashes write operations in a BufferedBlockstore
+ // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21)
+ // such that they're not written until the VM is actually flushed.
+ //
+ // For some reason, the standard behaviour was not working for me (raulk),
+ // and disabling it (such that the state transformations are written immediately
+ // to the blockstore) worked.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
+
+ ctx := context.Background()
+
+ // Make the API client.
+ fapi, closer, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ var (
+ in = extractManyFlags.in
+ outdir = extractManyFlags.outdir
+ )
+
+ if in == "" {
+ return fmt.Errorf("input file not provided")
+ }
+
+ if outdir == "" {
+ return fmt.Errorf("output dir not provided")
+ }
+
+ // Open the CSV file for reading.
+ f, err := os.Open(in)
+ if err != nil {
+ return fmt.Errorf("could not open file %s: %w", in, err)
+ }
+
+ // Ensure the output directory exists.
+ if err := os.MkdirAll(outdir, 0755); err != nil {
+ return fmt.Errorf("could not create output dir %s: %w", outdir, err)
+ }
+
+ // Create a CSV reader and validate the header row.
+ reader := csv.NewReader(f)
+ if header, err := reader.Read(); err != nil {
+ return fmt.Errorf("failed to read header from csv: %w", err)
+ } else if l := len(header); l < 7 {
+ return fmt.Errorf("insufficient number of fields: %d", l)
+ } else if f := header[0]; f != "message_cid" {
+ return fmt.Errorf("csv sanity check failed: expected first field in header to be 'message_cid'; was: %s", f)
+ } else {
+ log.Println(color.GreenString("csv sanity check succeeded; header contains fields: %v", header))
+ }
+
+ codeCidBuilder := cid.V1Builder{Codec: cid.Raw, MhType: multihash.IDENTITY}
+
+ var (
+ generated []string
+ merr = new(multierror.Error)
+ retry []extractOpts // to retry with 'canonical' precursor selection mode
+ )
+
+ // Read each row and extract the requested message.
+ for {
+ row, err := reader.Read()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return fmt.Errorf("failed to read row: %w", err)
+ }
+ var (
+ mcid = row[0]
+ actorcode = row[1]
+ methodnumstr = row[2]
+ exitcodestr = row[3]
+ _ = row[4]
+ block = row[5]
+ seq = row[6]
+
+ exit int
+ methodnum int
+ methodname string
+ )
+
+ // Parse the exit code.
+ if exit, err = strconv.Atoi(exitcodestr); err != nil {
+ return fmt.Errorf("invalid exitcode number: %d", exit)
+ }
+ // Parse the method number.
+ if methodnum, err = strconv.Atoi(methodnumstr); err != nil {
+ return fmt.Errorf("invalid method number: %s", methodnumstr)
+ }
+
+ codeCid, err := codeCidBuilder.Sum([]byte(actorcode))
+ if err != nil {
+ return fmt.Errorf("failed to compute actor code CID")
+ }
+
+ // Lookup the method in actor method table.
+ if m, ok := stmgr.MethodsMap[codeCid]; !ok {
+ return fmt.Errorf("unrecognized actor: %s", actorcode)
+ } else if methodnum >= len(m) {
+ return fmt.Errorf("unrecognized method number for actor %s: %d", actorcode, methodnum)
+ } else {
+ methodname = m[abi.MethodNum(methodnum)].Name
+ }
+
+ // exitcode string representations are of kind ErrType(0); strip out
+ // the number portion.
+ exitcodename := strings.Split(exitcode.ExitCode(exit).String(), "(")[0]
+ // replace the slashes in the actor code name with underscores.
+ actorcodename := strings.ReplaceAll(actorcode, "/", "_")
+
+ // Compute the ID of the vector.
+ id := fmt.Sprintf("ext-%s-%s-%s-%s-%s", extractManyFlags.batchId, actorcodename, methodname, exitcodename, seq)
+ // Vector filename, using a base of outdir.
+ file := filepath.Join(outdir, actorcodename, methodname, exitcodename, id) + ".json"
+
+ log.Println(color.YellowString("processing message cid with 'sender' precursor mode: %s", id))
+
+ opts := extractOpts{
+ id: id,
+ block: block,
+ class: "message",
+ cid: mcid,
+ file: file,
+ retain: "accessed-cids",
+ precursor: PrecursorSelectSender,
+ }
+
+ if err := doExtract(ctx, fapi, opts); err != nil {
+ log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'canonical' precursor selection", mcid, err))
+ retry = append(retry, opts)
+ continue
+ }
+
+ log.Println(color.MagentaString("generated file: %s", file))
+
+ generated = append(generated, file)
+ }
+
+ log.Printf("extractions to try with canonical precursor selection mode: %d", len(retry))
+
+ for _, r := range retry {
+ log.Printf("retrying %s: %s", r.cid, r.id)
+
+ r.precursor = PrecursorSelectAll
+ if err := doExtract(ctx, fapi, r); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err))
+ continue
+ }
+
+ log.Println(color.MagentaString("generated file: %s", r.file))
+ generated = append(generated, r.file)
+ }
+
+ if len(generated) == 0 {
+ log.Println("no files generated")
+ } else {
+ log.Println("files generated:")
+ for _, g := range generated {
+ log.Println(g)
+ }
+ }
+
+ if merr.ErrorOrNil() != nil {
+ log.Println(color.YellowString("done processing with errors: %v", merr))
+ } else {
+ log.Println(color.GreenString("done processing with no errors"))
+ }
+
+ return merr.ErrorOrNil()
+}
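A minimal sketch of the header-then-rows CSV handling that extract-many performs, using encoding/csv over an in-memory string; the row values are made up.

    package main

    import (
        "encoding/csv"
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical input matching the expected column order.
        const data = "message_cid,receiver_code,method_num,exit_code,height,block_cid,seq\n" +
            "bafyexamplemessagecid,fil/1/account,0,0,67972,bafyexampleblockcid,1\n"

        r := csv.NewReader(strings.NewReader(data))

        header, err := r.Read()
        if err != nil {
            panic(err)
        }
        if len(header) < 7 || header[0] != "message_cid" {
            panic("unexpected header")
        }

        for {
            row, err := r.Read()
            if err != nil {
                break // io.EOF ends the loop; real code should distinguish other errors.
            }
            // Only the first seven columns are consumed; extras are ignored.
            fmt.Println("message:", row[0], "block:", row[5], "seq:", row[6])
        }
    }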
diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go
new file mode 100644
index 000000000..6c887d163
--- /dev/null
+++ b/cmd/tvx/main.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "log"
+ "os"
+ "sort"
+
+ "github.com/urfave/cli/v2"
+)
+
+// DefaultLotusRepoPath is the fallback path in which to look for a Lotus
+// client repo. It is expanded with mitchellh/go-homedir, so it'll work with all
+// OSes despite the Unix twiddle notation.
+const DefaultLotusRepoPath = "~/.lotus"
+
+var repoFlag = cli.StringFlag{
+ Name: "repo",
+ EnvVars: []string{"LOTUS_PATH"},
+ Value: DefaultLotusRepoPath,
+ TakesFile: true,
+}
+
+func main() {
+ app := &cli.App{
+ Name: "tvx",
+ Description: `tvx is a tool for extracting and executing test vectors. It has three subcommands.
+
+ tvx extract extracts a test vector from a live network. It requires access to
+ a Filecoin client that exposes the standard JSON-RPC API endpoint. Only
+ message class test vectors are supported at this time.
+
+ tvx exec executes test vectors against Lotus. Either you can supply one in a
+ file, or many as an ndjson stdin stream.
+
+ tvx extract-many performs a batch extraction of many messages, supplied in a
+ CSV file. Refer to the help of that subcommand for more info.
+
+ SETTING THE JSON-RPC API ENDPOINT
+
+ You can set the JSON-RPC API endpoint through one of the following methods.
+
+ 1. Directly set the API endpoint on the FULLNODE_API_INFO env variable.
+ The format is [token]:multiaddr, where token is optional for commands not
+ accessing privileged operations.
+
+ 2. If you're running tvx against a local Lotus client, you can set the REPO
+ env variable to have the API endpoint and token extracted from the repo.
+ Alternatively, you can pass the --repo CLI flag.
+
+ 3. Rely on the default fallback, which inspects ~/.lotus and extracts the
+ API endpoint string if the location is a Lotus repo.
+
+ tvx will apply these methods in the same order of precedence they're listed.
+`,
+ Usage: "tvx is a tool for extracting and executing test vectors",
+ Commands: []*cli.Command{
+ extractCmd,
+ execCmd,
+ extractManyCmd,
+ },
+ }
+
+ sort.Sort(cli.CommandsByName(app.Commands))
+ for _, c := range app.Commands {
+ sort.Sort(cli.FlagsByName(c.Flags))
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/cmd/tvx/state.go b/cmd/tvx/state.go
new file mode 100644
index 000000000..bff5cbd6e
--- /dev/null
+++ b/cmd/tvx/state.go
@@ -0,0 +1,293 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipld/go-car"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/api"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+// StateSurgeon is an object used to fetch and manipulate state.
+type StateSurgeon struct {
+ ctx context.Context
+ api api.FullNode
+ stores *Stores
+}
+
+// NewSurgeon returns a state surgeon, an object used to fetch and manipulate
+// state.
+func NewSurgeon(ctx context.Context, api api.FullNode, stores *Stores) *StateSurgeon {
+ return &StateSurgeon{
+ ctx: ctx,
+ api: api,
+ stores: stores,
+ }
+}
+
+// GetMaskedStateTree trims the state tree at the supplied tipset to contain
+// only the state of the actors in the retain set. It also "dives" into some
+// singleton system actors, like the init actor, to trim the state so as to
+// compute a minimal state tree. In the future, this method will dive into
+// other system actors like the power actor and the market actor.
+func (sg *StateSurgeon) GetMaskedStateTree(previousRoot cid.Cid, retain []address.Address) (cid.Cid, error) {
+ // TODO: this will need to be parameterized on network version.
+ st, err := state.LoadStateTree(sg.stores.CBORStore, previousRoot)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ initActor, initState, err := sg.loadInitActor(st)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ err = sg.retainInitEntries(initState, retain)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ err = sg.saveInitActor(initActor, initState, st)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ // resolve all addresses to ID addresses.
+ resolved, err := sg.resolveAddresses(retain, initState)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ st, err = sg.transplantActors(st, resolved)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ root, err := st.Flush(sg.ctx)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ return root, nil
+}
+
+// GetAccessedActors identifies the actors that were accessed during the
+// execution of a message.
+func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a api.FullNode, mid cid.Cid) ([]address.Address, error) {
+ log.Printf("calculating accessed actors during execution of message: %s", mid)
+ msgInfo, err := a.StateSearchMsg(ctx, mid)
+ if err != nil {
+ return nil, err
+ }
+ if msgInfo == nil {
+ return nil, fmt.Errorf("message info is nil")
+ }
+
+ msgObj, err := a.ChainGetMessage(ctx, mid)
+ if err != nil {
+ return nil, err
+ }
+
+ ts, err := a.ChainGetTipSet(ctx, msgInfo.TipSet)
+ if err != nil {
+ return nil, err
+ }
+
+ trace, err := a.StateCall(ctx, msgObj, ts.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("could not replay msg: %w", err)
+ }
+
+ accessed := make(map[address.Address]struct{})
+
+ var recur func(trace *types.ExecutionTrace)
+ recur = func(trace *types.ExecutionTrace) {
+ accessed[trace.Msg.To] = struct{}{}
+ accessed[trace.Msg.From] = struct{}{}
+ for i := range trace.Subcalls {
+ recur(&trace.Subcalls[i])
+ }
+ }
+ recur(&trace.ExecutionTrace)
+
+ ret := make([]address.Address, 0, len(accessed))
+ for k := range accessed {
+ ret = append(ret, k)
+ }
+
+ return ret, nil
+}
+
+// WriteCAR recursively writes the tree referenced by the root as a CAR into the
+// supplied io.Writer.
+func (sg *StateSurgeon) WriteCAR(w io.Writer, roots ...cid.Cid) error {
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+ }
+ return car.WriteCarWithWalker(sg.ctx, sg.stores.DAGService, roots, w, carWalkFn)
+}
+
+// WriteCARIncluding writes a CAR including only the CIDs that are listed in
+// the include set. This leads to an intentionally sparse tree with dangling links.
+func (sg *StateSurgeon) WriteCARIncluding(w io.Writer, include map[cid.Cid]struct{}, roots ...cid.Cid) error {
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if _, ok := include[link.Cid]; !ok {
+ continue
+ }
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+ }
+ return car.WriteCarWithWalker(sg.ctx, sg.stores.DAGService, roots, w, carWalkFn)
+}
+
+// transplantActors plucks the state of the supplied actors from the source
+// state tree and copies it into a new state tree, which is returned.
+func (sg *StateSurgeon) transplantActors(src *state.StateTree, pluck []address.Address) (*state.StateTree, error) {
+ log.Printf("transplanting actor states: %v", pluck)
+
+ dst, err := state.NewStateTree(sg.stores.CBORStore, src.Version())
+ if err != nil {
+ return nil, err
+ }
+
+ for _, a := range pluck {
+ actor, err := src.GetActor(a)
+ if err != nil {
+ return nil, fmt.Errorf("get actor %s failed: %w", a, err)
+ }
+
+ err = dst.SetActor(a, actor)
+ if err != nil {
+ return nil, err
+ }
+
+ // recursive copy of the actor state.
+ err = vm.Copy(context.TODO(), sg.stores.Blockstore, sg.stores.Blockstore, actor.Head)
+ if err != nil {
+ return nil, err
+ }
+
+ actorState, err := sg.api.ChainReadObj(sg.ctx, actor.Head)
+ if err != nil {
+ return nil, err
+ }
+
+ cid, err := sg.stores.CBORStore.Put(sg.ctx, &cbg.Deferred{Raw: actorState})
+ if err != nil {
+ return nil, err
+ }
+
+ if cid != actor.Head {
+ panic("mismatched cids")
+ }
+ }
+
+ return dst, nil
+}
+
+// saveInitActor saves the state of the init actor into the provided state tree.
+func (sg *StateSurgeon) saveInitActor(initActor *types.Actor, initState init_.State, st *state.StateTree) error {
+ log.Printf("saving init actor into state tree")
+
+ // Store the state of the init actor.
+ cid, err := sg.stores.CBORStore.Put(sg.ctx, initState)
+ if err != nil {
+ return err
+ }
+ actor := *initActor
+ actor.Head = cid
+
+ err = st.SetActor(init_.Address, &actor)
+ if err != nil {
+ return err
+ }
+
+ cid, _ = st.Flush(sg.ctx)
+ log.Printf("saved init actor into state tree; new root: %s", cid)
+ return nil
+}
+
+// retainInitEntries takes an init actor state and removes every address map
+// entry that is not in the retain set, mutating the state in place.
+func (sg *StateSurgeon) retainInitEntries(state init_.State, retain []address.Address) error {
+ log.Printf("retaining init actor entries for addresses: %v", retain)
+
+ m := make(map[address.Address]struct{}, len(retain))
+ for _, a := range retain {
+ m[a] = struct{}{}
+ }
+
+ var remove []address.Address
+ _ = state.ForEachActor(func(id abi.ActorID, address address.Address) error {
+ if _, ok := m[address]; !ok {
+ remove = append(remove, address)
+ }
+ return nil
+ })
+
+ err := state.Remove(remove...)
+ log.Printf("new init actor state: %+v", state)
+ return err
+}
+
+// resolveAddresses resolves the requested addresses from the provided
+// InitActor state, returning a slice of length len(orig), where each index
+// contains the resolved address.
+func (sg *StateSurgeon) resolveAddresses(orig []address.Address, ist init_.State) (ret []address.Address, err error) {
+ log.Printf("resolving addresses: %v", orig)
+
+ ret = make([]address.Address, len(orig))
+ for i, addr := range orig {
+ resolved, found, err := ist.ResolveAddress(addr)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("address not found: %s", addr)
+ }
+ ret[i] = resolved
+ }
+
+ log.Printf("resolved addresses: %v", ret)
+ return ret, nil
+}
+
+// loadInitActor loads the init actor and its state from the given state tree.
+func (sg *StateSurgeon) loadInitActor(st *state.StateTree) (*types.Actor, init_.State, error) {
+ actor, err := st.GetActor(init_.Address)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ initState, err := init_.Load(sg.stores.ADTStore, actor)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ log.Printf("loaded init actor state: %+v", initState)
+
+ return actor, initState, nil
+}
diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go
new file mode 100644
index 000000000..93e0d215f
--- /dev/null
+++ b/cmd/tvx/stores.go
@@ -0,0 +1,142 @@
+package main
+
+import (
+ "context"
+ "log"
+ "sync"
+
+ "github.com/fatih/color"
+ dssync "github.com/ipfs/go-datastore/sync"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-blockservice"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ exchange "github.com/ipfs/go-ipfs-exchange-interface"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-merkledag"
+)
+
+// Stores is a collection of the different stores and services that are needed
+// to deal with the data layer of Filecoin, conveniently interlinked with one
+// another.
+type Stores struct {
+ CBORStore cbor.IpldStore
+ ADTStore adt.Store
+ Datastore ds.Batching
+ Blockstore blockstore.Blockstore
+ BlockService blockservice.BlockService
+ Exchange exchange.Interface
+ DAGService format.DAGService
+}
+
+// NewProxyingStores is a set of Stores backed by a proxying Blockstore that
+// proxies Get requests for unknown CIDs to a Filecoin node, via the
+// ChainReadObj RPC.
+func NewProxyingStores(ctx context.Context, api api.FullNode) *Stores {
+ ds := dssync.MutexWrap(ds.NewMapDatastore())
+ bs := &proxyingBlockstore{
+ ctx: ctx,
+ api: api,
+ Blockstore: blockstore.NewBlockstore(ds),
+ }
+ return NewStores(ctx, ds, bs)
+}
+
+// NewStores creates a non-proxying set of Stores.
+func NewStores(ctx context.Context, ds ds.Batching, bs blockstore.Blockstore) *Stores {
+ var (
+ cborstore = cbor.NewCborStore(bs)
+ offl = offline.Exchange(bs)
+ blkserv = blockservice.New(bs, offl)
+ dserv = merkledag.NewDAGService(blkserv)
+ )
+
+ return &Stores{
+ CBORStore: cborstore,
+ ADTStore: adt.WrapStore(ctx, cborstore),
+ Datastore: ds,
+ Blockstore: bs,
+ Exchange: offl,
+ BlockService: blkserv,
+ DAGService: dserv,
+ }
+}
+
+// TracingBlockstore is a Blockstore trait that records CIDs that were accessed
+// through Get.
+type TracingBlockstore interface {
+ // StartTracing starts tracing CIDs accessed through this Blockstore.
+ StartTracing()
+
+ // FinishTracing finishes tracing accessed CIDs, and returns a map of the
+ // CIDs that were traced.
+ FinishTracing() map[cid.Cid]struct{}
+}
+
+// proxyingBlockstore is a Blockstore wrapper that fetches unknown CIDs from
+// a Filecoin node via JSON-RPC.
+type proxyingBlockstore struct {
+ ctx context.Context
+ api api.FullNode
+
+ lk sync.RWMutex
+ tracing bool
+ traced map[cid.Cid]struct{}
+
+ blockstore.Blockstore
+}
+
+var _ TracingBlockstore = (*proxyingBlockstore)(nil)
+
+func (pb *proxyingBlockstore) StartTracing() {
+ pb.lk.Lock()
+ pb.tracing = true
+ pb.traced = map[cid.Cid]struct{}{}
+ pb.lk.Unlock()
+}
+
+func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} {
+ pb.lk.Lock()
+ ret := pb.traced
+ pb.tracing = false
+ pb.traced = map[cid.Cid]struct{}{}
+ pb.lk.Unlock()
+ return ret
+}
+
+func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
+ pb.lk.RLock()
+ if pb.tracing {
+ pb.traced[cid] = struct{}{}
+ }
+ pb.lk.RUnlock()
+
+ if block, err := pb.Blockstore.Get(cid); err == nil {
+ return block, err
+ }
+
+ log.Println(color.CyanString("fetching cid via rpc: %v", cid))
+ item, err := pb.api.ChainReadObj(pb.ctx, cid)
+ if err != nil {
+ return nil, err
+ }
+ block, err := blocks.NewBlockWithCid(item, cid)
+ if err != nil {
+ return nil, err
+ }
+
+ err = pb.Blockstore.Put(block)
+ if err != nil {
+ return nil, err
+ }
+
+ return block, nil
+}
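A short sketch of how a caller can drive the TracingBlockstore trait defined above; it assumes the Stores and TracingBlockstore types from this file, needs "fmt" and "github.com/ipfs/go-cid" imported, and mirrors the 'accessed-cids' retention path in the extract command.

    // withTracing records every CID read from the blockstore while read() runs,
    // including blocks the proxying blockstore pulls over RPC on a cache miss.
    // (Illustrative fragment; not part of the patch above.)
    func withTracing(stores *Stores, read func() error) (map[cid.Cid]struct{}, error) {
        tbs, ok := stores.Blockstore.(TracingBlockstore)
        if !ok {
            return nil, fmt.Errorf("blockstore does not support tracing")
        }

        tbs.StartTracing()
        if err := read(); err != nil {
            return nil, err
        }
        return tbs.FinishTracing(), nil
    }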
diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go
new file mode 100644
index 000000000..d5e0b4352
--- /dev/null
+++ b/conformance/chaos/actor.go
@@ -0,0 +1,292 @@
+package chaos
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/go-state-types/rt"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/filecoin-project/specs-actors/actors/runtime"
+ "github.com/ipfs/go-cid"
+)
+
+//go:generate go run ./gen
+
+// Actor is a chaos actor. It implements a variety of illegal behaviours that
+// trigger violations of VM invariants. These behaviours are not found in
+// production code, but are important to test that the VM constraints are
+// properly enforced.
+//
+// The chaos actor is being incubated and its behaviour and ABI will be standardised
+// shortly. Its CID is ChaosActorCodeCID, and its singleton address is 98 (Address).
+// It cannot be instantiated via the init actor, and its constructor panics.
+//
+// Test vectors relying on the chaos actor being deployed will carry selector
+// "chaos_actor:true".
+type Actor struct{}
+
+// CallerValidationBranch is an enum used to select a branch in the
+// CallerValidation method.
+type CallerValidationBranch int64
+
+const (
+ // CallerValidationBranchNone causes no caller validation to take place.
+ CallerValidationBranchNone CallerValidationBranch = iota
+ // CallerValidationBranchTwice causes Runtime.ValidateImmediateCallerAcceptAny to be called twice.
+ CallerValidationBranchTwice
+ // CallerValidationBranchIsAddress causes caller validation against CallerValidationArgs.Addrs.
+ CallerValidationBranchIsAddress
+ // CallerValidationBranchIsType causes caller validation against CallerValidationArgs.Types.
+ CallerValidationBranchIsType
+)
+
+// MutateStateBranch is an enum used to select the type of state mutation to attempt.
+type MutateStateBranch int64
+
+const (
+ // MutateInTransaction legally mutates state within a transaction.
+ MutateInTransaction MutateStateBranch = iota
+ // MutateReadonly ILLEGALLY mutates readonly state.
+ MutateReadonly
+ // MutateAfterTransaction ILLEGALLY mutates state after a transaction.
+ MutateAfterTransaction
+)
+
+const (
+ _ = 0 // skip zero iota value; first usage of iota gets 1.
+ MethodCallerValidation = builtin.MethodConstructor + iota
+ MethodCreateActor
+ MethodResolveAddress
+ // MethodDeleteActor is the identifier for the method that deletes this actor.
+ MethodDeleteActor
+ // MethodSend is the identifier for the method that sends a message to another actor.
+ MethodSend
+ // MethodMutateState is the identifier for the method that attempts to mutate
+ // a state value in the actor.
+ MethodMutateState
+ // MethodAbortWith is the identifier for the method that panics optionally with
+ // a passed exit code.
+ MethodAbortWith
+ // MethodInspectRuntime is the identifier for the method that returns the
+ // current runtime values.
+ MethodInspectRuntime
+)
+
+// Exports defines the methods this actor exposes publicly.
+func (a Actor) Exports() []interface{} {
+ return []interface{}{
+ builtin.MethodConstructor: a.Constructor,
+ MethodCallerValidation: a.CallerValidation,
+ MethodCreateActor: a.CreateActor,
+ MethodResolveAddress: a.ResolveAddress,
+ MethodDeleteActor: a.DeleteActor,
+ MethodSend: a.Send,
+ MethodMutateState: a.MutateState,
+ MethodAbortWith: a.AbortWith,
+ MethodInspectRuntime: a.InspectRuntime,
+ }
+}
+
+func (a Actor) Code() cid.Cid { return ChaosActorCodeCID }
+func (a Actor) State() cbor.Er { return new(State) }
+func (a Actor) IsSingleton() bool { return true }
+
+var _ rt.VMActor = Actor{}
+
+// SendArgs are the arguments for the Send method.
+type SendArgs struct {
+ To address.Address
+ Value abi.TokenAmount
+ Method abi.MethodNum
+ Params []byte
+}
+
+// SendReturn is the return values for the Send method.
+type SendReturn struct {
+ Return runtime.CBORBytes
+ Code exitcode.ExitCode
+}
+
+// Send requests for this actor to send a message to an actor with the
+// passed parameters.
+func (a Actor) Send(rt runtime.Runtime, args *SendArgs) *SendReturn {
+ rt.ValidateImmediateCallerAcceptAny()
+ var out runtime.CBORBytes
+ code := rt.Send(
+ args.To,
+ args.Method,
+ runtime.CBORBytes(args.Params),
+ args.Value,
+ &out,
+ )
+ return &SendReturn{
+ Return: out,
+ Code: code,
+ }
+}
+
+// Constructor will panic because the Chaos actor is a singleton.
+func (a Actor) Constructor(_ runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
+ panic("constructor should not be called; the Chaos actor is a singleton actor")
+}
+
+// CallerValidationArgs are the arguments to Actor.CallerValidation.
+type CallerValidationArgs struct {
+ Branch CallerValidationBranch
+ Addrs []address.Address
+ Types []cid.Cid
+}
+
+// CallerValidation violates VM call validation constraints.
+//
+// CallerValidationBranchNone performs no validation.
+// CallerValidationBranchTwice validates twice.
+// CallerValidationBranchIsAddress validates caller against CallerValidationArgs.Addrs.
+// CallerValidationBranchIsType validates caller against CallerValidationArgs.Types.
+func (a Actor) CallerValidation(rt runtime.Runtime, args *CallerValidationArgs) *abi.EmptyValue {
+ switch args.Branch {
+ case CallerValidationBranchNone:
+ case CallerValidationBranchTwice:
+ rt.ValidateImmediateCallerAcceptAny()
+ rt.ValidateImmediateCallerAcceptAny()
+ case CallerValidationBranchIsAddress:
+ rt.ValidateImmediateCallerIs(args.Addrs...)
+ case CallerValidationBranchIsType:
+ rt.ValidateImmediateCallerType(args.Types...)
+ default:
+ panic("invalid branch passed to CallerValidation")
+ }
+
+ return nil
+}
+
+// CreateActorArgs are the arguments to CreateActor.
+type CreateActorArgs struct {
+ // UndefActorCID instructs us to use cid.Undef; we can't pass cid.Undef
+ // in ActorCID because it doesn't serialize.
+ UndefActorCID bool
+ ActorCID cid.Cid
+
+ // UndefAddress is the same as UndefActorCID but for Address.
+ UndefAddress bool
+ Address address.Address
+}
+
+// CreateActor creates an actor with the supplied CID and Address.
+func (a Actor) CreateActor(rt runtime.Runtime, args *CreateActorArgs) *abi.EmptyValue {
+ rt.ValidateImmediateCallerAcceptAny()
+
+ var (
+ acid = args.ActorCID
+ addr = args.Address
+ )
+
+ if args.UndefActorCID {
+ acid = cid.Undef
+ }
+ if args.UndefAddress {
+ addr = address.Undef
+ }
+
+ rt.CreateActor(acid, addr)
+ return nil
+}
+
+// ResolveAddressResponse holds the response of a call to runtime.ResolveAddress
+type ResolveAddressResponse struct {
+ Address address.Address
+ Success bool
+}
+
+func (a Actor) ResolveAddress(rt runtime.Runtime, args *address.Address) *ResolveAddressResponse {
+ rt.ValidateImmediateCallerAcceptAny()
+
+ resolvedAddr, ok := rt.ResolveAddress(*args)
+ if !ok {
+ invalidAddr, _ := address.NewIDAddress(0)
+ resolvedAddr = invalidAddr
+ }
+ return &ResolveAddressResponse{resolvedAddr, ok}
+}
+
+// DeleteActor deletes the executing actor from the state tree, transferring any
+// balance to beneficiary.
+func (a Actor) DeleteActor(rt runtime.Runtime, beneficiary *address.Address) *abi.EmptyValue {
+ rt.ValidateImmediateCallerAcceptAny()
+ rt.DeleteActor(*beneficiary)
+ return nil
+}
+
+// MutateStateArgs specify the value to set on the state and the way in which
+// it should be attempted to be set.
+type MutateStateArgs struct {
+ Value string
+ Branch MutateStateBranch
+}
+
+// MutateState attempts to mutate a state value in the actor.
+func (a Actor) MutateState(rt runtime.Runtime, args *MutateStateArgs) *abi.EmptyValue {
+ rt.ValidateImmediateCallerAcceptAny()
+ var st State
+ switch args.Branch {
+ case MutateInTransaction:
+ rt.StateTransaction(&st, func() {
+ st.Value = args.Value
+ })
+ case MutateReadonly:
+ rt.StateReadonly(&st)
+ st.Value = args.Value
+ case MutateAfterTransaction:
+ rt.StateTransaction(&st, func() {
+ st.Value = args.Value + "-in"
+ })
+ st.Value = args.Value
+ default:
+ panic("unknown mutation type")
+ }
+ return nil
+}
+
+// AbortWithArgs are the arguments to the Actor.AbortWith method, specifying the
+// exit code to (optionally) abort with and the message.
+type AbortWithArgs struct {
+ Code exitcode.ExitCode
+ Message string
+ Uncontrolled bool
+}
+
+// AbortWith simply causes a panic with the passed exit code.
+func (a Actor) AbortWith(rt runtime.Runtime, args *AbortWithArgs) *abi.EmptyValue {
+ if args.Uncontrolled { // uncontrolled abort: directly panic
+ panic(args.Message)
+ } else {
+ rt.Abortf(args.Code, args.Message)
+ }
+ return nil
+}
+
+// InspectRuntimeReturn is the return value for the Actor.InspectRuntime method.
+type InspectRuntimeReturn struct {
+ Caller address.Address
+ Receiver address.Address
+ ValueReceived abi.TokenAmount
+ CurrEpoch abi.ChainEpoch
+ CurrentBalance abi.TokenAmount
+ State State
+}
+
+// InspectRuntime returns a copy of the serializable values available in the Runtime.
+func (a Actor) InspectRuntime(rt runtime.Runtime, _ *abi.EmptyValue) *InspectRuntimeReturn {
+ rt.ValidateImmediateCallerAcceptAny()
+ var st State
+ rt.StateReadonly(&st)
+ return &InspectRuntimeReturn{
+ Caller: rt.Caller(),
+ Receiver: rt.Receiver(),
+ ValueReceived: rt.ValueReceived(),
+ CurrEpoch: rt.CurrEpoch(),
+ CurrentBalance: rt.CurrentBalance(),
+ State: st,
+ }
+}
diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go
new file mode 100644
index 000000000..2061efb82
--- /dev/null
+++ b/conformance/chaos/actor_test.go
@@ -0,0 +1,275 @@
+package chaos
+
+import (
+ "context"
+ "testing"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/filecoin-project/specs-actors/support/mock"
+ atesting "github.com/filecoin-project/specs-actors/support/testing"
+ "github.com/ipfs/go-cid"
+)
+
+func TestSingleton(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ msg := "constructor should not be called; the Chaos actor is a singleton actor"
+ rt.ExpectAssertionFailure(msg, func() {
+ rt.Call(a.Constructor, abi.Empty)
+ })
+ rt.Verify()
+}
+
+func TestCallerValidationNone(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.Call(a.CallerValidation, &CallerValidationArgs{Branch: CallerValidationBranchNone})
+ rt.Verify()
+}
+
+func TestCallerValidationIs(t *testing.T) {
+ caller := atesting.NewIDAddr(t, 100)
+ receiver := atesting.NewIDAddr(t, 101)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ rt.SetCaller(caller, builtin.AccountActorCodeID)
+ var a Actor
+
+ caddrs := []address.Address{atesting.NewIDAddr(t, 101)}
+
+ rt.ExpectValidateCallerAddr(caddrs...)
+ // FIXME: https://github.com/filecoin-project/specs-actors/pull/1155
+ rt.ExpectAbort(exitcode.ErrForbidden, func() {
+ rt.Call(a.CallerValidation, &CallerValidationArgs{
+ Branch: CallerValidationBranchIsAddress,
+ Addrs: caddrs,
+ })
+ })
+ rt.Verify()
+
+ rt.ExpectValidateCallerAddr(caller)
+ rt.Call(a.CallerValidation, &CallerValidationArgs{
+ Branch: CallerValidationBranchIsAddress,
+ Addrs: []address.Address{caller},
+ })
+ rt.Verify()
+}
+
+func TestCallerValidationType(t *testing.T) {
+ caller := atesting.NewIDAddr(t, 100)
+ receiver := atesting.NewIDAddr(t, 101)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ rt.SetCaller(caller, builtin.AccountActorCodeID)
+ var a Actor
+
+ rt.ExpectValidateCallerType(builtin.CronActorCodeID)
+ // FIXME: https://github.com/filecoin-project/specs-actors/pull/1155
+ rt.ExpectAbort(exitcode.ErrForbidden, func() {
+ rt.Call(a.CallerValidation, &CallerValidationArgs{
+ Branch: CallerValidationBranchIsType,
+ Types: []cid.Cid{builtin.CronActorCodeID},
+ })
+ })
+ rt.Verify()
+
+ rt.ExpectValidateCallerType(builtin.AccountActorCodeID)
+ rt.Call(a.CallerValidation, &CallerValidationArgs{
+ Branch: CallerValidationBranchIsType,
+ Types: []cid.Cid{builtin.AccountActorCodeID},
+ })
+ rt.Verify()
+}
+
+func TestCallerValidationInvalidBranch(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectAssertionFailure("invalid branch passed to CallerValidation", func() {
+ rt.Call(a.CallerValidation, &CallerValidationArgs{Branch: -1})
+ })
+ rt.Verify()
+}
+
+func TestDeleteActor(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ beneficiary := atesting.NewIDAddr(t, 101)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ rt.ExpectDeleteActor(beneficiary)
+ rt.Call(a.DeleteActor, &beneficiary)
+ rt.Verify()
+}
+
+func TestMutateStateInTransaction(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ rt.StateCreate(&State{})
+
+ val := "__mutstat test"
+ rt.Call(a.MutateState, &MutateStateArgs{
+ Value: val,
+ Branch: MutateInTransaction,
+ })
+
+ var st State
+ rt.GetState(&st)
+
+ if st.Value != val {
+ t.Fatal("state was not updated")
+ }
+
+ rt.Verify()
+}
+
+func TestMutateStateAfterTransaction(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ rt.StateCreate(&State{})
+
+ val := "__mutstat test"
+ rt.Call(a.MutateState, &MutateStateArgs{
+ Value: val,
+ Branch: MutateAfterTransaction,
+ })
+
+ var st State
+ rt.GetState(&st)
+
+ // state should be updated successfully _in_ the transaction but not outside
+ if st.Value != val+"-in" {
+ t.Fatal("state was not updated")
+ }
+
+ rt.Verify()
+}
+
+func TestMutateStateReadonly(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ rt.StateCreate(&State{})
+
+ val := "__mutstat test"
+ rt.Call(a.MutateState, &MutateStateArgs{
+ Value: val,
+ Branch: MutateReadonly,
+ })
+
+ var st State
+ rt.GetState(&st)
+
+ if st.Value != "" {
+ t.Fatal("state was not expected to be updated")
+ }
+
+ rt.Verify()
+}
+
+func TestMutateStateInvalidBranch(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ rt.ExpectAssertionFailure("unknown mutation type", func() {
+ rt.Call(a.MutateState, &MutateStateArgs{Branch: -1})
+ })
+ rt.Verify()
+}
+
+func TestAbortWith(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ msg := "__test forbidden"
+ rt.ExpectAbortContainsMessage(exitcode.ErrForbidden, msg, func() {
+ rt.Call(a.AbortWith, &AbortWithArgs{
+ Code: exitcode.ErrForbidden,
+ Message: msg,
+ Uncontrolled: false,
+ })
+ })
+ rt.Verify()
+}
+
+func TestAbortWithUncontrolled(t *testing.T) {
+ receiver := atesting.NewIDAddr(t, 100)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ var a Actor
+
+ msg := "__test uncontrolled panic"
+ rt.ExpectAssertionFailure(msg, func() {
+ rt.Call(a.AbortWith, &AbortWithArgs{
+ Message: msg,
+ Uncontrolled: true,
+ })
+ })
+ rt.Verify()
+}
+
+func TestInspectRuntime(t *testing.T) {
+ caller := atesting.NewIDAddr(t, 100)
+ receiver := atesting.NewIDAddr(t, 101)
+ builder := mock.NewBuilder(context.Background(), receiver)
+
+ rt := builder.Build(t)
+ rt.SetCaller(caller, builtin.AccountActorCodeID)
+ rt.StateCreate(&State{})
+ var a Actor
+
+ rt.ExpectValidateCallerAny()
+ ret := rt.Call(a.InspectRuntime, abi.Empty)
+ rtr, ok := ret.(*InspectRuntimeReturn)
+ if !ok {
+ t.Fatal("invalid return value")
+ }
+ if rtr.Caller != caller {
+ t.Fatal("unexpected runtime caller")
+ }
+ if rtr.Receiver != receiver {
+ t.Fatal("unexpected runtime receiver")
+ }
+ rt.Verify()
+}
diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go
new file mode 100644
index 000000000..882af7026
--- /dev/null
+++ b/conformance/chaos/cbor_gen.go
@@ -0,0 +1,1033 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+package chaos
+
+import (
+ "fmt"
+ "io"
+
+ address "github.com/filecoin-project/go-address"
+ abi "github.com/filecoin-project/go-state-types/abi"
+ exitcode "github.com/filecoin-project/go-state-types/exitcode"
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ xerrors "golang.org/x/xerrors"
+)
+
+var _ = xerrors.Errorf
+
+var lengthBufState = []byte{130}
+
+func (t *State) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufState); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Value (string) (string)
+ if len(t.Value) > cbg.MaxLength {
+ return xerrors.Errorf("Value in field t.Value was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string(t.Value)); err != nil {
+ return err
+ }
+
+ // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice)
+ if len(t.Unmarshallable) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Unmarshallable was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Unmarshallable))); err != nil {
+ return err
+ }
+ for _, v := range t.Unmarshallable {
+ if err := v.MarshalCBOR(w); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *State) UnmarshalCBOR(r io.Reader) error {
+ *t = State{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 2 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Value (string) (string)
+
+ {
+ sval, err := cbg.ReadStringBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ t.Value = string(sval)
+ }
+ // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Unmarshallable: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Unmarshallable = make([]*UnmarshallableCBOR, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ var v UnmarshallableCBOR
+ if err := v.UnmarshalCBOR(br); err != nil {
+ return err
+ }
+
+ t.Unmarshallable[i] = &v
+ }
+
+ return nil
+}
+
+var lengthBufCallerValidationArgs = []byte{131}
+
+func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufCallerValidationArgs); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Branch (chaos.CallerValidationBranch) (int64)
+ if t.Branch >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
+ return err
+ }
+ }
+
+ // t.Addrs ([]address.Address) (slice)
+ if len(t.Addrs) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Addrs was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Addrs))); err != nil {
+ return err
+ }
+ for _, v := range t.Addrs {
+ if err := v.MarshalCBOR(w); err != nil {
+ return err
+ }
+ }
+
+ // t.Types ([]cid.Cid) (slice)
+ if len(t.Types) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Types was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Types))); err != nil {
+ return err
+ }
+ for _, v := range t.Types {
+ if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
+ return xerrors.Errorf("failed writing cid field t.Types: %w", err)
+ }
+ }
+ return nil
+}
+
+func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error {
+ *t = CallerValidationArgs{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Branch (chaos.CallerValidationBranch) (int64)
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+				return fmt.Errorf("int64 negative overflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.Branch = CallerValidationBranch(extraI)
+ }
+ // t.Addrs ([]address.Address) (slice)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Addrs: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Addrs = make([]address.Address, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ var v address.Address
+ if err := v.UnmarshalCBOR(br); err != nil {
+ return err
+ }
+
+ t.Addrs[i] = v
+ }
+
+ // t.Types ([]cid.Cid) (slice)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Types: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Types = make([]cid.Cid, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("reading cid field t.Types failed: %w", err)
+ }
+ t.Types[i] = c
+ }
+
+ return nil
+}
+
+var lengthBufCreateActorArgs = []byte{132}
+
+func (t *CreateActorArgs) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufCreateActorArgs); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.UndefActorCID (bool) (bool)
+ if err := cbg.WriteBool(w, t.UndefActorCID); err != nil {
+ return err
+ }
+
+ // t.ActorCID (cid.Cid) (struct)
+
+ if err := cbg.WriteCidBuf(scratch, w, t.ActorCID); err != nil {
+ return xerrors.Errorf("failed to write cid field t.ActorCID: %w", err)
+ }
+
+ // t.UndefAddress (bool) (bool)
+ if err := cbg.WriteBool(w, t.UndefAddress); err != nil {
+ return err
+ }
+
+ // t.Address (address.Address) (struct)
+ if err := t.Address.MarshalCBOR(w); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error {
+ *t = CreateActorArgs{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 4 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.UndefActorCID (bool) (bool)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.UndefActorCID = false
+ case 21:
+ t.UndefActorCID = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+ // t.ActorCID (cid.Cid) (struct)
+
+ {
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.ActorCID: %w", err)
+ }
+
+ t.ActorCID = c
+
+ }
+ // t.UndefAddress (bool) (bool)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.UndefAddress = false
+ case 21:
+ t.UndefAddress = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+ // t.Address (address.Address) (struct)
+
+ {
+
+ if err := t.Address.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Address: %w", err)
+ }
+
+ }
+ return nil
+}
+
+var lengthBufResolveAddressResponse = []byte{130}
+
+func (t *ResolveAddressResponse) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufResolveAddressResponse); err != nil {
+ return err
+ }
+
+ // t.Address (address.Address) (struct)
+ if err := t.Address.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.Success (bool) (bool)
+ if err := cbg.WriteBool(w, t.Success); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *ResolveAddressResponse) UnmarshalCBOR(r io.Reader) error {
+ *t = ResolveAddressResponse{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 2 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Address (address.Address) (struct)
+
+ {
+
+ if err := t.Address.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Address: %w", err)
+ }
+
+ }
+ // t.Success (bool) (bool)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.Success = false
+ case 21:
+ t.Success = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+ return nil
+}
+
+var lengthBufSendArgs = []byte{132}
+
+func (t *SendArgs) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufSendArgs); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.To (address.Address) (struct)
+ if err := t.To.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.Value (big.Int) (struct)
+ if err := t.Value.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.Method (abi.MethodNum) (uint64)
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
+ return err
+ }
+
+ // t.Params ([]uint8) (slice)
+ if len(t.Params) > cbg.ByteArrayMaxLen {
+ return xerrors.Errorf("Byte array in field t.Params was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(t.Params[:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *SendArgs) UnmarshalCBOR(r io.Reader) error {
+ *t = SendArgs{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 4 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.To (address.Address) (struct)
+
+ {
+
+ if err := t.To.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.To: %w", err)
+ }
+
+ }
+ // t.Value (big.Int) (struct)
+
+ {
+
+ if err := t.Value.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Value: %w", err)
+ }
+
+ }
+ // t.Method (abi.MethodNum) (uint64)
+
+ {
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Method = abi.MethodNum(extra)
+
+ }
+ // t.Params ([]uint8) (slice)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.ByteArrayMaxLen {
+ return fmt.Errorf("t.Params: byte array too large (%d)", extra)
+ }
+ if maj != cbg.MajByteString {
+ return fmt.Errorf("expected byte array")
+ }
+
+ if extra > 0 {
+ t.Params = make([]uint8, extra)
+ }
+
+ if _, err := io.ReadFull(br, t.Params[:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+var lengthBufSendReturn = []byte{130}
+
+func (t *SendReturn) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufSendReturn); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Return (runtime.CBORBytes) (slice)
+ if len(t.Return) > cbg.ByteArrayMaxLen {
+ return xerrors.Errorf("Byte array in field t.Return was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Return))); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(t.Return[:]); err != nil {
+ return err
+ }
+
+ // t.Code (exitcode.ExitCode) (int64)
+ if t.Code >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *SendReturn) UnmarshalCBOR(r io.Reader) error {
+ *t = SendReturn{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 2 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Return (runtime.CBORBytes) (slice)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.ByteArrayMaxLen {
+ return fmt.Errorf("t.Return: byte array too large (%d)", extra)
+ }
+ if maj != cbg.MajByteString {
+ return fmt.Errorf("expected byte array")
+ }
+
+ if extra > 0 {
+ t.Return = make([]uint8, extra)
+ }
+
+ if _, err := io.ReadFull(br, t.Return[:]); err != nil {
+ return err
+ }
+ // t.Code (exitcode.ExitCode) (int64)
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+				return fmt.Errorf("int64 negative overflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.Code = exitcode.ExitCode(extraI)
+ }
+ return nil
+}
+
+var lengthBufMutateStateArgs = []byte{130}
+
+func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufMutateStateArgs); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Value (string) (string)
+ if len(t.Value) > cbg.MaxLength {
+ return xerrors.Errorf("Value in field t.Value was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string(t.Value)); err != nil {
+ return err
+ }
+
+ // t.Branch (chaos.MutateStateBranch) (int64)
+ if t.Branch >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) error {
+ *t = MutateStateArgs{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 2 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Value (string) (string)
+
+ {
+ sval, err := cbg.ReadStringBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ t.Value = string(sval)
+ }
+ // t.Branch (chaos.MutateStateBranch) (int64)
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+				return fmt.Errorf("int64 negative overflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.Branch = MutateStateBranch(extraI)
+ }
+ return nil
+}
+
+var lengthBufAbortWithArgs = []byte{131}
+
+func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufAbortWithArgs); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Code (exitcode.ExitCode) (int64)
+ if t.Code >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil {
+ return err
+ }
+ }
+
+ // t.Message (string) (string)
+ if len(t.Message) > cbg.MaxLength {
+ return xerrors.Errorf("Value in field t.Message was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string(t.Message)); err != nil {
+ return err
+ }
+
+ // t.Uncontrolled (bool) (bool)
+ if err := cbg.WriteBool(w, t.Uncontrolled); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {
+ *t = AbortWithArgs{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Code (exitcode.ExitCode) (int64)
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+				return fmt.Errorf("int64 negative overflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.Code = exitcode.ExitCode(extraI)
+ }
+ // t.Message (string) (string)
+
+ {
+ sval, err := cbg.ReadStringBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ t.Message = string(sval)
+ }
+ // t.Uncontrolled (bool) (bool)
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.Uncontrolled = false
+ case 21:
+ t.Uncontrolled = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+ return nil
+}
+
+var lengthBufInspectRuntimeReturn = []byte{134}
+
+func (t *InspectRuntimeReturn) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufInspectRuntimeReturn); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Caller (address.Address) (struct)
+ if err := t.Caller.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.Receiver (address.Address) (struct)
+ if err := t.Receiver.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.ValueReceived (big.Int) (struct)
+ if err := t.ValueReceived.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.CurrEpoch (abi.ChainEpoch) (int64)
+ if t.CurrEpoch >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrEpoch)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CurrEpoch-1)); err != nil {
+ return err
+ }
+ }
+
+ // t.CurrentBalance (big.Int) (struct)
+ if err := t.CurrentBalance.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.State (chaos.State) (struct)
+ if err := t.State.MarshalCBOR(w); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {
+ *t = InspectRuntimeReturn{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 6 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Caller (address.Address) (struct)
+
+ {
+
+ if err := t.Caller.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Caller: %w", err)
+ }
+
+ }
+ // t.Receiver (address.Address) (struct)
+
+ {
+
+ if err := t.Receiver.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Receiver: %w", err)
+ }
+
+ }
+ // t.ValueReceived (big.Int) (struct)
+
+ {
+
+ if err := t.ValueReceived.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.ValueReceived: %w", err)
+ }
+
+ }
+ // t.CurrEpoch (abi.ChainEpoch) (int64)
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+				return fmt.Errorf("int64 negative overflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.CurrEpoch = abi.ChainEpoch(extraI)
+ }
+ // t.CurrentBalance (big.Int) (struct)
+
+ {
+
+ if err := t.CurrentBalance.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.CurrentBalance: %w", err)
+ }
+
+ }
+ // t.State (chaos.State) (struct)
+
+ {
+
+ if err := t.State.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.State: %w", err)
+ }
+
+ }
+ return nil
+}
diff --git a/conformance/chaos/gen/gen.go b/conformance/chaos/gen/gen.go
new file mode 100644
index 000000000..ac97da99e
--- /dev/null
+++ b/conformance/chaos/gen/gen.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "github.com/filecoin-project/lotus/conformance/chaos"
+
+ gen "github.com/whyrusleeping/cbor-gen"
+)
+
+func main() {
+ if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "chaos",
+ chaos.State{},
+ chaos.CallerValidationArgs{},
+ chaos.CreateActorArgs{},
+ chaos.ResolveAddressResponse{},
+ chaos.SendArgs{},
+ chaos.SendReturn{},
+ chaos.MutateStateArgs{},
+ chaos.AbortWithArgs{},
+ chaos.InspectRuntimeReturn{},
+ ); err != nil {
+ panic(err)
+ }
+}
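
For reviewers: the generator above emits the tuple encoders in cbor_gen.go. A minimal sketch (not part of this change; it assumes the exported constant names used in the tests) of round-tripping one of the registered types through those encoders:

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/lotus/conformance/chaos"
)

func main() {
	// Encode a chaos argument struct with the generated tuple encoder...
	in := chaos.MutateStateArgs{Value: "hello", Branch: chaos.MutateInTransaction}
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// ...and decode it back from the same buffer.
	var out chaos.MutateStateArgs
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.Value == in.Value, out.Branch == in.Branch) // true true
}
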
diff --git a/conformance/chaos/ids.go b/conformance/chaos/ids.go
new file mode 100644
index 000000000..6b0ad86a7
--- /dev/null
+++ b/conformance/chaos/ids.go
@@ -0,0 +1,29 @@
+package chaos
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-multihash"
+)
+
+// ChaosActorCodeCID is the CID by which this kind of actor will be identified.
+var ChaosActorCodeCID = func() cid.Cid {
+ builder := cid.V1Builder{Codec: cid.Raw, MhType: multihash.IDENTITY}
+ c, err := builder.Sum([]byte("fil/1/chaos"))
+ if err != nil {
+ panic(err)
+ }
+ return c
+}()
+
+// Address is the singleton address of this actor. Its value is 98
+// (builtin.FirstNonSingletonActorId - 2), as 99 is reserved for the burnt funds
+// singleton.
+var Address = func() address.Address {
+ // the address before the burnt funds address (99)
+ addr, err := address.NewIDAddress(98)
+ if err != nil {
+ panic(err)
+ }
+ return addr
+}()
diff --git a/conformance/chaos/state.go b/conformance/chaos/state.go
new file mode 100644
index 000000000..4a54ef61c
--- /dev/null
+++ b/conformance/chaos/state.go
@@ -0,0 +1,32 @@
+package chaos
+
+import (
+ "fmt"
+ "io"
+)
+
+// State is the state for the chaos actor used by some methods to invoke
+// behaviours in the vm or runtime.
+type State struct {
+ // Value can be updated by chaos actor methods to test illegal state
+ // mutations when the state is in readonly mode for example.
+ Value string
+	// Unmarshallable is a sentinel value. If the slice is empty, the State
+	// struct encodes to CBOR without issue. If the slice contains any element,
+	// CBOR encoding will fail.
+ Unmarshallable []*UnmarshallableCBOR
+}
+
+// UnmarshallableCBOR is a type that cannot be marshalled to or unmarshalled
+// from CBOR despite implementing the CBORMarshaler and CBORUnmarshaler
+// interfaces.
+type UnmarshallableCBOR struct{}
+
+// UnmarshalCBOR will fail to unmarshal the value from CBOR.
+func (t *UnmarshallableCBOR) UnmarshalCBOR(io.Reader) error {
+ return fmt.Errorf("failed to unmarshal cbor")
+}
+
+// MarshalCBOR will fail to marshal the value to CBOR.
+func (t *UnmarshallableCBOR) MarshalCBOR(io.Writer) error {
+ return fmt.Errorf("failed to marshal cbor")
+}
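
To illustrate the sentinel (a sketch, not part of this change): populating Unmarshallable makes the generated State.MarshalCBOR fail, which vectors use to exercise serialization error paths in the VM.

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/lotus/conformance/chaos"
)

func main() {
	// With an empty Unmarshallable slice the state encodes fine; a single
	// element forces the marshalling failure.
	st := chaos.State{
		Value:          "ok",
		Unmarshallable: []*chaos.UnmarshallableCBOR{{}},
	}
	var buf bytes.Buffer
	fmt.Println(st.MarshalCBOR(&buf)) // prints: failed to marshal cbor
}
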
diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go
new file mode 100644
index 000000000..3d447570d
--- /dev/null
+++ b/conformance/corpus_test.go
@@ -0,0 +1,133 @@
+package conformance
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/filecoin-project/test-vectors/schema"
+)
+
+const (
+ // EnvSkipConformance, if 1, skips the conformance test suite.
+ EnvSkipConformance = "SKIP_CONFORMANCE"
+
+ // EnvCorpusRootDir is the name of the environment variable where the path
+ // to an alternative corpus location can be provided.
+ //
+ // The default is defaultCorpusRoot.
+ EnvCorpusRootDir = "CORPUS_DIR"
+
+ // defaultCorpusRoot is the directory where the test vector corpus is hosted.
+ // It is mounted on the Lotus repo as a git submodule.
+ //
+	// When running this test, the corpus root can be overridden through the
+	// CORPUS_DIR environment variable to run an alternate corpus.
+ defaultCorpusRoot = "../extern/test-vectors/corpus"
+)
+
+// ignore is a set of paths relative to root to skip.
+var ignore = map[string]struct{}{
+ ".git": {},
+ "schema.json": {},
+}
+
+// TestConformance is the entrypoint test that runs all test vectors found
+// in the corpus root directory.
+//
+// It locates all json files via a recursive walk, skipping over the ignore set,
+// as well as files beginning with _. It parses each file as a test vector, and
+// runs it via the Driver.
+func TestConformance(t *testing.T) {
+ if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" {
+ t.SkipNow()
+ }
+	// corpusRoot is the effective corpus root path, taken from the CORPUS_DIR
+	// environment variable, falling back to defaultCorpusRoot if not provided.
+ corpusRoot := defaultCorpusRoot
+ if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" {
+ corpusRoot = dir
+ }
+
+ var vectors []string
+ err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ filename := filepath.Base(path)
+ rel, err := filepath.Rel(corpusRoot, path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := ignore[rel]; ok {
+ // skip over using the right error.
+ if info.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if info.IsDir() {
+ // dive into directories.
+ return nil
+ }
+ if filepath.Ext(path) != ".json" {
+ // skip if not .json.
+ return nil
+ }
+ if ignored := strings.HasPrefix(filename, "_"); ignored {
+ // ignore files starting with _.
+ t.Logf("ignoring: %s", rel)
+ return nil
+ }
+ vectors = append(vectors, rel)
+ return nil
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(vectors) == 0 {
+ t.Fatalf("no test vectors found")
+ }
+
+ // Run a test for each vector.
+ for _, v := range vectors {
+ path := filepath.Join(corpusRoot, v)
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+			t.Fatalf("failed to read raw test vector file: %s", path)
+ }
+
+ var vector schema.TestVector
+ err = json.Unmarshal(raw, &vector)
+ if err != nil {
+ t.Errorf("failed to parse test vector %s: %s; skipping", path, err)
+ continue
+ }
+
+ t.Run(v, func(t *testing.T) {
+ for _, h := range vector.Hints {
+ if h == schema.HintIncorrect {
+ t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID)
+ t.SkipNow()
+ }
+ }
+
+ // dispatch the execution depending on the vector class.
+ switch vector.Class {
+ case "message":
+ ExecuteMessageVector(t, &vector)
+ case "tipset":
+ ExecuteTipsetVector(t, &vector)
+ default:
+ t.Fatalf("test vector class not supported: %s", vector.Class)
+ }
+ })
+ }
+}
diff --git a/conformance/driver.go b/conformance/driver.go
index 218198a05..940c137cf 100644
--- a/conformance/driver.go
+++ b/conformance/driver.go
@@ -2,19 +2,25 @@ package conformance
import (
"context"
+ gobig "math/big"
+ "os"
- "github.com/filecoin-project/specs-actors/actors/crypto"
-
+ "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/conformance/chaos"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
+ _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/test-vectors/chaos"
"github.com/filecoin-project/test-vectors/schema"
"github.com/filecoin-project/go-address"
@@ -24,18 +30,36 @@ import (
)
var (
- // BaseFee to use in the VM.
- // TODO make parametrisable through vector.
- BaseFee = abi.NewTokenAmount(100)
+ // DefaultCirculatingSupply is the fallback circulating supply returned by
+ // the driver's CircSupplyCalculator function, used if the vector specifies
+ // no circulating supply.
+ DefaultCirculatingSupply = types.TotalFilecoinInt
+
+	// DefaultBaseFee is the base fee to use in the VM if the vector does not supply one.
+ DefaultBaseFee = abi.NewTokenAmount(100)
)
type Driver struct {
ctx context.Context
selector schema.Selector
+ vmFlush bool
}
-func NewDriver(ctx context.Context, selector schema.Selector) *Driver {
- return &Driver{ctx: ctx, selector: selector}
+type DriverOpts struct {
+	// DisableVMFlush, when true, avoids calling VM.Flush(), which forces a
+	// recursive blockstore copy from the temporary buffer blockstore to the
+	// real system blockstore. Disabling VM flushing is useful when extracting
+	// test vectors and trimming state, as we don't want to force an accidental
+	// deep copy of the state tree.
+	//
+	// Disabling VM flushing should almost always go hand-in-hand with
+	// LOTUS_DISABLE_VM_BUF=iknowitsabadidea. That way, state tree writes are
+	// immediately committed to the blockstore.
+ DisableVMFlush bool
+}
+
+func NewDriver(ctx context.Context, selector schema.Selector, opts DriverOpts) *Driver {
+ return &Driver{ctx: ctx, selector: selector, vmFlush: !opts.DisableVMFlush}
}
type ExecuteTipsetResult struct {
@@ -60,8 +84,8 @@ type ExecuteTipsetResult struct {
// and reward withdrawal per miner.
func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset) (*ExecuteTipsetResult, error) {
var (
- syscalls = mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier))
- vmRand = new(testRand)
+ syscalls = vm.Syscalls(ffiwrapper.ProofVerifier)
+ vmRand = NewFixedRand()
cs = store.NewChainStore(bs, ds, syscalls)
sm = stmgr.NewStateManager(cs)
@@ -96,13 +120,16 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
var (
messages []*types.Message
results []*vm.ApplyRet
+
+ epoch = abi.ChainEpoch(tipset.Epoch)
+ basefee = abi.NewTokenAmount(tipset.BaseFee.Int64())
)
- postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, tipset.Epoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
+ postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, epoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
messages = append(messages, msg)
results = append(results, ret)
return nil
- }, tipset.BaseFee)
+ }, basefee, nil)
if err != nil {
return nil, err
@@ -117,38 +144,75 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
return ret, nil
}
+type ExecuteMessageParams struct {
+ Preroot cid.Cid
+ Epoch abi.ChainEpoch
+ Message *types.Message
+ CircSupply abi.TokenAmount
+ BaseFee abi.TokenAmount
+
+ // Rand is an optional vm.Rand implementation to use. If nil, the driver
+ // will use a vm.Rand that returns a fixed value for all calls.
+ Rand vm.Rand
+}
+
// ExecuteMessage executes a conformance test vector message in a temporary VM.
-func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, preroot cid.Cid, epoch abi.ChainEpoch, msg *types.Message) (*vm.ApplyRet, cid.Cid, error) {
- vmOpts := &vm.VMOpts{
- StateBase: preroot,
- Epoch: epoch,
- Rand: &testRand{}, // TODO always succeeds; need more flexibility.
- Bstore: bs,
- Syscalls: mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier)), // TODO always succeeds; need more flexibility.
- CircSupplyCalc: nil,
- BaseFee: BaseFee,
+func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageParams) (*vm.ApplyRet, cid.Cid, error) {
+ if !d.vmFlush {
+		// do not flush the VM, just the state tree; this should be used with
+		// LOTUS_DISABLE_VM_BUF enabled, so that writes are visible anyway.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
}
- lvm, err := vm.NewVM(vmOpts)
+ if params.Rand == nil {
+ params.Rand = NewFixedRand()
+ }
+
+	// dummy state manager; only used to reference the GetNtwkVersion method,
+	// which does not depend on state.
+ sm := stmgr.NewStateManager(nil)
+
+ vmOpts := &vm.VMOpts{
+ StateBase: params.Preroot,
+ Epoch: params.Epoch,
+ Bstore: bs,
+ Syscalls: vm.Syscalls(ffiwrapper.ProofVerifier),
+ CircSupplyCalc: func(_ context.Context, _ abi.ChainEpoch, _ *state.StateTree) (abi.TokenAmount, error) {
+ return params.CircSupply, nil
+ },
+ Rand: params.Rand,
+ BaseFee: params.BaseFee,
+ NtwkVersion: sm.GetNtwkVersion,
+ }
+
+ lvm, err := vm.NewVM(context.TODO(), vmOpts)
if err != nil {
return nil, cid.Undef, err
}
- invoker := vm.NewInvoker()
+ invoker := vm.NewActorRegistry()
// register the chaos actor if required by the vector.
if chaosOn, ok := d.selector["chaos_actor"]; ok && chaosOn == "true" {
- invoker.Register(chaos.ChaosActorCodeCID, chaos.Actor{}, chaos.State{})
+ invoker.Register(nil, chaos.Actor{})
}
lvm.SetInvoker(invoker)
- ret, err := lvm.ApplyMessage(d.ctx, toChainMsg(msg))
+ ret, err := lvm.ApplyMessage(d.ctx, toChainMsg(params.Message))
if err != nil {
return nil, cid.Undef, err
}
- root, err := lvm.Flush(d.ctx)
+ var root cid.Cid
+ if d.vmFlush {
+ // flush the VM, committing the state tree changes and forcing a
+		// recursive copy from the temporary blockstore to the real blockstore.
+ root, err = lvm.Flush(d.ctx)
+ } else {
+ root, err = lvm.StateTree().(*state.StateTree).Flush(d.ctx)
+ }
+
return ret, root, err
}
@@ -170,3 +234,22 @@ func toChainMsg(msg *types.Message) (ret types.ChainMsg) {
}
return ret
}
+
+// BaseFeeOrDefault converts a basefee as passed in a test vector (go *big.Int
+// type) to an abi.TokenAmount, or if nil it returns the DefaultBaseFee.
+func BaseFeeOrDefault(basefee *gobig.Int) abi.TokenAmount {
+ if basefee == nil {
+ return DefaultBaseFee
+ }
+ return big.NewFromGo(basefee)
+}
+
+// CircSupplyOrDefault converts a circulating supply as passed in a test vector
+// (go *big.Int type) to an abi.TokenAmount, or if nil it returns the
+// DefaultCirculatingSupply.
+func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount {
+ if circSupply == nil {
+		return DefaultCirculatingSupply
+ }
+ return big.NewFromGo(circSupply)
+}
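
For reviewers, a hedged sketch of the reworked ExecuteMessage call shape; bs, preroot and msg are placeholders for a blockstore loaded from a vector CAR, the precondition state root, and a decoded message. The runner added below follows the same pattern.

// Sketch only; not part of this change.
driver := conformance.NewDriver(ctx, vector.Selector, conformance.DriverOpts{})
ret, postRoot, err := driver.ExecuteMessage(bs, conformance.ExecuteMessageParams{
	Preroot:    preroot,
	Epoch:      abi.ChainEpoch(vector.Pre.Epoch),
	Message:    msg,
	BaseFee:    conformance.BaseFeeOrDefault(vector.Pre.BaseFee),
	CircSupply: conformance.CircSupplyOrDefault(vector.Pre.CircSupply),
	// Rand left nil: the driver falls back to the fixed randomness source.
})
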
diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go
new file mode 100644
index 000000000..d356b53d0
--- /dev/null
+++ b/conformance/rand_fixed.go
@@ -0,0 +1,28 @@
+package conformance
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+type fixedRand struct{}
+
+var _ vm.Rand = (*fixedRand)(nil)
+
+// NewFixedRand creates a test vm.Rand that always returns the fixed 32-byte
+// value of the utf-8 string 'i_am_random_____i_am_random_____'.
+func NewFixedRand() vm.Rand {
+ return &fixedRand{}
+}
+
+func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+ return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+}
+
+func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+ return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+}
diff --git a/conformance/rand_record.go b/conformance/rand_record.go
new file mode 100644
index 000000000..6f6d064dc
--- /dev/null
+++ b/conformance/rand_record.go
@@ -0,0 +1,103 @@
+package conformance
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+type RecordingRand struct {
+ reporter Reporter
+ api api.FullNode
+
+ // once guards the loading of the head tipset.
+ // can be removed when https://github.com/filecoin-project/lotus/issues/4223
+ // is fixed.
+ once sync.Once
+ head types.TipSetKey
+ lk sync.Mutex
+ recorded schema.Randomness
+}
+
+var _ vm.Rand = (*RecordingRand)(nil)
+
+// NewRecordingRand returns a vm.Rand implementation that proxies calls to a
+// full Lotus node via JSON-RPC, and records matching rules and responses so
+// they can later be embedded in test vectors.
+func NewRecordingRand(reporter Reporter, api api.FullNode) *RecordingRand {
+ return &RecordingRand{reporter: reporter, api: api}
+}
+
+func (r *RecordingRand) loadHead() {
+ head, err := r.api.ChainHead(context.Background())
+ if err != nil {
+ panic(fmt.Sprintf("could not fetch chain head while fetching randomness: %s", err))
+ }
+ r.head = head.Key()
+}
+
+func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ r.once.Do(r.loadHead)
+ ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy)
+ if err != nil {
+ return ret, err
+ }
+
+ r.reporter.Logf("fetched and recorded chain randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+
+ match := schema.RandomnessMatch{
+ On: schema.RandomnessRule{
+ Kind: schema.RandomnessChain,
+ DomainSeparationTag: int64(pers),
+ Epoch: int64(round),
+ Entropy: entropy,
+ },
+ Return: []byte(ret),
+ }
+ r.lk.Lock()
+ r.recorded = append(r.recorded, match)
+ r.lk.Unlock()
+
+ return ret, err
+}
+
+func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ r.once.Do(r.loadHead)
+ ret, err := r.api.ChainGetRandomnessFromBeacon(ctx, r.head, pers, round, entropy)
+ if err != nil {
+ return ret, err
+ }
+
+ r.reporter.Logf("fetched and recorded beacon randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+
+ match := schema.RandomnessMatch{
+ On: schema.RandomnessRule{
+ Kind: schema.RandomnessBeacon,
+ DomainSeparationTag: int64(pers),
+ Epoch: int64(round),
+ Entropy: entropy,
+ },
+ Return: []byte(ret),
+ }
+ r.lk.Lock()
+ r.recorded = append(r.recorded, match)
+ r.lk.Unlock()
+
+ return ret, err
+}
+
+func (r *RecordingRand) Recorded() schema.Randomness {
+ r.lk.Lock()
+ defer r.lk.Unlock()
+
+ return r.recorded
+}
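
A sketch of the intended extraction workflow (assumed, not shown in this diff): wire a RecordingRand in as the driver's randomness source, then embed whatever it recorded into the vector so that ReplayingRand can serve it back later. reporter, fullAPI, bs, preroot, epoch, msg and vector are placeholders supplied by the extraction tooling.

// Sketch only; not part of this change.
recRand := conformance.NewRecordingRand(reporter, fullAPI)
ret, postRoot, err := driver.ExecuteMessage(bs, conformance.ExecuteMessageParams{
	Preroot:    preroot,
	Epoch:      epoch,
	Message:    msg,
	BaseFee:    conformance.DefaultBaseFee,
	CircSupply: conformance.DefaultCirculatingSupply,
	Rand:       recRand,
})
if err == nil {
	// ret and postRoot feed the vector's postconditions; the recorded
	// randomness rules are embedded alongside them.
	vector.Randomness = recRand.Recorded()
}
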
diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go
new file mode 100644
index 000000000..1b73e5a08
--- /dev/null
+++ b/conformance/rand_replay.go
@@ -0,0 +1,79 @@
+package conformance
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+type ReplayingRand struct {
+ reporter Reporter
+ recorded schema.Randomness
+ fallback vm.Rand
+}
+
+var _ vm.Rand = (*ReplayingRand)(nil)
+
+// NewReplayingRand replays recorded randomness when requested, falling back to
+// fixed randomness if the value cannot be found; hence this is a safe
+// backwards-compatible replacement for fixedRand.
+func NewReplayingRand(reporter Reporter, recorded schema.Randomness) *ReplayingRand {
+ return &ReplayingRand{
+ reporter: reporter,
+ recorded: recorded,
+ fallback: NewFixedRand(),
+ }
+}
+
+func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) {
+ for _, other := range r.recorded {
+ if other.On.Kind == requested.Kind &&
+ other.On.Epoch == requested.Epoch &&
+ other.On.DomainSeparationTag == requested.DomainSeparationTag &&
+ bytes.Equal(other.On.Entropy, requested.Entropy) {
+ return other.Return, true
+ }
+ }
+ return nil, false
+}
+
+func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ rule := schema.RandomnessRule{
+ Kind: schema.RandomnessChain,
+ DomainSeparationTag: int64(pers),
+ Epoch: int64(round),
+ Entropy: entropy,
+ }
+
+ if ret, ok := r.match(rule); ok {
+ r.reporter.Logf("returning saved chain randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ return ret, nil
+ }
+
+ r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
+ return r.fallback.GetChainRandomness(ctx, pers, round, entropy)
+}
+
+func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ rule := schema.RandomnessRule{
+ Kind: schema.RandomnessBeacon,
+ DomainSeparationTag: int64(pers),
+ Epoch: int64(round),
+ Entropy: entropy,
+ }
+
+ if ret, ok := r.match(rule); ok {
+ r.reporter.Logf("returning saved beacon randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ return ret, nil
+ }
+
+ r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
+ return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy)
+
+}
diff --git a/conformance/reporter.go b/conformance/reporter.go
new file mode 100644
index 000000000..1cd2d389d
--- /dev/null
+++ b/conformance/reporter.go
@@ -0,0 +1,62 @@
+package conformance
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+ "testing"
+
+ "github.com/fatih/color"
+)
+
+// Reporter contains a subset of the testing.T methods, so that the
+// Execute* functions in this package can be used inside or outside of
+// go test runs.
+type Reporter interface {
+ Helper()
+
+ Log(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Logf(format string, args ...interface{})
+ FailNow()
+ Failed() bool
+}
+
+var _ Reporter = (*testing.T)(nil)
+
+// LogReporter wires the Reporter methods to the log package. It is appropriate
+// to use when calling the Execute* functions from a standalone CLI program.
+type LogReporter struct {
+ failed int32
+}
+
+var _ Reporter = (*LogReporter)(nil)
+
+func (*LogReporter) Helper() {}
+
+func (*LogReporter) Log(args ...interface{}) {
+ log.Println(args...)
+}
+
+func (*LogReporter) Logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+func (*LogReporter) FailNow() {
+ os.Exit(1)
+}
+
+func (l *LogReporter) Failed() bool {
+ return atomic.LoadInt32(&l.failed) == 1
+}
+
+func (l *LogReporter) Errorf(format string, args ...interface{}) {
+ atomic.StoreInt32(&l.failed, 1)
+ log.Println(color.HiRedString("❌ "+format, args...))
+}
+
+func (l *LogReporter) Fatalf(format string, args ...interface{}) {
+ atomic.StoreInt32(&l.failed, 1)
+ log.Fatal(color.HiRedString("❌ "+format, args...))
+}
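
A minimal sketch (not part of this change) of driving a vector outside of go test with the LogReporter; vector is a schema.TestVector previously decoded from JSON.

// Sketch only.
r := new(conformance.LogReporter)
conformance.ExecuteMessageVector(r, &vector)
if r.Failed() {
	os.Exit(1) // Errorf flips the failed flag without exiting; Fatalf exits directly.
}
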
diff --git a/conformance/runner.go b/conformance/runner.go
new file mode 100644
index 000000000..d489ac288
--- /dev/null
+++ b/conformance/runner.go
@@ -0,0 +1,266 @@
+package conformance
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strconv"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/ipfs/go-blockservice"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-merkledag"
+ "github.com/ipld/go-car"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+)
+
+// ExecuteMessageVector executes a message-class test vector.
+func ExecuteMessageVector(r Reporter, vector *schema.TestVector) {
+ var (
+ ctx = context.Background()
+ epoch = vector.Pre.Epoch
+ root = vector.Pre.StateTree.RootCID
+ )
+
+ // Load the CAR into a new temporary Blockstore.
+ bs, err := LoadVectorCAR(vector.CAR)
+ if err != nil {
+		r.Fatalf("failed to load the vector CAR: %s", err)
+ }
+
+ // Create a new Driver.
+ driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true})
+
+ // Apply every message.
+ for i, m := range vector.ApplyMessages {
+ msg, err := types.DecodeMessage(m.Bytes)
+ if err != nil {
+ r.Fatalf("failed to deserialize message: %s", err)
+ }
+
+		// override the epoch if the message specifies one.
+ if m.Epoch != nil {
+ epoch = *m.Epoch
+ }
+
+ // Execute the message.
+ var ret *vm.ApplyRet
+ ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{
+ Preroot: root,
+ Epoch: abi.ChainEpoch(epoch),
+ Message: msg,
+ BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee),
+ CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply),
+ Rand: NewReplayingRand(r, vector.Randomness),
+ })
+ if err != nil {
+ r.Fatalf("fatal failure when executing message: %s", err)
+ }
+
+ // Assert that the receipt matches what the test vector expects.
+ AssertMsgResult(r, vector.Post.Receipts[i], ret, strconv.Itoa(i))
+ }
+
+ // Once all messages are applied, assert that the final state root matches
+ // the expected postcondition root.
+ if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
+ r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ dumpThreeWayStateDiff(r, vector, bs, root)
+ r.FailNow()
+ }
+}
+
+// ExecuteTipsetVector executes a tipset-class test vector.
+func ExecuteTipsetVector(r Reporter, vector *schema.TestVector) {
+ var (
+ ctx = context.Background()
+ prevEpoch = vector.Pre.Epoch
+ root = vector.Pre.StateTree.RootCID
+ tmpds = ds.NewMapDatastore()
+ )
+
+ // Load the vector CAR into a new temporary Blockstore.
+ bs, err := LoadVectorCAR(vector.CAR)
+ if err != nil {
+		r.Fatalf("failed to load the vector CAR: %s", err)
+ }
+
+ // Create a new Driver.
+ driver := NewDriver(ctx, vector.Selector, DriverOpts{})
+
+ // Apply every tipset.
+ var receiptsIdx int
+ for i, ts := range vector.ApplyTipsets {
+ ts := ts // capture
+ ret, err := driver.ExecuteTipset(bs, tmpds, root, abi.ChainEpoch(prevEpoch), &ts)
+ if err != nil {
+ r.Fatalf("failed to apply tipset %d message: %s", i, err)
+ }
+
+ for j, v := range ret.AppliedResults {
+ AssertMsgResult(r, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i))
+ receiptsIdx++
+ }
+
+ // Compare the receipts root.
+ if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
+ r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
+ }
+
+ prevEpoch = ts.Epoch
+ root = ret.PostStateRoot
+ }
+
+ // Once all messages are applied, assert that the final state root matches
+ // the expected postcondition root.
+ if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
+ r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ dumpThreeWayStateDiff(r, vector, bs, root)
+ r.FailNow()
+ }
+}
+
+// AssertMsgResult compares a message result. It takes the expected receipt
+// encoded in the vector, the actual receipt returned by Lotus, and a message
+// label to log in the assertion failure message to facilitate debugging.
+func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, label string) {
+ r.Helper()
+
+ if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.ExitCode; expected != actual {
+ r.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual)
+ }
+ if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual {
+ r.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual)
+ }
+ if expected, actual := []byte(expected.ReturnValue), actual.Return; !bytes.Equal(expected, actual) {
+ r.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual))
+ }
+}
+
+func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
+ // check if statediff exists; if not, skip.
+ if err := exec.Command("statediff", "--help").Run(); err != nil {
+ r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found")
+ r.Log("install statediff with:")
+ r.Log("$ git clone https://github.com/filecoin-project/statediff.git")
+ r.Log("$ cd statediff")
+ r.Log("$ go generate ./...")
+ r.Log("$ go install ./cmd/statediff")
+ return
+ }
+
+ tmpCar, err := writeStateToTempCAR(bs,
+ vector.Pre.StateTree.RootCID,
+ vector.Post.StateTree.RootCID,
+ actual,
+ )
+ if err != nil {
+ r.Fatalf("failed to write temporary state CAR: %s", err)
+ }
+ defer os.RemoveAll(tmpCar) //nolint:errcheck
+
+ color.NoColor = false // enable colouring.
+
+ var (
+ a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state")
+ b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state")
+ c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state")
+ d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]")
+ d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]")
+ d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
+ )
+
+ printDiff := func(left, right cid.Cid) {
+ cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String())
+ b, err := cmd.CombinedOutput()
+ if err != nil {
+ r.Fatalf("statediff failed: %s", err)
+ }
+ r.Log(string(b))
+ }
+
+ bold := color.New(color.Bold).SprintfFunc()
+
+ // run state diffs.
+ r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
+ printDiff(vector.Post.StateTree.RootCID, actual)
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
+ printDiff(vector.Pre.StateTree.RootCID, actual)
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
+ printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
+}
+
+// writeStateToTempCAR writes the provided roots to a temporary CAR file and
+// returns its full path. The caller is responsible for removing the file when
+// done with it.
+func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, error) {
+ tmp, err := ioutil.TempFile("", "lotus-tests-*.car")
+ if err != nil {
+ return "", fmt.Errorf("failed to create temp file to dump CAR for diffing: %w", err)
+ }
+
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ // ignore things we don't have, the state tree is incomplete.
+ if has, err := bs.Has(link.Cid); err != nil {
+ return nil, err
+ } else if has {
+ out = append(out, link)
+ }
+ }
+ return out, nil
+ }
+
+ var (
+ offl = offline.Exchange(bs)
+ blkserv = blockservice.New(bs, offl)
+ dserv = merkledag.NewDAGService(blkserv)
+ )
+
+ err = car.WriteCarWithWalker(context.Background(), dserv, roots, tmp, carWalkFn)
+ if err != nil {
+ return "", fmt.Errorf("failed to dump CAR for diffing: %w", err)
+ }
+ _ = tmp.Close()
+ return tmp.Name(), nil
+}
+
+func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) {
+ bs := blockstore.NewTemporary()
+
+ // Read the base64-encoded CAR from the vector, and inflate the gzip.
+ buf := bytes.NewReader(vectorCAR)
+ r, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to inflate gzipped CAR: %s", err)
+ }
+ defer r.Close() // nolint
+
+ // Load the CAR embedded in the test vector into the Blockstore.
+ _, err = car.LoadCar(bs, r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err)
+ }
+ return bs, nil
+}
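A minimal sketch of how a message-class vector might be driven end-to-end through the exported helpers above (`LoadVectorCAR`, `AssertMsgResult`, `dumpThreeWayStateDiff`). It assumes the `NewDriver`/`ExecuteMessage` API used by the former `runner_test.go` (removed below) and a `Reporter` implementation, for example one backed by `*testing.T`:

```go
package conformance

import (
	"context"
	"strconv"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"

	"github.com/filecoin-project/test-vectors/schema"
)

// runMessageVector applies every message in a message-class vector and checks
// the receipts and final state root, mirroring the tipset runner above.
// NewDriver/ExecuteMessage are assumed to keep the signatures used by the
// previous test-only runner; r may wrap *testing.T or a CLI logger.
func runMessageVector(r Reporter, vector *schema.TestVector) {
	bs, err := LoadVectorCAR(vector.CAR)
	if err != nil {
		r.Fatalf("failed to load the vector CAR: %s", err)
	}

	var (
		driver = NewDriver(context.Background(), vector.Selector)
		root   = vector.Pre.StateTree.RootCID
		epoch  = vector.Pre.Epoch
	)

	for i, m := range vector.ApplyMessages {
		msg, err := types.DecodeMessage(m.Bytes)
		if err != nil {
			r.Fatalf("failed to deserialize message: %s", err)
		}
		if m.Epoch != nil {
			epoch = *m.Epoch // vectors may override the epoch per message.
		}

		var ret *vm.ApplyRet
		ret, root, err = driver.ExecuteMessage(bs, root, epoch, msg)
		if err != nil {
			r.Fatalf("fatal failure when executing message: %s", err)
		}

		// Compare the receipt against the vector's expectation.
		AssertMsgResult(r, vector.Post.Receipts[i], ret, strconv.Itoa(i))
	}

	// Finally, compare the resulting state root with the expected postcondition
	// root, dumping a three-way diff on mismatch.
	if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
		r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
		dumpThreeWayStateDiff(r, vector, bs, root)
		r.FailNow()
	}
}
```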
diff --git a/conformance/runner_test.go b/conformance/runner_test.go
deleted file mode 100644
index 87317fc8d..000000000
--- a/conformance/runner_test.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package conformance
-
-import (
- "bytes"
- "compress/gzip"
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "github.com/ipfs/go-cid"
- ds "github.com/ipfs/go-datastore"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/lib/blockstore"
-
- "github.com/filecoin-project/statediff"
- "github.com/filecoin-project/test-vectors/schema"
-
- "github.com/fatih/color"
- "github.com/ipld/go-car"
-)
-
-const (
- // EnvSkipConformance, if 1, skips the conformance test suite.
- EnvSkipConformance = "SKIP_CONFORMANCE"
-
- // EnvCorpusRootDir is the name of the environment variable where the path
- // to an alternative corpus location can be provided.
- //
- // The default is defaultCorpusRoot.
- EnvCorpusRootDir = "CORPUS_DIR"
-
- // defaultCorpusRoot is the directory where the test vector corpus is hosted.
- // It is mounted on the Lotus repo as a git submodule.
- //
- // When running this test, the corpus root can be overridden through the
- // -conformance.corpus CLI flag to run an alternate corpus.
- defaultCorpusRoot = "../extern/test-vectors/corpus"
-)
-
-// ignore is a set of paths relative to root to skip.
-var ignore = map[string]struct{}{
- ".git": {},
- "schema.json": {},
-}
-
-// TestConformance is the entrypoint test that runs all test vectors found
-// in the corpus root directory.
-//
-// It locates all json files via a recursive walk, skipping over the ignore set,
-// as well as files beginning with _. It parses each file as a test vector, and
-// runs it via the Driver.
-func TestConformance(t *testing.T) {
- if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" {
- t.SkipNow()
- }
- // corpusRoot is the effective corpus root path, taken from the `-conformance.corpus` CLI flag,
- // falling back to defaultCorpusRoot if not provided.
- corpusRoot := defaultCorpusRoot
- if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" {
- corpusRoot = dir
- }
-
- var vectors []string
- err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error {
- if err != nil {
- t.Fatal(err)
- }
-
- filename := filepath.Base(path)
- rel, err := filepath.Rel(corpusRoot, path)
- if err != nil {
- t.Fatal(err)
- }
-
- if _, ok := ignore[rel]; ok {
- // skip over using the right error.
- if info.IsDir() {
- return filepath.SkipDir
- }
- return nil
- }
- if info.IsDir() {
- // dive into directories.
- return nil
- }
- if filepath.Ext(path) != ".json" {
- // skip if not .json.
- return nil
- }
- if ignored := strings.HasPrefix(filename, "_"); ignored {
- // ignore files starting with _.
- t.Logf("ignoring: %s", rel)
- return nil
- }
- vectors = append(vectors, rel)
- return nil
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- if len(vectors) == 0 {
- t.Fatalf("no test vectors found")
- }
-
- // Run a test for each vector.
- for _, v := range vectors {
- path := filepath.Join(corpusRoot, v)
- raw, err := ioutil.ReadFile(path)
- if err != nil {
- t.Fatalf("failed to read test raw file: %s", path)
- }
-
- var vector schema.TestVector
- err = json.Unmarshal(raw, &vector)
- if err != nil {
- t.Errorf("failed to parse test vector %s: %s; skipping", path, err)
- continue
- }
-
- t.Run(v, func(t *testing.T) {
- for _, h := range vector.Hints {
- if h == schema.HintIncorrect {
- t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID)
- t.SkipNow()
- }
- }
-
- // dispatch the execution depending on the vector class.
- switch vector.Class {
- case "message":
- executeMessageVector(t, &vector)
- case "tipset":
- executeTipsetVector(t, &vector)
- default:
- t.Fatalf("test vector class not supported: %s", vector.Class)
- }
- })
- }
-}
-
-// executeMessageVector executes a message-class test vector.
-func executeMessageVector(t *testing.T, vector *schema.TestVector) {
- var (
- ctx = context.Background()
- epoch = vector.Pre.Epoch
- root = vector.Pre.StateTree.RootCID
- )
-
- // Load the CAR into a new temporary Blockstore.
- bs := loadCAR(t, vector.CAR)
-
- // Create a new Driver.
- driver := NewDriver(ctx, vector.Selector)
-
- // Apply every message.
- for i, m := range vector.ApplyMessages {
- msg, err := types.DecodeMessage(m.Bytes)
- if err != nil {
- t.Fatalf("failed to deserialize message: %s", err)
- }
-
- // add an epoch if one's set.
- if m.Epoch != nil {
- epoch = *m.Epoch
- }
-
- // Execute the message.
- var ret *vm.ApplyRet
- ret, root, err = driver.ExecuteMessage(bs, root, epoch, msg)
- if err != nil {
- t.Fatalf("fatal failure when executing message: %s", err)
- }
-
- // Assert that the receipt matches what the test vector expects.
- assertMsgResult(t, vector.Post.Receipts[i], ret, strconv.Itoa(i))
- }
-
- // Once all messages are applied, assert that the final state root matches
- // the expected postcondition root.
- if root != vector.Post.StateTree.RootCID {
- dumpThreeWayStateDiff(t, vector, bs, root)
- }
-}
-
-// executeTipsetVector executes a tipset-class test vector.
-func executeTipsetVector(t *testing.T, vector *schema.TestVector) {
- var (
- ctx = context.Background()
- prevEpoch = vector.Pre.Epoch
- root = vector.Pre.StateTree.RootCID
- tmpds = ds.NewMapDatastore()
- )
-
- // Load the CAR into a new temporary Blockstore.
- bs := loadCAR(t, vector.CAR)
-
- // Create a new Driver.
- driver := NewDriver(ctx, vector.Selector)
-
- // Apply every tipset.
- var receiptsIdx int
- for i, ts := range vector.ApplyTipsets {
- ts := ts // capture
- ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts)
- if err != nil {
- t.Fatalf("failed to apply tipset %d message: %s", i, err)
- }
-
- for j, v := range ret.AppliedResults {
- assertMsgResult(t, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i))
- receiptsIdx++
- }
-
- // Compare the receipts root.
- if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
- t.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
- }
-
- prevEpoch = ts.Epoch
- root = ret.PostStateRoot
- }
-
- // Once all messages are applied, assert that the final state root matches
- // the expected postcondition root.
- if root != vector.Post.StateTree.RootCID {
- dumpThreeWayStateDiff(t, vector, bs, root)
- }
-}
-
-// assertMsgResult compares a message result. It takes the expected receipt
-// encoded in the vector, the actual receipt returned by Lotus, and a message
-// label to log in the assertion failure message to facilitate debugging.
-func assertMsgResult(t *testing.T, expected *schema.Receipt, actual *vm.ApplyRet, label string) {
- t.Helper()
-
- if expected, actual := expected.ExitCode, actual.ExitCode; expected != actual {
- t.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual)
- }
- if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual {
- t.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual)
- }
- if expected, actual := []byte(expected.ReturnValue), actual.Return; !bytes.Equal(expected, actual) {
- t.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual))
- }
-}
-
-func dumpThreeWayStateDiff(t *testing.T, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
- color.NoColor = false // enable colouring.
-
- t.Errorf("wrong post root cid; expected %v, but got %v", vector.Post.StateTree.RootCID, actual)
-
- var (
- a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state")
- b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state")
- c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state")
- d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]")
- d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]")
- d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
- )
-
- bold := color.New(color.Bold).SprintfFunc()
-
- // run state diffs.
- t.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
-
- t.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
- t.Log(statediff.Diff(context.Background(), bs, vector.Post.StateTree.RootCID, actual))
-
- t.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
- t.Log(statediff.Diff(context.Background(), bs, vector.Pre.StateTree.RootCID, actual))
-
- t.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
- t.Log(statediff.Diff(context.Background(), bs, vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID))
-}
-
-func loadCAR(t *testing.T, vectorCAR schema.Base64EncodedBytes) blockstore.Blockstore {
- bs := blockstore.NewTemporary()
-
- // Read the base64-encoded CAR from the vector, and inflate the gzip.
- buf := bytes.NewReader(vectorCAR)
- r, err := gzip.NewReader(buf)
- if err != nil {
- t.Fatalf("failed to inflate gzipped CAR: %s", err)
- }
- defer r.Close() // nolint
-
- // Load the CAR embedded in the test vector into the Blockstore.
- _, err = car.LoadCar(bs, r)
- if err != nil {
- t.Fatalf("failed to load state tree car from test vector: %s", err)
- }
- return bs
-}
diff --git a/conformance/stubs.go b/conformance/stubs.go
deleted file mode 100644
index 2fd1e7b64..000000000
--- a/conformance/stubs.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package conformance
-
-import (
- "context"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/chain/state"
- "github.com/filecoin-project/lotus/chain/vm"
-
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime"
-
- cbor "github.com/ipfs/go-ipld-cbor"
-)
-
-type testRand struct{}
-
-var _ vm.Rand = (*testRand)(nil)
-
-func (r *testRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
-}
-
-func (r *testRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
-}
-
-type testSyscalls struct {
- runtime.Syscalls
-}
-
-// TODO VerifySignature this will always succeed; but we want to be able to test failures too.
-func (fss *testSyscalls) VerifySignature(_ crypto.Signature, _ address.Address, _ []byte) error {
- return nil
-}
-
-// TODO VerifySeal this will always succeed; but we want to be able to test failures too.
-func (fss *testSyscalls) VerifySeal(_ abi.SealVerifyInfo) error {
- return nil
-}
-
-// TODO VerifyPoSt this will always succeed; but we want to be able to test failures too.
-func (fss *testSyscalls) VerifyPoSt(_ abi.WindowPoStVerifyInfo) error {
- return nil
-}
-
-func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
- return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls {
- return &testSyscalls{
- base(ctx, cstate, cst),
- }
- }
-}
diff --git a/documentation/en/.glossary.json b/documentation/en/.glossary.json
index 79d96664a..0967ef424 100644
--- a/documentation/en/.glossary.json
+++ b/documentation/en/.glossary.json
@@ -1,146 +1 @@
-{
- "bellman": {
- "title": "Bellman",
- "value": "Bellman is a rust crate for building zk-SNARK circuits. It provides circuit traits and primitive structures, as well as basic gadget implementations such as booleans and number abstractions."
- },
- "nvme": {
- "title": "NVMe",
- "value": "(non-volatile memory express) is a host controller interface and storage protocol created to accelerate the transfer of data between enterprise and client systems and solid-state drives (SSDs) over a computer's high-speed Peripheral Component Interconnect Express (PCIe) bus."
- },
- "multiaddr": {
- "title": "Multiaddr",
- "value": "Multiaddr is a format for encoding addresses from various well-established network protocols. It is useful to write applications that future-proof their use of addresses, and allow multiple transport protocols and addresses to coexist."
- },
- "attofil": {
- "title": "attoFIL",
- "value": "AttoFIL is a word used to describe 10^-18 FIL. The word atto comes from the Norwegian and Danish term: atten eighteen."
- },
- "fil": {
- "title": "FIL",
- "value": "A ticker symbol is an abbreviation used to uniquely identify Filecoin when it is used in a wallet exchange or a cryptocurrency exchange."
- },
- "epost": {
- "title": "Election Proof-of-Spacetime",
- "value": "Election Proof-of-Spacetime couples the Proof-of-Spacetime process with block production, meaning that in order to produce a block, the miner must produce a valid Proof-of-Spacetime proof (snark output)."
- },
- "jwt": {
- "title": "JWT",
- "value": "JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties."
- },
- "json-rpc": {
- "title": "JSON-RPC",
- "value": "JSON-RPC is a remote procedure call protocol encoded in JSON. It is a very simple protocol (and very similar to XML-RPC), defining only a few data types and commands."
- },
- "bls-address": {
- "title": "BLS Signature (Address)",
- "value": "A Boneh–Lynn–Shacham (BLS) signature is a digital signature scheme that allows a user to determine the authenticity of a signer, and is a commonly used signature scheme in the Filecoin Distributed Storage Network."
- },
- "faucet": {
- "title": "Filecoin Test Faucet",
- "value": "A webpage where you can get free test Filecoin to participate in the Testnet."
- },
- "chain": {
- "title": "Chain",
- "value": "The Filecoin Blockchain is a distributed virtual machine that achieves consensus, processes messages, accounts for storage, and maintains security in the Filecoin Protocol. It is the main interface linking various actors in the Filecoin system."
- },
- "miner-power": {
- "title": "Miner Power",
- "value": "Miner storage in relation to network storage, tracked in the power table."
- },
- "sector": {
- "title": "Sector",
- "value": "A fixed-size block of data of SECTOR_SIZE bytes which generally contains client's data."
- },
- "sealing": {
- "title": "Sealing",
- "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector."
- },
- "seal": {
- "title": "Seal",
- "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector."
- },
- "posts": {
- "title": "Proof-of-Spacetime(s)",
- "value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data."
- },
- "filecoin-testnet": {
- "title": "Filecoin Testnet",
- "value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value – the official filecoin tokens will not be released until Mainnet launch."
- },
- "filecoin-decentralized-storage-market": {
- "title": "Filecoin Decentralized Storage Market",
- "value": "Storage Market subsystem is the data entry point into the network. Miners only earn power from data stored in a storage deal and all deals live on the Filecoin network."
- },
- "filecoin-proof-parameters": {
- "title": "Filecoin Proof Parameters",
- "value": "The proving algorithms rely on a large binary parameter file."
- },
- "lotus-devnet": {
- "title": "DevNet",
- "value": "On the DevNets, you can store data as a storage client and also try how Filecoin mining works. The devnets are an important development tool for those who anticipate building applications on top of the Filecoin protocol or storing data on the decentralized storage market. "
- },
- "filecoin-distributed-storage-network": {
- "title": "Filecoin Distributed Storage Network",
- "value": "Filecoin is a distributed storage network based on a blockchain mechanism. Filecoin miners can elect to provide storage capacity for the network, and thereby earn units of the Filecoin cryptocurrency (FIL) by periodically producing cryptographic proofs that certify that they are providing the capacity specified."
- },
- "lotus-node": {
- "title": "Lotus Node",
- "value": "The Lotus Node is full of capabilities. It runs the Blockchain system, makes retrieval deals, does data transfer, supports block producer logic, and syncs and validates the chain."
- },
- "block-rewards": {
- "title": "Block Reward",
- "value": "Over the entire lifetime of the protocol, 1,400,000,000 FIL (TotalIssuance) will be given out to miners. The rate at which the funds are given out is set to halve every six years, smoothly (not a fixed jump like in Bitcoin)."
- },
- "block-producer-miner": {
- "title": "Miner (Block Producer)",
- "value": "The Block Producer Miner's logic. It currently shares an interface and process with the Lotus Node. A Block Producer chooses which messages to include in a block and is rewarded according to each message’s gas price and consumption, forming a market."
- },
- "lotus-storage-miner": {
- "title": "Miner (lotus-miner)",
- "value": "The Miner's logic. It has its own dedicated process. Contributes to the network through Sector commitments and Proofs of Spacetime to prove that it is storing the sectors it has commited to."
- },
- "swarm-port": {
- "title": "Swarm Port (Libp2p)",
- "value": "The LibP2P Swarm manages groups of connections to peers, handles incoming and outgoing streams, and is part of the miners implementation. The port value is part of the Host interface."
- },
- "daemon": {
- "title": "Lotus Daemon",
- "value": "A Daemon is a program that runs as a background process. A Daemon in the context of the Filecoin Distributed Storage Network may enable applications to communicate with peers, handle protocols, participate in pubsub, and interact with a distributed hash table (DHT)."
- },
- "storage-deal": {
- "title": "Storage deal",
- "value": "One of the two types of deals in Filecoin markets. Storage deals are recorded on the blockchain and enforced by the protocol."
- },
- "retrieval-deal": {
- "title": "Retrieval deal",
- "value": "One of the two types of deals in Filecoin markets. Retrieval deals are off chain and enabled by micropayment channel by transacting parties."
- },
- "deal-cid": {
- "title": "Deal CID",
- "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DealCID specifically is used in storage deals."
- },
- "data-cid": {
- "title": "Data CID",
- "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DataCID specifically is used to represent the file that is stored in the Filecoin Distributed Storage Network."
- },
- "cid": {
- "title": "CID",
- "value": "A CID is a self-describing content-addressed identifier. It uses cryptographic hashes to achieve content addressing. It uses several multiformats to achieve flexible self-description, namely multihash for hashes, multicodec for data content types, and multibase to encode the CID itself into strings."
- },
- "total-network-power": {
- "title": "Total Network Power",
- "value": "A reference to all the Power Tables for every subchain, accounting for each Lotus Miner on chain."
- },
- "chain-block-height": {
- "title": "Chain Block Height",
- "value": "Chain block height is defined as the number of blocks in the chain between any given block and the very first block in the blockchain."
- },
- "block-height": {
- "title": "Block Height",
- "value": "Height of the Merkle Tree of a sector. A sector is a contiguous array of bytes that a miner puts together, seals, and performs Proofs of Spacetime on."
- },
- "blocktime": {
- "title": "Blocktime",
- "value": "The time it takes for a Block to propagate to the whole network."
- }
-}
+{}
diff --git a/documentation/en/.library.json b/documentation/en/.library.json
index 3fab0df9b..e31f09950 100644
--- a/documentation/en/.library.json
+++ b/documentation/en/.library.json
@@ -1,232 +1,25 @@
{
"posts": [
{
- "title": "Hardware Requirements",
- "slug": "en+hardware",
- "github": "en/hardware.md",
+ "title": "About Lotus",
+ "slug": "",
+ "github": "en/about.md",
"value": null,
- "posts": [
- {
- "title": "Testing Configuration",
- "slug": "en+hardware-mining",
- "github": "en/hardware-mining.md",
- "value": null
- }
- ]
+ "posts": []
},
{
- "title": "Setup",
- "slug": "en+getting-started",
- "github": "en/getting-started.md",
- "value": null,
- "posts": [
- {
- "title": "Arch Linux Installation",
- "slug": "en+install-lotus-arch",
- "github": "en/install-lotus-arch.md",
- "value": null
- },
- {
- "title": "Ubuntu Installation",
- "slug": "en+install-lotus-ubuntu",
- "github": "en/install-lotus-ubuntu.md",
- "value": null
- },
- {
- "title": "Fedora Installation",
- "slug": "en+install-lotus-fedora",
- "github": "en/install-lotus-fedora.md",
- "value": null
- },
- {
- "title": "MacOS Installation",
- "slug": "en+install-lotus-macos",
- "github": "en/install-lotus-macos.md",
- "value": null
- },
- {
- "title": "Updating Lotus",
- "slug": "en+updating-lotus",
- "github": "en/updating-lotus.md",
- "value": null
- },
- {
- "title": "Join Testnet",
- "slug": "en+join-testnet",
- "github": "en/join-testnet.md",
- "value": null
- },
- {
- "title": "Use Lotus with systemd",
- "slug": "en+install-systemd-services",
- "github": "en/install-systemd-services.md",
- "value": null
- },
- {
- "title": "Setup Troubleshooting",
- "slug": "en+setup-troubleshooting",
- "github": "en/setup-troubleshooting.md",
- "value": null
- },
- {
- "title": "Environment Variables",
- "slug": "en+env-vars",
- "github": "en/environment-vars.md",
- "value": null
- }
- ]
- },
- {
- "title": "Architecture",
+ "title": "Lotus Architecture (WIP)",
"slug": "en+arch",
- "github": "en/architecture.md",
+ "github": "en/architecture/architecture.md",
"value": null,
"posts": [
- {
- "title": "The Message Pool",
- "slug": "en+mpool",
- "github": "en/mpool.md",
- "value": null
- }
+ {
+ "title": "The Message Pool",
+ "slug": "en+mpool",
+ "github": "en/architecture/mpool.md",
+ "value": null
+ }
]
- },
- {
- "title": "Storage Mining",
- "slug": "en+mining",
- "github": "en/mining.md",
- "value": null,
- "posts": [
- {
- "title": "Lotus Worker",
- "slug": "en+lotus-worker",
- "github": "en/mining-lotus-worker.md",
- "value": null
- },
- {
- "title": "Static Ports",
- "slug": "en+setting-a-static-port",
- "github": "en/setting-a-static-port.md",
- "value": null
- },
- {
- "title": "Mining Troubleshooting",
- "slug": "en+mining-troubleshooting",
- "github": "en/mining-troubleshooting.md",
- "value": null
- }
- ]
- },
- {
- "title": "Storing Data",
- "slug": "en+storing-data",
- "github": "en/storing-data.md",
- "value": null,
- "posts": [
- {
- "title": "Storage Troubleshooting",
- "slug": "en+storing-data-troubleshooting",
- "github": "en/storing-data-troubleshooting.md",
- "value": null
- },
- {
- "title": "Information for Miners",
- "slug": "en+info-for-miners",
- "github": "en/miner-deals.md",
- "value": null
- },
- {
- "title": "IPFS Integration",
- "slug": "en+ipfs-client-integration",
- "github": "en/storing-ipfs-integration.md",
- "value": null
- }
- ]
- },
- {
- "title": "Retrieving Data",
- "slug": "en+retrieving-data",
- "github": "en/retrieving-data.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Payment Channels",
- "slug": "en+payment-channels",
- "github": "en/payment-channels.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Command Line Interface",
- "slug": "en+cli",
- "github": "en/cli.md",
- "value": null,
- "posts": []
- },
- {
- "title": "API",
- "slug": "en+api",
- "github": "en/api.md",
- "value": null,
- "posts": [
- {
- "title": "Remote API Support",
- "slug": "en+api-scripting-support",
- "github": "en/api-scripting-support.md",
- "value": null
- },
- {
- "title": "API Methods",
- "slug": "en+api-methods",
- "github": "en/api-methods.md",
- "value": null
- },
- {
- "title": "API Troubleshooting",
- "slug": "en+api-troubleshooting",
- "github": "en/api-troubleshooting.md",
- "value": null
- }
- ]
- },
- {
- "title": "Developer Tools",
- "slug": "en+dev-tools",
- "github": "en/dev-tools.md",
- "value": null,
- "posts": [
- {
- "title": "Setup Local Devnet",
- "slug": "en+setup-local-dev-net",
- "github": "en/local-dev-net.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Jaeger Tracing",
- "slug": "en+dev-tools-jaeger-tracing",
- "github": "en/dev-tools-jaeger-tracing.md",
- "value": null,
- "posts": []
- }
- ]
- },
- {
- "title": "FAQs",
- "slug": "en+faqs",
- "github": "en/faqs.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Glossary",
- "slug": "en+glossary",
- "github": "en/.glossary.json",
- "value": null,
- "custom": {
- "glossary": true
- },
- "posts": []
}
]
}
diff --git a/documentation/en/README.md b/documentation/en/README.md
new file mode 100644
index 000000000..76f11ed90
--- /dev/null
+++ b/documentation/en/README.md
@@ -0,0 +1,16 @@
+# Lotus documentation
+
+This folder contains Lotus documentation, mostly intended for Lotus developers.
+
+User documentation (including documentation for miners) has been moved to specific Lotus sections in https://docs.filecoin.io:
+
+- https://docs.filecoin.io/get-started/lotus
+- https://docs.filecoin.io/store/lotus
+- https://docs.filecoin.io/mine/lotus
+- https://docs.filecoin.io/build/lotus
+
+## The Lotu.sh site
+
+The https://lotu.sh and https://docs.lotu.sh sites are generated from this folder based on the index provided by [.library.json](.library.json). This is done in the [lotus-docs repository](https://github.com/filecoin-project/lotus-docs), which includes Lotus as a git submodule.
+
+To update the site, point the lotus git submodule in the lotus-docs repository at the desired Lotus version. Once the change is pushed to master, the site is deployed automatically.
diff --git a/documentation/en/dev/WIP-arch-complementary-notes.md b/documentation/en/WIP-arch-complementary-notes.md
similarity index 100%
rename from documentation/en/dev/WIP-arch-complementary-notes.md
rename to documentation/en/WIP-arch-complementary-notes.md
diff --git a/documentation/en/about.md b/documentation/en/about.md
new file mode 100644
index 000000000..f2051e00b
--- /dev/null
+++ b/documentation/en/about.md
@@ -0,0 +1,19 @@
+# Lotus
+
+Lotus is an implementation of the **Filecoin Distributed Storage Network**.
+
+It is written in Go and provides a suite of command-line applications:
+
+- Lotus Node (`lotus`): a Filecoin node. It validates network transactions, manages a FIL wallet, and can perform storage and retrieval deals.
+- Lotus Miner (`lotus-miner`): a Filecoin miner. See the respective Lotus Miner section in the Mine documentation.
+- Lotus Worker (`lotus-worker`): a worker that assists miners in performing mining-related tasks. See its respective guide for more information.
+
+The [Lotus user documentation](https://docs.filecoin.io/get-started/lotus) is part of the [Filecoin documentation site](https://docs.filecoin.io):
+
+* To install and get started with Lotus, visit the [Get Started section](https://docs.filecoin.io/get-started/lotus).
+* Information about how to perform deals on the Filecoin network using Lotus can be found in the [Store section](https://docs.filecoin.io/store/lotus).
+* Miners looking to provide storage to the Network can find the latest guides in the [Mine section](https://docs.filecoin.io/mine/lotus).
+* Developers and integrators who wish to use the Lotus APIs can start in the [Build section](https://docs.filecoin.io/build/lotus).
+
+For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://spec.filecoin.io/).
+
diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md
index bba212d45..8a288a4bf 100644
--- a/documentation/en/api-methods.md
+++ b/documentation/en/api-methods.md
@@ -9,6 +9,7 @@
* [Beacon](#Beacon)
* [BeaconGetEntry](#BeaconGetEntry)
* [Chain](#Chain)
+ * [ChainDeleteObj](#ChainDeleteObj)
* [ChainExport](#ChainExport)
* [ChainGetBlock](#ChainGetBlock)
* [ChainGetBlockMessages](#ChainGetBlockMessages)
@@ -46,8 +47,11 @@
* [ClientQueryAsk](#ClientQueryAsk)
* [ClientRemoveImport](#ClientRemoveImport)
* [ClientRetrieve](#ClientRetrieve)
+ * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds)
* [ClientRetrieveWithEvents](#ClientRetrieveWithEvents)
* [ClientStartDeal](#ClientStartDeal)
+* [Create](#Create)
+ * [CreateBackup](#CreateBackup)
* [Gas](#Gas)
* [GasEstimateFeeCap](#GasEstimateFeeCap)
* [GasEstimateGasLimit](#GasEstimateGasLimit)
@@ -70,14 +74,20 @@
* [MpoolPending](#MpoolPending)
* [MpoolPush](#MpoolPush)
* [MpoolPushMessage](#MpoolPushMessage)
+ * [MpoolPushUntrusted](#MpoolPushUntrusted)
* [MpoolSelect](#MpoolSelect)
* [MpoolSetConfig](#MpoolSetConfig)
* [MpoolSub](#MpoolSub)
* [Msig](#Msig)
+ * [MsigAddApprove](#MsigAddApprove)
+ * [MsigAddCancel](#MsigAddCancel)
+ * [MsigAddPropose](#MsigAddPropose)
* [MsigApprove](#MsigApprove)
* [MsigCancel](#MsigCancel)
* [MsigCreate](#MsigCreate)
* [MsigGetAvailableBalance](#MsigGetAvailableBalance)
+ * [MsigGetVested](#MsigGetVested)
+ * [MsigGetVestingSchedule](#MsigGetVestingSchedule)
* [MsigPropose](#MsigPropose)
* [MsigSwapApprove](#MsigSwapApprove)
* [MsigSwapCancel](#MsigSwapCancel)
@@ -98,6 +108,7 @@
* [Paych](#Paych)
* [PaychAllocateLane](#PaychAllocateLane)
* [PaychAvailableFunds](#PaychAvailableFunds)
+ * [PaychAvailableFundsByFromTo](#PaychAvailableFundsByFromTo)
* [PaychCollect](#PaychCollect)
* [PaychGet](#PaychGet)
* [PaychGetWaitReady](#PaychGetWaitReady)
@@ -142,7 +153,9 @@
* [StateMinerRecoveries](#StateMinerRecoveries)
* [StateMinerSectorCount](#StateMinerSectorCount)
* [StateMinerSectors](#StateMinerSectors)
+ * [StateMsgGasCost](#StateMsgGasCost)
* [StateNetworkName](#StateNetworkName)
+ * [StateNetworkVersion](#StateNetworkVersion)
* [StateReadState](#StateReadState)
* [StateReplay](#StateReplay)
* [StateSearchMsg](#StateSearchMsg)
@@ -151,13 +164,18 @@
* [StateSectorPartition](#StateSectorPartition)
* [StateSectorPreCommitInfo](#StateSectorPreCommitInfo)
* [StateVerifiedClientStatus](#StateVerifiedClientStatus)
+ * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey)
+ * [StateVerifierStatus](#StateVerifierStatus)
* [StateWaitMsg](#StateWaitMsg)
* [Sync](#Sync)
* [SyncCheckBad](#SyncCheckBad)
+ * [SyncCheckpoint](#SyncCheckpoint)
* [SyncIncomingBlocks](#SyncIncomingBlocks)
* [SyncMarkBad](#SyncMarkBad)
* [SyncState](#SyncState)
* [SyncSubmitBlock](#SyncSubmitBlock)
+ * [SyncUnmarkBad](#SyncUnmarkBad)
+ * [SyncValidateTipset](#SyncValidateTipset)
* [Wallet](#Wallet)
* [WalletBalance](#WalletBalance)
* [WalletDefaultAddress](#WalletDefaultAddress)
@@ -170,6 +188,7 @@
* [WalletSetDefault](#WalletSetDefault)
* [WalletSign](#WalletSign)
* [WalletSignMessage](#WalletSignMessage)
+ * [WalletValidateAddress](#WalletValidateAddress)
* [WalletVerify](#WalletVerify)
##
@@ -203,7 +222,7 @@ Response:
```json
{
"Version": "string value",
- "APIVersion": 3584,
+ "APIVersion": 4096,
"BlockDelay": 42
}
```
@@ -271,11 +290,29 @@ The Chain method group contains methods for interacting with the
blockchain, but that do not require any form of state computation.
+### ChainDeleteObj
+ChainDeleteObj deletes the node referenced by the given CID
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
### ChainExport
ChainExport returns a stream of bytes with CAR dump of chain data.
The exported chain data includes the header chain from the given tipset
back to genesis, the entire genesis state, and the most recent 'nroots'
state trees.
+If oldmsgskip is set, messages from before the requested roots are also not included.
Perms: read
@@ -284,6 +321,7 @@ Inputs:
```json
[
10101,
+ true,
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -315,7 +353,7 @@ Inputs:
Response:
```json
{
- "Miner": "t01234",
+ "Miner": "f01234",
"Ticket": {
"VRFProof": "Ynl0ZSBhcnJheQ=="
},
@@ -412,8 +450,8 @@ Response:
```json
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -905,7 +943,7 @@ Response:
},
"State": 42,
"Message": "string value",
- "Provider": "t01234",
+ "Provider": "f01234",
"DataRef": {
"TransferType": "string value",
"Root": {
@@ -921,7 +959,8 @@ Response:
"PricePerEpoch": "0",
"Duration": 42,
"DealID": 5432,
- "CreationTime": "0001-01-01T00:00:00Z"
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true
}
```
@@ -941,7 +980,7 @@ Response:
},
"State": 42,
"Message": "string value",
- "Provider": "t01234",
+ "Provider": "f01234",
"DataRef": {
"TransferType": "string value",
"Root": {
@@ -957,7 +996,8 @@ Response:
"PricePerEpoch": "0",
"Duration": 42,
"DealID": 5432,
- "CreationTime": "0001-01-01T00:00:00Z"
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true
}
```
@@ -1043,7 +1083,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
@@ -1064,9 +1104,9 @@ Response:
"UnsealPrice": "0",
"PaymentInterval": 42,
"PaymentIntervalIncrease": 42,
- "Miner": "t01234",
+ "Miner": "f01234",
"MinerPeer": {
- "Address": "t01234",
+ "Address": "f01234",
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"PieceCID": null
}
@@ -1083,27 +1123,21 @@ Inputs:
```json
[
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
- "t01234"
+ "f01234"
]
```
Response:
```json
{
- "Ask": {
- "Price": "0",
- "VerifiedPrice": "0",
- "MinPieceSize": 1032,
- "MaxPieceSize": 1032,
- "Miner": "t01234",
- "Timestamp": 10101,
- "Expiry": 10101,
- "SeqNo": 42
- },
- "Signature": {
- "Type": 2,
- "Data": "Ynl0ZSBhcnJheQ=="
- }
+ "Price": "0",
+ "VerifiedPrice": "0",
+ "MinPieceSize": 1032,
+ "MaxPieceSize": 1032,
+ "Miner": "f01234",
+ "Timestamp": 10101,
+ "Expiry": 10101,
+ "SeqNo": 42
}
```
@@ -1141,10 +1175,10 @@ Inputs:
"UnsealPrice": "0",
"PaymentInterval": 42,
"PaymentIntervalIncrease": 42,
- "Client": "t01234",
- "Miner": "t01234",
+ "Client": "f01234",
+ "Miner": "f01234",
"MinerPeer": {
- "Address": "t01234",
+ "Address": "f01234",
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"PieceCID": null
}
@@ -1158,6 +1192,22 @@ Inputs:
Response: `{}`
+### ClientRetrieveTryRestartInsufficientFunds
+ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+which are stuck due to insufficient funds
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `{}`
+
### ClientRetrieveWithEvents
ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
of status updates.
@@ -1178,10 +1228,10 @@ Inputs:
"UnsealPrice": "0",
"PaymentInterval": 42,
"PaymentIntervalIncrease": 42,
- "Client": "t01234",
- "Miner": "t01234",
+ "Client": "f01234",
+ "Miner": "f01234",
"MinerPeer": {
- "Address": "t01234",
+ "Address": "f01234",
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"PieceCID": null
}
@@ -1222,8 +1272,8 @@ Inputs:
"PieceCid": null,
"PieceSize": 1024
},
- "Wallet": "t01234",
- "Miner": "t01234",
+ "Wallet": "f01234",
+ "Miner": "f01234",
"EpochPrice": "0",
"MinBlocksDuration": 42,
"ProviderCollateral": "0",
@@ -1236,6 +1286,27 @@ Inputs:
Response: `null`
+## Create
+
+
+### CreateBackup
+CreateBackup creates a node backup under the specified file name. The
+method requires that the lotus daemon is running with the
+LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+the path specified when calling CreateBackup is within the base path
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
## Gas
@@ -1250,8 +1321,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1286,8 +1357,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1320,7 +1391,7 @@ Inputs:
```json
[
42,
- "t01234",
+ "f01234",
9,
[
{
@@ -1346,8 +1417,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1374,8 +1445,8 @@ Response:
```json
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1437,8 +1508,8 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
"0"
]
```
@@ -1462,7 +1533,7 @@ Inputs:
```json
[
{
- "Miner": "t01234",
+ "Miner": "f01234",
"Parents": [
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -1491,7 +1562,7 @@ Response:
```json
{
"Header": {
- "Miner": "t01234",
+ "Miner": "f01234",
"Ticket": {
"VRFProof": "Ynl0ZSBhcnJheQ=="
},
@@ -1538,7 +1609,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
10101,
[
{
@@ -1557,14 +1628,14 @@ Response:
"MinerPower": "0",
"NetworkPower": "0",
"Sectors": null,
- "WorkerKey": "t01234",
+ "WorkerKey": "f01234",
"SectorSize": 34359738368,
"PrevBeaconEntry": {
"Round": 42,
"Data": "Ynl0ZSBhcnJheQ=="
},
"BeaconEntries": null,
- "HasMinPower": true
+ "EligibleForMining": true
}
```
@@ -1618,7 +1689,7 @@ Perms: read
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -1658,8 +1729,8 @@ Inputs:
{
"Message": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1699,8 +1770,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1720,8 +1791,8 @@ Response:
{
"Message": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1737,6 +1808,43 @@ Response:
}
```
+### MpoolPushUntrusted
+MpoolPushUntrusted pushes a signed message to the mempool from untrusted sources.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ=="
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ }
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
### MpoolSelect
MpoolSelect returns a list of pending messages for inclusion in the next block
@@ -1796,8 +1904,8 @@ Response:
"Message": {
"Message": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -1819,6 +1927,84 @@ The Msig methods are used to interact with multisig wallets on the
filecoin network
+### MsigAddApprove
+MsigAddApprove approves a previously proposed AddSigner message
+It takes the following params: , , ,
+, ,
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MsigAddCancel
+MsigAddCancel cancels a previously proposed AddSigner message
+It takes the following params: , , ,
+,
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MsigAddPropose
+MsigAddPropose proposes adding a signer in the multisig
+It takes the following params: , ,
+,
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
### MsigApprove
MsigApprove approves a previously-proposed multisig message
It takes the following params: , , , , ,
@@ -1830,12 +2016,12 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
42,
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
"0",
- "t01234",
+ "f01234",
42,
"Ynl0ZSBhcnJheQ=="
]
@@ -1859,11 +2045,11 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
42,
- "t01234",
+ "f01234",
"0",
- "t01234",
+ "f01234",
42,
"Ynl0ZSBhcnJheQ=="
]
@@ -1891,7 +2077,7 @@ Inputs:
null,
10101,
"0",
- "t01234",
+ "f01234",
"0"
]
```
@@ -1912,7 +2098,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -1926,6 +2112,69 @@ Inputs:
Response: `"0"`
+### MsigGetVested
+MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+It takes the following params: , ,
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### MsigGetVestingSchedule
+MsigGetLockedBalance returns the locked balance of a multisig at a given epoch.
+The return may be greater than the multisig actor's actual balance.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "InitialBalance": "0",
+ "StartEpoch": 10101,
+ "UnlockDuration": 10101
+}
+```
+
### MsigPropose
MsigPropose proposes a multisig message
It takes the following params: , , ,
@@ -1937,10 +2186,10 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
"0",
- "t01234",
+ "f01234",
42,
"Ynl0ZSBhcnJheQ=="
]
@@ -1956,7 +2205,7 @@ Response:
### MsigSwapApprove
MsigSwapApprove approves a previously proposed SwapSigner
It takes the following params: , , ,
-,
+, ,
Perms: sign
@@ -1964,12 +2213,12 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
42,
- "t01234",
- "t01234",
- "t01234"
+ "f01234",
+ "f01234",
+ "f01234"
]
```
@@ -1983,7 +2232,7 @@ Response:
### MsigSwapCancel
MsigSwapCancel cancels a previously proposed SwapSigner message
It takes the following params: , , ,
-
+,
Perms: sign
@@ -1991,11 +2240,11 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
42,
- "t01234",
- "t01234"
+ "f01234",
+ "f01234"
]
```
@@ -2009,7 +2258,7 @@ Response:
### MsigSwapPropose
MsigSwapPropose proposes swapping 2 signers in the multisig
It takes the following params: , ,
-
+,
Perms: sign
@@ -2017,10 +2266,10 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
- "t01234",
- "t01234"
+ "f01234",
+ "f01234",
+ "f01234",
+ "f01234"
]
```
@@ -2228,7 +2477,7 @@ Perms: sign
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -2242,7 +2491,7 @@ Perms: sign
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -2250,6 +2499,35 @@ Response:
```json
{
"Channel": "\u003cempty\u003e",
+ "From": "f01234",
+ "To": "f01234",
+ "ConfirmedAmt": "0",
+ "PendingAmt": "0",
+ "PendingWaitSentinel": null,
+ "QueuedAmt": "0",
+ "VoucherReedeemedAmt": "0"
+}
+```
+
+### PaychAvailableFundsByFromTo
+There are not yet any comments for this method.
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234"
+]
+```
+
+Response:
+```json
+{
+ "Channel": "\u003cempty\u003e",
+ "From": "f01234",
+ "To": "f01234",
"ConfirmedAmt": "0",
"PendingAmt": "0",
"PendingWaitSentinel": null,
@@ -2266,7 +2544,7 @@ Perms: sign
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -2285,8 +2563,8 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
"0"
]
```
@@ -2294,7 +2572,7 @@ Inputs:
Response:
```json
{
- "Channel": "t01234",
+ "Channel": "f01234",
"WaitSentinel": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
@@ -2315,7 +2593,7 @@ Inputs:
]
```
-Response: `"t01234"`
+Response: `"f01234"`
### PaychList
There are not yet any comments for this method.
@@ -2334,8 +2612,8 @@ Perms: sign
Inputs:
```json
[
- "t01234",
- "t01234",
+ "f01234",
+ "f01234",
null
]
```
@@ -2343,7 +2621,7 @@ Inputs:
Response:
```json
{
- "Channel": "t01234",
+ "Channel": "f01234",
"WaitSentinel": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
@@ -2359,7 +2637,7 @@ Perms: sign
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -2378,14 +2656,14 @@ Perms: read
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
Response:
```json
{
- "ControlAddr": "t01234",
+ "ControlAddr": "f01234",
"Direction": 1
}
```
@@ -2398,14 +2676,14 @@ Perms: write
Inputs:
```json
[
- "t01234",
+ "f01234",
{
- "ChannelAddr": "t01234",
+ "ChannelAddr": "f01234",
"TimeLockMin": 10101,
"TimeLockMax": 10101,
"SecretPreimage": "Ynl0ZSBhcnJheQ==",
"Extra": {
- "Actor": "t01234",
+ "Actor": "f01234",
"Method": 1,
"Data": "Ynl0ZSBhcnJheQ=="
},
@@ -2434,14 +2712,14 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
{
- "ChannelAddr": "t01234",
+ "ChannelAddr": "f01234",
"TimeLockMin": 10101,
"TimeLockMax": 10101,
"SecretPreimage": "Ynl0ZSBhcnJheQ==",
"Extra": {
- "Actor": "t01234",
+ "Actor": "f01234",
"Method": 1,
"Data": "Ynl0ZSBhcnJheQ=="
},
@@ -2470,14 +2748,14 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
{
- "ChannelAddr": "t01234",
+ "ChannelAddr": "f01234",
"TimeLockMin": 10101,
"TimeLockMax": 10101,
"SecretPreimage": "Ynl0ZSBhcnJheQ==",
"Extra": {
- "Actor": "t01234",
+ "Actor": "f01234",
"Method": 1,
"Data": "Ynl0ZSBhcnJheQ=="
},
@@ -2504,7 +2782,7 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
"0",
42
]
@@ -2514,12 +2792,12 @@ Response:
```json
{
"Voucher": {
- "ChannelAddr": "t01234",
+ "ChannelAddr": "f01234",
"TimeLockMin": 10101,
"TimeLockMax": 10101,
"SecretPreimage": "Ynl0ZSBhcnJheQ==",
"Extra": {
- "Actor": "t01234",
+ "Actor": "f01234",
"Method": 1,
"Data": "Ynl0ZSBhcnJheQ=="
},
@@ -2545,7 +2823,7 @@ Perms: write
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -2559,14 +2837,14 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
{
- "ChannelAddr": "t01234",
+ "ChannelAddr": "f01234",
"TimeLockMin": 10101,
"TimeLockMax": 10101,
"SecretPreimage": "Ynl0ZSBhcnJheQ==",
"Extra": {
- "Actor": "t01234",
+ "Actor": "f01234",
"Method": 1,
"Data": "Ynl0ZSBhcnJheQ=="
},
@@ -2607,7 +2885,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -2619,7 +2897,7 @@ Inputs:
]
```
-Response: `"t01234"`
+Response: `"f01234"`
### StateAllMinerFaults
StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
@@ -2655,8 +2933,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -2681,8 +2959,8 @@ Response:
{
"Msg": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -2699,8 +2977,8 @@ Response:
"ExecutionTrace": {
"Msg": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -2863,7 +3141,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -2954,8 +3232,8 @@ Inputs:
[
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -3009,7 +3287,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3021,7 +3299,7 @@ Inputs:
]
```
-Response: `"t01234"`
+Response: `"f01234"`
### StateMarketBalance
StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
@@ -3032,7 +3310,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3082,8 +3360,8 @@ Response:
},
"PieceSize": 1032,
"VerifiedDeal": true,
- "Client": "t01234",
- "Provider": "t01234",
+ "Client": "f01234",
+ "Provider": "f01234",
"Label": "string value",
"StartEpoch": 10101,
"EndEpoch": 10101,
@@ -3160,8 +3438,8 @@ Response:
},
"PieceSize": 1032,
"VerifiedDeal": true,
- "Client": "t01234",
- "Provider": "t01234",
+ "Client": "f01234",
+ "Provider": "f01234",
"Label": "string value",
"StartEpoch": 10101,
"EndEpoch": 10101,
@@ -3186,7 +3464,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3209,7 +3487,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3232,7 +3510,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3255,7 +3533,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3284,7 +3562,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3299,16 +3577,17 @@ Inputs:
Response:
```json
{
- "Owner": "t01234",
- "Worker": "t01234",
- "NewWorker": "t01234",
+ "Owner": "f01234",
+ "Worker": "f01234",
+ "NewWorker": "f01234",
"ControlAddresses": null,
"WorkerChangeEpoch": 10101,
"PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Multiaddrs": null,
"SealProofType": 3,
"SectorSize": 34359738368,
- "WindowPoStPartitionSectors": 42
+ "WindowPoStPartitionSectors": 42,
+ "ConsensusFaultElapsed": 10101
}
```
@@ -3321,7 +3600,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
{
"SealProof": 3,
"SectorNumber": 9,
@@ -3350,7 +3629,7 @@ Inputs:
Response: `"0"`
### StateMinerPartitions
-StateMinerPartitions loads miner partitions for the specified miner/deadline
+StateMinerPartitions returns all partitions in the specified deadline
Perms: read
@@ -3358,7 +3637,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
42,
[
{
@@ -3382,7 +3661,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3404,7 +3683,8 @@ Response:
"TotalPower": {
"RawBytePower": "0",
"QualityAdjPower": "0"
- }
+ },
+ "HasMinPower": true
}
```
@@ -3417,7 +3697,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
{
"SealProof": 3,
"SectorNumber": 9,
@@ -3455,7 +3735,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3476,7 +3756,12 @@ Response:
"Open": 10101,
"Close": 10101,
"Challenge": 10101,
- "FaultCutoff": 10101
+ "FaultCutoff": 10101,
+ "WPoStPeriodDeadlines": 42,
+ "WPoStProvingPeriod": 10101,
+ "WPoStChallengeWindow": 10101,
+ "WPoStChallengeLookback": 10101,
+ "FaultDeclarationCutoff": 10101
}
```
@@ -3489,7 +3774,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3518,7 +3803,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3533,15 +3818,14 @@ Inputs:
Response:
```json
{
- "Sectors": 42,
- "Active": 42
+ "Live": 42,
+ "Active": 42,
+ "Faulty": 42
}
```
### StateMinerSectors
StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
-If the filterOut boolean is set to true, any sectors in the filter are excluded.
-If false, only those sectors in the filter are included.
Perms: read
@@ -3549,11 +3833,10 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
0
],
- true,
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3567,6 +3850,45 @@ Inputs:
Response: `null`
+### StateMsgGasCost
+StateMsgGasCost searches for a message in the chain and returns details of the message's gas costs, including the penalty and miner tip
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "GasUsed": "0",
+ "BaseFeeBurn": "0",
+ "OverEstimationBurn": "0",
+ "MinerPenalty": "0",
+ "MinerTip": "0",
+ "Refund": "0",
+ "TotalCost": "0"
+}
+```
+
### StateNetworkName
StateNetworkName returns the name of the network the node is synced to
@@ -3577,6 +3899,28 @@ Inputs: `null`
Response: `"lotus"`
+### StateNetworkVersion
+StateNetworkVersion returns the network version at the given tipset
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `4`
+
### StateReadState
StateReadState returns the indicated actor's state.
@@ -3586,7 +3930,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3634,8 +3978,8 @@ Response:
{
"Msg": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -3652,8 +3996,8 @@ Response:
"ExecutionTrace": {
"Msg": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -3725,7 +4069,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
9,
[
{
@@ -3757,7 +4101,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
9,
[
{
@@ -3798,7 +4142,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
9,
[
{
@@ -3828,7 +4172,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
9,
[
{
@@ -3876,7 +4220,54 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### StateVerifiedRegistryRootKey
+StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"f01234"`
+
+### StateVerifierStatus
+StateVerifierStatus returns the data cap for the given address.
+Returns nil if there is no entry in the data cap table for the
+address.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3954,6 +4345,28 @@ Inputs:
Response: `"string value"`
+### SyncCheckpoint
+SyncCheckpoint marks a block as checkpointed, meaning that the node will never fork away from it.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
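+This method needs `admin` permissions, so the request must carry a bearer token (by default the node's admin token is in `~/.lotus/token`); a sketch, with the CIDs below standing in for the block CIDs of the tipset to checkpoint:
+
+```sh
+curl -X POST \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $(cat ~/.lotus/token)" \
+  --data '{ "jsonrpc": "2.0", "method": "Filecoin.SyncCheckpoint", "params": [[{"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"}, {"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"}]], "id": 1 }' \
+  'http://127.0.0.1:1234/rpc/v0'
+```
+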
### SyncIncomingBlocks
SyncIncomingBlocks returns a channel streaming incoming, potentially not
yet synced block headers.
@@ -3966,7 +4379,7 @@ Inputs: `null`
Response:
```json
{
- "Miner": "t01234",
+ "Miner": "f01234",
"Ticket": {
"VRFProof": "Ynl0ZSBhcnJheQ=="
},
@@ -4031,7 +4444,8 @@ Inputs: `null`
Response:
```json
{
- "ActiveSyncs": null
+ "ActiveSyncs": null,
+ "VMApplied": 42
}
```
@@ -4047,7 +4461,7 @@ Inputs:
[
{
"Header": {
- "Miner": "t01234",
+ "Miner": "f01234",
"Ticket": {
"VRFProof": "Ynl0ZSBhcnJheQ=="
},
@@ -4089,6 +4503,45 @@ Inputs:
Response: `{}`
+### SyncUnmarkBad
+SyncUnmarkBad unmarks a block as bad, making it possible for it to be validated and synced again.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
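+As with other `admin` methods, a token is required (one can be created with `lotus auth create-token --perm admin`); a sketch, with the CID below standing in for the bad block's CID:
+
+```sh
+curl -X POST \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $(cat ~/.lotus/token)" \
+  --data '{ "jsonrpc": "2.0", "method": "Filecoin.SyncUnmarkBad", "params": [{"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"}], "id": 1 }' \
+  'http://127.0.0.1:1234/rpc/v0'
+```
+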
+### SyncValidateTipset
+SyncValidateTipset indicates whether the provided tipset is valid or not
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `true`
+
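+A sketch of validating a specific tipset by its block CIDs (the CIDs below are placeholders):
+
+```sh
+curl -X POST \
+  -H "Content-Type: application/json" \
+  --data '{ "jsonrpc": "2.0", "method": "Filecoin.SyncValidateTipset", "params": [[{"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"}, {"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"}]], "id": 1 }' \
+  'http://127.0.0.1:1234/rpc/v0'
+```
+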
## Wallet
@@ -4101,7 +4554,7 @@ Perms: read
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -4115,7 +4568,7 @@ Perms: write
Inputs: `null`
-Response: `"t01234"`
+Response: `"f01234"`
### WalletDelete
WalletDelete deletes an address from the wallet.
@@ -4126,7 +4579,7 @@ Perms: write
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -4141,7 +4594,7 @@ Perms: admin
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -4162,7 +4615,7 @@ Perms: write
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -4184,7 +4637,7 @@ Inputs:
]
```
-Response: `"t01234"`
+Response: `"f01234"`
### WalletList
WalletList lists all the addresses in the wallet.
@@ -4209,7 +4662,7 @@ Inputs:
]
```
-Response: `"t01234"`
+Response: `"f01234"`
### WalletSetDefault
WalletSetDefault marks the given address as the default one.
@@ -4220,7 +4673,7 @@ Perms: admin
Inputs:
```json
[
- "t01234"
+ "f01234"
]
```
@@ -4235,7 +4688,7 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
"Ynl0ZSBhcnJheQ=="
]
```
@@ -4257,11 +4710,11 @@ Perms: sign
Inputs:
```json
[
- "t01234",
+ "f01234",
{
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -4278,8 +4731,8 @@ Response:
{
"Message": {
"Version": 42,
- "To": "t01234",
- "From": "t01234",
+ "To": "f01234",
+ "From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
@@ -4295,6 +4748,21 @@ Response:
}
```
+### WalletValidateAddress
+WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `"f01234"`
+
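+For example, to check whether a string parses as a well-formed address (any address string may be substituted):
+
+```sh
+curl -X POST \
+  -H "Content-Type: application/json" \
+  --data '{ "jsonrpc": "2.0", "method": "Filecoin.WalletValidateAddress", "params": ["f01234"], "id": 1 }' \
+  'http://127.0.0.1:1234/rpc/v0'
+```
+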
### WalletVerify
WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
The address does not have to be in the wallet.
@@ -4305,7 +4773,7 @@ Perms: read
Inputs:
```json
[
- "t01234",
+ "f01234",
"Ynl0ZSBhcnJheQ==",
{
"Type": 2,
diff --git a/documentation/en/api-scripting-support.md b/documentation/en/api-scripting-support.md
deleted file mode 100644
index 653f144ed..000000000
--- a/documentation/en/api-scripting-support.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Remote API Support
-
-You may want to delegate the work **Lotus Miner** or **Lotus Node** performs to other machines.
-Here is how to setup the necessary authorization and environment variables.
-
-## Environment variables
-
-Environmental variables are variables that are defined for the current shell and are inherited by any child shells or processes. Environmental variables are used to pass information into processes that are spawned from the shell.
-
-Using the [JWT you generated](https://lotu.sh/en+api#how-do-i-generate-a-token-18865), you can assign it and the **multiaddr** to the appropriate environment variable.
-
-```sh
-# Lotus Node
-FULLNODE_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/1234/http"
-
-# Lotus Miner
-MINER_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/2345/http"
-```
-
-You can also use `lotus auth api-info --perm admin` to quickly create _API_INFO env vars
-
-- The **Lotus Node**'s `multiaddr` is in `~/.lotus/api`.
-- The default token is in `~/.lotus/token`.
-- The **Lotus Miner**'s `multiaddr` is in `~/.lotusminer/config`.
-- The default token is in `~/.lotusminer/token`.
diff --git a/documentation/en/api-troubleshooting.md b/documentation/en/api-troubleshooting.md
deleted file mode 100644
index 0cb3a6800..000000000
--- a/documentation/en/api-troubleshooting.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# API Troubleshooting
-
-## Types: params
-
-`params` must be an array. If there are no `params` you should still pass an empty array.
-
-## Types: TipSet
-
-For methods such as `Filecoin.StateMinerPower`, where the method accepts the argument of the type `TipSet`, you can pass `null` to use the current chain head.
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateMinerPower", "params": ["t0101", null], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-## Types: Sending a CID
-
-If you do not serialize the CID as a [JSON IPLD link](https://did-ipid.github.io/ipid-did-method/#txref), you will receive an error. Here is an example of a broken CURL request:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": ["bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"], "id": 0 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-To fix it, change the `params` property to:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": [{"/": "bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"}], "id": 0 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
diff --git a/documentation/en/api.md b/documentation/en/api.md
deleted file mode 100644
index 9760e2f32..000000000
--- a/documentation/en/api.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# API
-
-Here is an early overview of how to make API calls.
-
-Implementation details for the **JSON-RPC** package are [here](https://github.com/filecoin-project/go-jsonrpc).
-
-## Overview: How do you modify the config.toml to change the API endpoint?
-
-API requests are made against `127.0.0.1:1234` unless you modify `.lotus/config.toml`.
-
-Options:
-
-- `http://[api:port]/rpc/v0` - HTTP endpoint
-- `ws://[api:port]/rpc/v0` - Websocket endpoint
-- `PUT http://[api:port]/rest/v0/import` - File import, it requires write permissions.
-
-## What methods can I use?
-
-For now, you can look into different files to find methods available to you based on your needs:
-
-- [Both Lotus node + miner APIs](https://github.com/filecoin-project/lotus/blob/master/api/api_common.go)
-- [Lotus node API](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go)
-- [Lotus miner API](https://github.com/filecoin-project/lotus/blob/master/api/api_storage.go)
-
-The necessary permissions for each are in [api/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/struct.go).
-
-## How do I make an API request?
-
-To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go).
-
-```go
-ChainHead(context.Context) (*types.TipSet, error)
-```
-
-And create a CURL command. In this command, `ChainHead` is included as `{ "method": "Filecoin.ChainHead" }`:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-If the request requires authorization, add an authorization header:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer $(cat ~/.lotusminer/token)" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-> In the future we will add a playground to make it easier to build and experiment with API requests.
-
-## CURL authorization
-
-To authorize your request, you will need to include the **JWT** in a HTTP header, for example:
-
-```sh
--H "Authorization: Bearer $(cat ~/.lotusminer/token)"
-```
-
-Admin token is stored in `~/.lotus/token` for the **Lotus Node** or `~/.lotusminer/token` for the **Lotus Miner**.
-
-## How do I generate a token?
-
-To generate a JWT with custom permissions, use this command:
-
-```sh
-# Lotus Node
-lotus auth create-token --perm admin
-
-# Lotus Miner
-lotus-miner auth create-token --perm admin
-```
-
-## What authorization level should I use?
-
-When viewing [api/apistruct/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/apistruct/struct.go), you will encounter these types:
-
-- `read` - Read node state, no private data.
-- `write` - Write to local store / chain, and `read` permissions.
-- `sign` - Use private keys stored in wallet for signing, `read` and `write` permissions.
-- `admin` - Manage permissions, `read`, `write`, and `sign` permissions.
diff --git a/documentation/en/architecture.md b/documentation/en/architecture/architecture.md
similarity index 99%
rename from documentation/en/architecture.md
rename to documentation/en/architecture/architecture.md
index 619e04f05..61cd117bb 100644
--- a/documentation/en/architecture.md
+++ b/documentation/en/architecture/architecture.md
@@ -6,7 +6,7 @@ Filecoin protocol, validating the blocks and state transitions.
The specification for the Filecoin protocol can be found [here](https://filecoin-project.github.io/specs/).
For information on how to setup and operate a Lotus node,
-please follow the instructions [here](https://lotu.sh/en+getting-started).
+please follow the instructions [here](en+getting-started).
# Components
@@ -259,7 +259,7 @@ When we launch a Lotus node with the command `./lotus daemon`
(see [here](https://github.com/filecoin-project/lotus/blob/master/cmd/lotus/daemon.go) for more),
the node is created through [dependency injection](https://godoc.org/go.uber.org/fx).
This relies on reflection, which makes some of the references hard to follow.
-The node sets up all of the subsystems it needs to run, such as the repository, the network connections, thechain sync
+The node sets up all of the subsystems it needs to run, such as the repository, the network connections, the chain sync
service, etc.
This setup is orchestrated through calls to the `node.Override` function.
The structure of each call indicates the type of component it will set up
diff --git a/documentation/en/mpool.md b/documentation/en/architecture/mpool.md
similarity index 100%
rename from documentation/en/mpool.md
rename to documentation/en/architecture/mpool.md
diff --git a/documentation/en/cli.md b/documentation/en/cli.md
deleted file mode 100644
index fd26400d0..000000000
--- a/documentation/en/cli.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# Lotus Command Line Interface
-
-The Command Line Interface (CLI) is a convenient way to interact with
-a Lotus node. You can use the CLI to operate your node,
-get information about the blockchain,
-manage your accounts and transfer funds,
-create storage deals, and much more!
-
-The CLI is intended to be self-documenting, so when in doubt, simply add `--help`
-to whatever command you're trying to run! This will also display all of the
-input parameters that can be provided to a command.
-
-We highlight some of the commonly
-used features of the CLI below.
-All CLI commands should be run from the home directory of the Lotus project.
-
-## Operating a Lotus node
-
-### Starting up a node
-
-```sh
-lotus daemon
-```
-This command will start up your Lotus node, with its API port open at 1234.
-You can pass `--api=` to use a different port.
-
-### Checking your sync progress
-
-```sh
-lotus sync status
-```
-This command will print your current tipset height under `Height`, and the target tipset height
-under `Target`.
-
-You can also run `lotus sync wait` to get constant updates on your sync progress.
-
-### Getting the head tipset
-
-```sh
-lotus chain head
-```
-
-### Control the logging level
-
-```sh
-lotus log set-level
-```
-This command can be used to toggle the logging levels of the different
-systems of a Lotus node. In decreasing order
-of logging detail, the levels are `debug`, `info`, `warn`, and `error`.
-
-As an example,
-to set the `chain` and `blocksync` systems to log at the `debug` level, run
-`lotus log set-level --system chain --system blocksync debug`.
-
-To see the various logging systems, run `lotus log list`.
-
-### Find out what version of Lotus you're running
-
-```sh
-lotus version
-```
-
-## Managing your accounts
-
-### Listing accounts in your wallet
-
-```sh
-lotus wallet list
-```
-
-### Creating a new account
-
-```sh
-lotus wallet new bls
-```
-This command will create a new BLS account in your wallet; these
-addresses start with the prefix `t3`. Running `lotus wallet new secp256k1`
-(or just `lotus wallet new`) will create
-a new Secp256k1 account, which begins with the prefix `t1`.
-
-### Getting an account's balance
-
-```sh
-lotus wallet balance
-```
-
-### Transferring funds
-
-```sh
-lotus send --source=<source address> <destination address> <amount>
-```
-This command will transfer `amount` (in attoFIL) from `source address` to `destination address`.
-
-### Importing an account into your wallet
-
-```sh
-lotus wallet import <path>
-```
-This command will import an account whose private key is saved at the specified file.
-
-### Exporting an account from your wallet
-
-```sh
-lotus wallet export <address>
-```
-This command will print out the private key of the specified address
-if it is in your wallet. Always be careful with your private key!
diff --git a/documentation/en/dev/create-miner.md b/documentation/en/create-miner.md
similarity index 83%
rename from documentation/en/dev/create-miner.md
rename to documentation/en/create-miner.md
index 9a1cf2d4e..7b3b81765 100644
--- a/documentation/en/dev/create-miner.md
+++ b/documentation/en/create-miner.md
@@ -43,9 +43,9 @@ FIXME: Is there an easy way to visualize the message generated by the Faucet?
## Storage miner node
-The `lotus-storage-miner` command provides a set of tools to manage the miners associated with the local storage miner node. At this point it is important to note the different [node types](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go), in the previous document we always referred to a *single* local node, `FullNode`, which handled the sync process and any other communication with the Filecoin network (the term *full* stands for full validation of the consensus protocol, there are no *light* clients at the moment that do not do the full validation). We now create a new node of type [`StorageMiner`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go), with its own repo (each node is always associated to its own repo), by default in `~/.lotusstorage`. The difference between the two nodes lies in the services they run (see build options in the main architecture document).
+The `lotus-miner` command provides a set of tools to manage the miners associated with the local storage miner node. At this point it is important to note the different [node types](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go): in the previous document we always referred to a *single* local node, `FullNode`, which handled the sync process and any other communication with the Filecoin network (the term *full* stands for full validation of the consensus protocol; there are no *light* clients at the moment that skip this validation). We now create a new node of type [`StorageMiner`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go), with its own repo (each node is always associated with its own repo), by default in `~/.lotusstorage`. The difference between the two nodes lies in the services they run (see the build options in the main architecture document).
-The `lotus-storage-miner init` command option creates a new storage miner node. We will only be able to run the command once the chain has been synced by the full node (which needs to be running) and it will also require the download of the [proof parameters](https://filecoin.io/blog/filecoin-proof-system/) (of several GBs, so it may take some time).
+The `lotus-miner init` command option creates a new storage miner node. We will only be able to run the command once the chain has been synced by the full node (which needs to be running) and it will also require the download of the [proof parameters](https://filecoin.io/blog/filecoin-proof-system/) (of several GBs, so it may take some time).
The main options that define a miner are the owner and worker addresses associated with it (stored in [`MinerInfo`](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/miner/miner_state.go), a substructure of the Miner Actor State) and its peer ID. We use default values for all of these options in the command and briefly describe them here:
@@ -55,7 +55,7 @@ The main options that define a miner are the owner and worker addresses associat
* [Peer ID](https://docs.libp2p.io/reference/glossary/#peerid): a network ID (belonging to the `libp2p` stack) used to contact the miner directly off-chain (e.g., to make a storage deal). Note the difference with the rest of the communication in the Filecoin network that happens largely inside the chain itself: when we "send" messages to the different actors that is actually a VM abstraction meaning we execute the method in the VM itself run by logic of the targeted actor, physically (at the network TCP/IP level) we broadcast the message to all of our peers to be included in a Filecoin block.
-With the miner information filled the command constructs a Filecoin message to broadcast to the network and be included in a Filecoin block by a miner (see [`createStorageMiner()`](https://github.com/filecoin-project/lotus/blob/master/cmd/lotus-storage-miner/init.go)). We will wait for that block to be synced to the chain (by the full node) before returning the miner ID address. The ID address is another way to refer to the miner through a unique ID in the chain, it has a type 0 and it is the address that is normally seen in chain visualization tools, e.g., `t01475` (since, in contrast with the public-key types of addresses, it is easily readable by humans).
+With the miner information filled in, the command constructs a Filecoin message to broadcast to the network and be included in a Filecoin block by a miner (see [`createStorageMiner()`](https://github.com/filecoin-project/lotus/blob/master/cmd/lotus-miner/init.go)). We will wait for that block to be synced to the chain (by the full node) before returning the miner ID address. The ID address is another way to refer to the miner through a unique ID in the chain; it has type 0 and it is the address that is normally seen in chain visualization tools, e.g., `t01475` (since, in contrast with the public-key types of addresses, it is easily readable by humans).
The Filecoin message constructed will be targeted to the [Power Actor](https://filecoin-project.github.io/specs/#systems__filecoin_blockchain__storage_power_consensus__storage_power_actor) (`StoragePowerActorAddr`), which tracks the amount of power (storage capacity) every miner has, and it will have the method number of the [`CreateMiner`](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/methods.go) constant.
@@ -69,13 +69,13 @@ Back to the CLI command, the [`MpoolPushMessage`](https://github.com/filecoin-pr
## VM: message execution
-We describe here the code flow inside the VM when it executes the `CreateMiner` method (of the message sent by the `lotus-storage-miner` command included by a miner in a block). This execution will be the same seen by all participants in the Filecoin protocol, the miner including the message in the block, the full node syncing to it, and any other peer receiving also this message.
+We describe here the code flow inside the VM when it executes the `CreateMiner` method (of the message sent by the `lotus-miner` command and included by a miner in a block). This execution will be the same as seen by all participants in the Filecoin protocol: the miner including the message in the block, the full node syncing to it, and any other peer also receiving this message.
There is a one-to-one mapping between the pair of actor and method number (`To:`/`Method:` fields) in a message in the VM, and the Go function in an actor's exported methods list that implement it. In this case, for the Power Actor list of method numbers defined in [`MethodsPower`](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/methods.go), the `CreateMiner` method number 2 will correspond to the Go function with the same index in the list of methods returned by [`Exports()`](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/power/power_actor.go) (and normally also the same name, here `(Actor).CreateMiner()`).
The Power Actor in `CreateMiner()` will do two things:
-1. Send *another* message, `Exec`, to the Init Actor to instruct it to create the miner actor with the information provided by `lotus-storage-miner` and receive its ID address (this ID is the one returned to the CLI command).
+1. Send *another* message, `Exec`, to the Init Actor to instruct it to create the miner actor with the information provided by `lotus-miner` and receive its ID address (this ID is the one returned to the CLI command).
2. Generate an entry in its list of power claims ([`State.Claims`](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/power/power_state.go)) for the newly created ID address of the miner.
diff --git a/documentation/en/dev-tools.md b/documentation/en/dev-tools.md
deleted file mode 100644
index 60b9b26d4..000000000
--- a/documentation/en/dev-tools.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Developer Tools
-
-> Running a local network can be a great way to understand how Lotus works and test your setup.
diff --git a/documentation/en/environment-vars.md b/documentation/en/environment-vars.md
deleted file mode 100644
index 9d455a74d..000000000
--- a/documentation/en/environment-vars.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Lotus Environment Variables
-
-## Building
-
-## Common
-
-The environment variables are common across most lotus binaries.
-
-### `LOTUS_FD_MAX`
-
-Sets the file descriptor limit for the process. This should be set high (8192
-or higher) if you ever notice 'too many open file descriptor' errors.
-
-### `LOTUS_JAEGER`
-
-This can be set to enable jaeger trace reporting. The value should be the url
-of the jaeger trace collector, the default for most jaeger setups should be
-`localhost:6831`.
-
-### `LOTUS_DEV`
-
-If set to a non-empty value, certain parts of the application will print more
-verbose information to aid in development of the software. Not recommended for
-end users.
-
-## Lotus Daemon
-
-### `LOTUS_PATH`
-
-Sets the location for the lotus daemon on-disk repo. If left empty, this defaults to `~/.lotus`.
-
-### `LOTUS_SKIP_GENESIS_CHECK`
-
-Can be set to `_yes_` if you wish to run a lotus network with a different
-genesis than the default one built into your lotus binary.
-
-### `LOTUS_CHAIN_TIPSET_CACHE`
-
-Sets the cache size for the chainstore tipset cache. The default value is 8192,
-but if your usage of the lotus API involves frequent arbitrary tipset lookups,
-you may want to increase this.
-
-### `LOTUS_CHAIN_INDEX_CACHE`
-
-Sets the cache size for the chainstore epoch index cache. The default value is 32768,
-but if your usage of the lotus API involves frequent deep chain lookups for
-block heights that are very far from the current chain height, you may want to
-increase this.
-
-
-### `LOTUS_BSYNC_MSG_WINDOW`
-
-Set the initial maximum window size for message fetching blocksync requests. If
-you have a slower internet connection and are having trouble syncing, you might
-try lowering this down to 10-20 for a 'poor' internet connection.
-
-## Lotus Miner
-
-A number of environment variables are respected for configuring the behavior of the filecoin proving subsystem. For more details on those [see here](https://github.com/filecoin-project/rust-fil-proofs/#settings).
-
-### `LOTUS_MINER_PATH`
-
-Sets the location for the lotus miners on-disk repo. If left empty, this defaults to `~/.lotusminer`.
-
-
diff --git a/documentation/en/faqs.md b/documentation/en/faqs.md
deleted file mode 100644
index c2d526830..000000000
--- a/documentation/en/faqs.md
+++ /dev/null
@@ -1,138 +0,0 @@
-# Frequently Asked Questions
-
-Here are some FAQs concerning the Lotus implementation and participation in
-Testnet.
-For questions concerning the broader Filecoin project, please
-go [here](https://filecoin.io/faqs/).
-
-## Introduction to Lotus
-
-### What is Lotus?
-
-Lotus is an implementation of the **Filecoin Distributed Storage Network**, written in Go.
-It is designed to be modular and interoperable with any other implementation of the Filecoin Protocol.
-More information about Lotus can be found [here](https://lotu.sh/).
-
-### What are the components of Lotus?
-
-Lotus is composed of two separate pieces that can talk to each other:
-
-The Lotus Node can sync the blockchain, validating all blocks, transfers, and deals
-along the way. It can also facilitate the creation of new storage deals. If you are not
-interested in providing your own storage to the network, and do not want to produce blocks
-yourself, then the Lotus Node is all you need!
-
-The Lotus Miner does everything you need for the registration of storage, and the
-production of new blocks. The Lotus Miner communicates with the network by talking
-to a Lotus Node over the JSON-RPC API.
-
-## Setting up a Lotus Node
-
-### How do I set up a Lotus Node?
-
-Follow the instructions found [here](https://lotu.sh/en+getting-started).
-
-### Where can I get the latest version of Lotus?
-
-Download the binary tagged as the `Latest Release` from the
- [Lotus Github repo](https://github.com/filecoin-project/lotus/releases).
-
-### What operating systems can Lotus run on?
-
-Lotus can build and run on most Linux and MacOS systems with at least
-8GB of RAM. Windows is not yet supported.
-
-### How can I update to the latest version of Lotus?
-
-To update Lotus, follow the instructions [here](https://lotu.sh/en+updating-lotus).
-
-### How do I prepare a fresh installation of Lotus?
-
-Stop the Lotus daemon, and delete all related files, including sealed and chain data by
-running `rm ~/.lotus ~/.lotusminer`.
-
-Then, install Lotus afresh by following the instructions
-found [here](https://lotu.sh/en+getting-started).
-
-### Can I configure where the node's config and data goes?
-
-Yes! The `LOTUS_PATH` variable sets the path for where the Lotus node's data is written.
-The `LOTUS_MINER_PATH` variable does the same for miner-specific information.
-
-## Interacting with a Lotus Node
-
-### How can I communicate with a Lotus Node?
-
-Lotus Nodes have a command-line interface, as well as a JSON-RPC API.
-
-### What are the commands I can send using the command-line interface?
-
-The command-line interface is self-documenting, try running `lotus --help` from the `lotus` home
-directory for more.
-
-### How can I send a request over the JSON-RPC API?
-
-Information on how to send a `cURL` request to the JSON-RPC API can be found
-[here](https://lotu.sh/en+api). A JavaScript client is under development.
-
-### What are the requests I can send over the JSON-RPC API?
-
-Please have a look at the
-[source code](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go)
-for a list of methods supported by the JSON-RPC API.
-## The Test Network
-
-### What is Testnet?
-
-Testnet is a live network of Lotus Nodes run by the
-community for testing purposes.
- It has 2 PiB of storage (and growing!) dedicated to it.
-
-### Is FIL on the Testnet worth anything?
-
-Nothing at all! Real-world incentives may be provided in a future phase of Testnet, but this is
-yet to be confirmed.
-
-### How can I see the status of Testnet?
-
-The [dashboard](https://stats.testnet.filecoin.io/) displays the status of the network as
-well as a ton
-of other metrics you might find interesting.
-
-## Mining with a Lotus Node on Testnet
-
-### How do I get started mining with Lotus?
-
-Follow the instructions found [here](https://lotu.sh/en+mining).
-
-### What are the minimum hardware requirements?
-
-An example test configuration, and minimum hardware requirements can be found
-[here](https://lotu.sh/en+hardware-mining).
-
-Note that these might NOT be the minimum requirements for mining on Mainnet.
-
-### What are some GPUs that have been tested?
-
-A list of benchmarked GPUs can be found [here](https://lotu.sh/en+hardware-mining#benchmarked-gpus-7393).
-
-### Why is my GPU not being used when sealing a sector?
-
-Sealing a sector does not involve constant GPU operations. It's possible
-that your GPU simply isn't necessary at the moment you checked.
-
-## Advanced questions
-
-### Is there a Docker image for lotus?
-
-Community-contributed Docker and Docker Compose examples are available
-[here](https://github.com/filecoin-project/lotus/tree/master/tools/dockers/docker-examples).
-
-### How can I run two miners on the same machine?
-
-You can do so by changing the storage path variable for the second miner, e.g.,
-`LOTUS_MINER_PATH=~/.lotusminer2`. You will also need to make sure that no ports collide.
-
-### How do I setup my own local devnet?
-
-Follow the instructions found [here](https://lotu.sh/en+setup-local-dev-net).
diff --git a/documentation/en/getting-started.md b/documentation/en/getting-started.md
deleted file mode 100644
index e38a2ab97..000000000
--- a/documentation/en/getting-started.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Lotus
-
-Lotus is an implementation of the **Filecoin Distributed Storage Network**. You can run the Lotus software client to join the **Filecoin Testnet**.
-
-For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://filecoin-project.github.io/specs/).
-
-## What can I learn here?
-
-- How to install Lotus on [Arch Linux](https://lotu.sh/en+install-lotus-arch), [Ubuntu](https://lotu.sh/en+install-lotus-ubuntu), or [MacOS](https://lotu.sh/en+install-lotus-macos).
-- Joining the [Lotus Testnet](https://lotu.sh/en+join-testnet).
-- [Storing](https://lotu.sh/en+storing-data) or [retrieving](https://lotu.sh/en+retrieving-data) data.
-- Mining Filecoin using the **Lotus Miner** in your [CLI](https://lotu.sh/en+mining).
-
-## How is Lotus designed?
-
-Lotus is architected modularly to keep clean API boundaries while using the same process. Installing Lotus will include two separate programs:
-
-- The **Lotus Node**
-- The **Lotus Miner**
-
-The **Lotus Miner** is intended to be run on the machine that manages a single miner instance, and is meant to communicate with the **Lotus Node** via the websocket **JSON-RPC** API for all of the chain interaction needs.
-
-This way, a mining operation may easily run a **Lotus Miner** or many of them, connected to one or many **Lotus Node** instances.
diff --git a/documentation/en/hardware-mining.md b/documentation/en/hardware-mining.md
deleted file mode 100644
index d421f6fb1..000000000
--- a/documentation/en/hardware-mining.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Protocol Labs Standard Testing Configuration
-
-> This documentation page describes the standard testing configuration the Protocol Labs team has used to test **Lotus Miner**s on Lotus. There is no guarantee this testing configuration will be suitable for Filecoin storage mining at MainNet launch. If you need to buy new hardware to join the Filecoin Testnet, we recommend to buy no more hardware than you require for testing. To learn more please read this [Protocol Labs Standard Testing Configuration post](https://filecoin.io/blog/filecoin-testnet-mining/).
-
-**Sector sizes** and **minimum pledged storage** required to mine blocks are two very important Filecoin Testnet parameters that impact hardware decisions. We will continue to refine all parameters during Testnet.
-
-BECAUSE OF THIS, OUR STANDARD TESTING CONFIGURATION FOR FILECOIN MAINNET CAN AND WILL CHANGE. YOU HAVE BEEN WARNED.
-
-## Example configuration
-
-The setup below is a minimal example for sealing 32 GiB sectors on Lotus:
-
-- 2 TB of hard drive space.
-- 8 core CPU
-- 128 GiB of RAM
-
-Note that 1GB sectors don't require as high of specs, but are likely to be removed as we improve the performance of 32GB sector sealing.
-
-For the first part of the sealing process, AMD CPUs are __highly recommended__ because of the `Intel SHA Extensions` instruction set, which has been available on them since the `Zen` microarchitecture. Hence, AMD CPUs seem to perform much better on the testnet than other CPUs. Contrary to what the name implies, this extended instruction set is not available on recent Intel desktop/server chips.
-
-## Testnet discoveries
-
-- If you only have 128GiB of ram, enabling 256GB of **NVMe** swap on an SSD will help you avoid out-of-memory issues while mining.
-
-## Benchmarked GPUs
-
-GPUs are a must for getting **block rewards**. Here are a few that have been confirmed to generate **SNARKs** quickly enough to successfully mine blocks on the Lotus Testnet.
-
-- GeForce RTX 2080 Ti
-- GeForce RTX 2080 SUPER
-- GeForce RTX 2080
-- GeForce GTX 1080 Ti
-- GeForce GTX 1080
-- GeForce GTX 1060
-
-## Testing other GPUs
-
-If you want to test a GPU that is not explicitly supported, use the following global **environment variable**:
-
-```sh
-BELLMAN_CUSTOM_GPU="<gpu name>:<number of cores>"
-```
-
-Here is an example of trying a GeForce GTX 1660 Ti with 1536 cores.
-
-```sh
-BELLMAN_CUSTOM_GPU="GeForce GTX 1660 Ti:1536"
-```
-
-To get the number of cores for your GPU, you will need to check your card’s specifications.
-
-## Benchmarking
-
-Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**.
diff --git a/documentation/en/hardware.md b/documentation/en/hardware.md
deleted file mode 100644
index f6250548a..000000000
--- a/documentation/en/hardware.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Hardware
-
-> This page is a work in progress. Exact mining requirements are still in the works.
-
-Lotus can build and run on most [Linux](https://ubuntu.com/) and [MacOS](https://www.apple.com/macos) systems with at least 8GiB of RAM.
-
-Windows is not yet supported.
diff --git a/documentation/en/install-lotus-arch.md b/documentation/en/install-lotus-arch.md
deleted file mode 100644
index 8e06aae4e..000000000
--- a/documentation/en/install-lotus-arch.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Arch Linux Instructions
-
-These steps will install the following dependencies:
-
-- go (1.14 or higher)
-- gcc (7.4.0 or higher)
-- git (version 2 or higher)
-- bzr (some go dependency needs this)
-- jq
-- pkg-config
-- opencl-icd-loader
-- opencl driver (like nvidia-opencl on arch) (for GPU acceleration)
-- opencl-headers (build)
-- rustup (proofs build)
-- llvm (proofs build)
-- clang (proofs build)
-
-### Install dependencies
-
-```sh
-sudo pacman -Syu opencl-icd-loader gcc git bzr jq pkg-config opencl-icd-loader opencl-headers
-```
-
-### Install Go 1.14
-
-Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install).
-
-### Clone the Lotus repository
-
-```sh
-git clone https://github.com/filecoin-project/lotus.git
-cd lotus/
-```
-
-### Build the Lotus binaries from source and install
-
-! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively**
-
-```sh
-make clean && make all
-sudo make install
-```
-
-#### Native Filecoin FFI building
-
-```sh
-env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all
-sudo make install
-```
-
-After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet).
diff --git a/documentation/en/install-lotus-fedora.md b/documentation/en/install-lotus-fedora.md
deleted file mode 100644
index c37161b7a..000000000
--- a/documentation/en/install-lotus-fedora.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Fedora Instructions
-
-> tested on Fedora 30
-
-**NOTE:** If you have an AMD GPU the opencl instructions may be incorrect...
-
-These steps will install the following dependencies:
-
-- go (1.14 or higher)
-- gcc (7.4.0 or higher)
-- git (version 2 or higher)
-- bzr (some go dependency needs this)
-- jq
-- pkg-config
-- rustup (proofs build)
-- llvm (proofs build)
-- clang (proofs build)
-
-### Install dependencies
-
-```sh
-$ sudo dnf -y update
-$ sudo dnf -y install gcc git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm
-$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-```
-
-### Install Go 1.14
-
-Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install).
-
-### Clone the Lotus repository
-
-```sh
-git clone https://github.com/filecoin-project/lotus.git
-cd lotus/
-```
-
-### Build the Lotus binaries from source and install
-
-! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively**
-
-```sh
-$ make clean && make all
-$ sudo make install
-```
-
-#### Native Filecoin FFI building
-
-```sh
-env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all
-sudo make install
-```
-
-After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus TestNet](https://lotu.sh/en+join-testnet).
diff --git a/documentation/en/install-lotus-macos.md b/documentation/en/install-lotus-macos.md
deleted file mode 100644
index 371832c96..000000000
--- a/documentation/en/install-lotus-macos.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# MacOS Instructions
-
-## Get XCode Command Line Tools
-
-To check if you already have the XCode Command Line Tools installed via the CLI, run:
-
-```sh
-xcode-select -p
-```
-
-If this command returns a path, you can move on to the next step. Otherwise, to install via the CLI, run:
-
-```sh
-xcode-select --install
-```
-
-To update, run:
-
-```sh
-sudo rm -rf /Library/Developer/CommandLineTools
-xcode-select --install
-```
-
-## Get HomeBrew
-
-We recommend that MacOS users use [HomeBrew](https://brew.sh) to install each of the necessary packages.
-
-Check if you have HomeBrew:
-
-```sh
-brew -v
-```
-
-This command returns a version number if you have HomeBrew installed and nothing otherwise.
-
-In your terminal, enter this command to install Homebrew:
-
-```sh
-/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-```
-
-Use the command `brew install` to install the following packages:
-
-```sh
-brew install go bzr jq pkg-config rustup
-```
-
-Clone
-
-```sh
-git clone https://github.com/filecoin-project/lotus.git
-cd lotus/
-```
-
-Build
-
-```sh
-make clean && make all
-sudo make install
-```
-
-After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet).
diff --git a/documentation/en/install-lotus-ubuntu.md b/documentation/en/install-lotus-ubuntu.md
deleted file mode 100644
index 500650692..000000000
--- a/documentation/en/install-lotus-ubuntu.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Ubuntu Instructions
-
-These steps will install the following dependencies:
-
-- go (1.14 or higher)
-- gcc (7.4.0 or higher)
-- git (version 2 or higher)
-- bzr (some go dependency needs this)
-- jq
-- pkg-config
-- opencl-icd-loader
-- opencl driver (like nvidia-opencl on arch) (for GPU acceleration)
-- opencl-headers (build)
-- rustup (proofs build)
-- llvm (proofs build)
-- clang (proofs build)
-
-### Install dependencies
-
-```sh
-sudo apt update
-sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl
-sudo apt upgrade
-```
-
-### Install Go 1.14
-
-Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install).
-
-### Clone the Lotus repository
-
-```sh
-git clone https://github.com/filecoin-project/lotus.git
-cd lotus/
-```
-
-### Build the Lotus binaries from source and install
-
-! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively**
-
-```sh
-make clean && make all
-sudo make install
-```
-
-#### Native Filecoin FFI building
-
-```sh
-env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all
-sudo make install
-```
-
-
-After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet).
diff --git a/documentation/en/install-systemd-services.md b/documentation/en/install-systemd-services.md
deleted file mode 100644
index fbde1feec..000000000
--- a/documentation/en/install-systemd-services.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# Use Lotus with systemd
-
-Lotus is capable of running as a systemd service daemon. You can find installable service files for systemd in the [lotus repo scripts directory](https://github.com/filecoin-project/lotus/tree/master/scripts) as files with `.service` extension. In order to install these service files, you can copy these `.service` files to the default systemd unit load path.
-
-The services expect their binaries to be present in `/usr/local/bin/`. You can use `make` to install them by running:
-
-```sh
-$ sudo make install
-```
-
-for `lotus` and `lotus-storage-miner` and
-
-```sh
-$ sudo make install-chainwatch
-```
-
-for the `chainwatch` tool.
-
-## Installing services via `make`
-
-If your host uses the default systemd unit load path, the `lotus-daemon` and `lotus-miner` services can be installed by running:
-
-```sh
-$ sudo make install-services
-```
-
-To install the `lotus-chainwatch` service, run:
-
-```sh
-$ sudo make install-chainwatch-service
-```
-
-You can install all services together by running:
-
-```sh
-$ sudo make install-all-services
-```
-
-The `lotus-daemon` and the `lotus-miner` services can be installed individually too by running:
-
-```sh
-$ sudo make install-daemon-service
-```
-
-and
-
-```sh
-$ sudo make install-miner-service
-```
-
-### Notes
-
-When installing the `lotus-miner` and/or `lotus-chainwatch` service the `lotus-daemon` service gets automatically installed since the other two services depend on it being installed to run.
-
-All `install-*-service*` commands will install the latest binaries in the lotus build folders to `/usr/local/bin/`. If you do not want to use the latest build binaries please copy the `*.service` files by hand.
-
-## Removing via `make`
-
-All services can be removed via `make`. To remove all services together, run:
-
-```sh
-$ sudo make clean-all-services
-```
-
-Individual services can be removed by running:
-
-```sh
-$ sudo make clean-chainwatch-services
-$ sudo make clean-miner-services
-$ sudo make clean-daemon-services
-```
-
-### Notes
-
-The services will be stopped and disabled when removed.
-
-Removing the `lotus-daemon` service will automatically remove the depending services `lotus-miner` and `lotus-chainwatch`.
-
-
-## Controlling services
-
-All services can be controlled with `systemctl`. A few basic control commands are listed below. To get detailed information about the capabilities of the `systemctl` command, please consult your distribution's man pages by running:
-
-```sh
-$ man systemctl
-```
-
-### Start/Stop services
-
-You can start the services by running:
-
-```sh
-$ sudo systemctl start lotus-daemon
-$ sudo systemctl start lotus-miner
-$ sudo systemctl start lotus-chainwatch
-```
-
-and can be stopped by running:
-
-```sh
-$ sudo systemctl stop lotus-daemon
-$ sudo systemctl stop lotus-miner
-$ sudo systemctl stop lotus-chainwatch
-```
-
-### Enabling services on startup
-
-To enable the services to run automatically on startup execute:
-
-```sh
-$ sudo systemctl enable lotus-daemon
-$ sudo systemctl enable lotus-miner
-$ sudo systemctl enable lotus-chainwatch
-```
-
-To disable the services on startup run:
-
-```sh
-$ sudo systemctl disable lotus-daemon
-$ sudo systemctl disable lotus-miner
-$ sudo systemctl disable lotus-chainwatch
-```
-### Notes
-
-Systemd will not let services be enabled or started without their requirements. Starting the `lotus-chainwatch` and/or `lotus-miner` service will automatically start the `lotus-daemon` service (if installed!). Stopping the `lotus-daemon` service will stop the other two services. The same pattern applies to enabling and disabling the services.
-
-## Interacting with service logs
-
-Logs from the services can be reviewed using `journalctl`.
-
-### Follow logs from a specific service unit
-
-```sh
-$ sudo journalctl -u lotus-daemon -f
-```
-
-### View logs in reverse order
-
-```sh
-$ sudo journalctl -u lotus-miner -r
-```
-
-### Log files
-
-Besides the systemd service logs all services save their own log files in `/var/log/lotus/`.
diff --git a/documentation/en/dev-tools-jaeger-tracing.md b/documentation/en/jaeger-tracing.md
similarity index 100%
rename from documentation/en/dev-tools-jaeger-tracing.md
rename to documentation/en/jaeger-tracing.md
diff --git a/documentation/en/join-testnet.md b/documentation/en/join-testnet.md
deleted file mode 100644
index 6660d26d8..000000000
--- a/documentation/en/join-testnet.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# Join Testnet
-
-## Introduction
-
-Anyone can set up a **Lotus Node** and connect to the **Lotus Testnet**. This is the best way to explore the current CLI and the **Filecoin Decentralized Storage Market**.
-
-## Note: Using the Lotus Node from China
-
-If you are trying to use `lotus` from China, you should set this **environment variable** on your machine:
-
-```sh
-export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
-```
-
-## Get started
-
-Start the **daemon** using the default configuration in `./build`:
-
-```sh
-lotus daemon
-```
-
-In another terminal window, check your connection with peers:
-
-```sh
-lotus net peers | wc -l
-```
-
-In order to connect to the network, you need to be connected to at least 1 peer. If you’re seeing 0 peers, read our [troubleshooting notes](https://lotu.sh/en+setup-troubleshooting).
-
-Make sure that you have a reasonable "open files limit" set on your machine, such as 10000. If you're seeing a lower value, such as 256 (default on macOS), read our [troubleshooting notes](https://lotu.sh/en+setup-troubleshooting) on how to update it prior to starting the Lotus daemon.
-
-## Chain sync
-
-While the daemon is running, the next requirement is to sync the chain. Run the command below to view the chain sync progress. To see current chain height, visit the [network stats page](https://stats.testnet.filecoin.io/).
-
-```sh
-lotus sync wait
-```
-
-- This step will take anywhere between a few hours to a couple of days.
-- You will be able to perform **Lotus Testnet** operations after it is finished.
-
-## Create your first address
-
-Initialize a new wallet:
-
-```sh
-lotus wallet new
-```
-
-Sometimes your operating system may limit file name length to under 150 characters. You need to use a file system that supports long filenames.
-
-Here is an example of the response:
-
-```sh
-t1aswwvjsae63tcrniz6x5ykvsuotlgkvlulnqpsi
-```
-
-- Visit the [faucet](http://spacerace.faucet.glif.io/) to add funds.
-- Paste the address you created under REQUEST.
-- Press the Request button.
-
-## Check wallet address balance
-
-Wallet balances in the Lotus Testnet are in **FIL**; the smallest denomination of FIL is an **attoFIL**, where 1 attoFIL = 10^-18 FIL.
-
-```sh
-lotus wallet balance
-```
-
-You will not see any attoFIL in your wallet if your **chain** is not fully synced.
-
-## Send FIL to another wallet
-
-To send FIL to another wallet from your default account, use this command:
-
-```
-lotus send <target address> <amount>
-```
-
-## Configure your node's connectivity
-
-To effectively accept incoming storage & retrieval deals, your Lotus node needs to be accessible to other nodes on the network. To improve your connectivity, be sure to:
-
-- [Set the multiaddresses for your miner to listen on](https://docs.filecoin.io/mine/connectivity/#setting-multiaddresses)
-- [Maintain a healthy peer count](https://docs.filecoin.io/mine/connectivity/#checking-peer-count)
-- [Enable port forwarding](https://docs.filecoin.io/mine/connectivity/#port-forwarding)
-- [Configure your public IP address and port](https://docs.filecoin.io/mine/connectivity/#setting-a-public-ip-address)
-
-## Monitor the dashboard
-
-To see the latest network activity, including **chain block height**, **blocktime**, **total network power**, and the largest **block producer miner**, check out the [monitoring dashboard](https://stats.testnet.filecoin.io).
diff --git a/documentation/en/local-dev-net.md b/documentation/en/local-dev-net.md
deleted file mode 100644
index 3382b6471..000000000
--- a/documentation/en/local-dev-net.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Setup Local Devnet
-
-Build the Lotus binaries in debug mode. This enables the use of 2048 byte sectors.
-
-```sh
-make 2k
-```
-
-Set the `LOTUS_SKIP_GENESIS_CHECK` environment variable to `_yes_`. This tells your
-Lotus node that it's okay if the genesis being used doesn't match any baked-in
-genesis.
-
-```sh
-export LOTUS_SKIP_GENESIS_CHECK=_yes_
-```
-
-Download the 2048 byte parameters:
-```sh
-./lotus fetch-params 2048
-```
-
-Pre-seal some sectors:
-
-```sh
-./lotus-seed pre-seal --sector-size 2KiB --num-sectors 2
-```
-
-Create the genesis block and start up the first node:
-
-```sh
-./lotus-seed genesis new localnet.json
-./lotus-seed genesis add-miner localnet.json ~/.genesis-sectors/pre-seal-t01000.json
-./lotus daemon --lotus-make-genesis=devgen.car --genesis-template=localnet.json --bootstrap=false
-```
-
-Then, in another console, import the genesis miner key:
-
-```sh
-./lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
-```
-
-Set up the genesis miner:
-
-```sh
-./lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
-```
-
-Now, finally, start up the miner:
-
-```sh
-./lotus-miner run --nosync
-```
-
-If all went well, you will have your own local Lotus Devnet running.
diff --git a/documentation/en/miner-deals.md b/documentation/en/miner-deals.md
deleted file mode 100644
index 0aee0e1af..000000000
--- a/documentation/en/miner-deals.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Information for Miners
-
-Here is how a miner can get set up to accept storage deals. The first step is
-to install a Lotus node and sync to the top of the chain.
-
-## Set up an ask
-
-```
-lotus-miner set-price <price>
-```
-
-This command will set up your miner to accept deal proposals that meet the input price.
-The price is inputted in FIL per GiB per epoch, and the default is 0.0000000005.
-
-## Ensure you can be discovered
-
-Clients need to be able to find you in order to make storage deals with you.
-While there isn't necessarily anything you need to do to become discoverable, here are some things you can
-try to check that people can connect to you.
-
-To start off, make sure you are connected to at least some peers, and your port is
-open and working.
-
-### Connect to your own node
-
-If you are in contact with someone else running Lotus, you can ask them to try connecting
-to your node. To do so, provide them your peer ID, which you can get by running `lotus net id` on
-your node.
-
-They can then try running `lotus net findpeer <peer ID>` to get your address(es), and can then
-run `lotus net connect <address>` to connect to you. If successful, your node will now
-appear on their peers list (run `lotus net peers` to check).
-
-You can also check this by running a second instance of Lotus yourself.
-
-### Query your own ask
-
-A client should be able to find your ask by running `lotus client query-ask <miner address>`. If
-someone is not able to retrieve your ask by doing so, then there is an issue with your node.
\ No newline at end of file
diff --git a/documentation/en/mining-lotus-worker.md b/documentation/en/mining-lotus-worker.md
deleted file mode 100644
index f93780c44..000000000
--- a/documentation/en/mining-lotus-worker.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Lotus Worker
-
-The **Lotus Worker** is an extra process that can offload heavy processing tasks from your **Lotus Miner**. The sealing process automatically runs in the **Lotus Miner** process, but you can use the Worker on another machine communicating over a fast network to free up resources on the machine running the mining process.
-
-## Note: Using the Lotus Worker from China
-
-If you are trying to use `lotus-worker` from China, you should set this **environment variable** on your machine:
-
-```sh
-export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
-```
-
-## Get Started
-
-Make sure that the `lotus-worker` is compiled and installed by running:
-
-```sh
-make lotus-worker
-```
-
-## Setting up the Miner
-
-First, you will need to ensure your `lotus-miner`'s API is accessible over the network.
-
-To do this, open up `~/.lotusminer/config.toml` (Or if you manually set `LOTUS_MINER_PATH`, look under that directory) and look for the API field.
-
-Default config:
-
-```toml
-[API]
-ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
-RemoteListenAddress = "127.0.0.1:2345"
-```
-
-To make your node accessible over the local area network, you will need to determine your machine's IP on the LAN, and change the `127.0.0.1` in the file to that address.
-
-A more permissive and less secure option is to change it to `0.0.0.0`. This will allow anyone who can connect to your computer on that port to access the [API](https://lotu.sh/en+api). They will still need an auth token.
-
-`RemoteListenAddress` must be set to an address which other nodes on your network will be able to reach.
-
-Next, you will need to [create an authentication token](https://lotu.sh/en+api-scripting-support#generate-a-jwt-46). All Lotus APIs require authentication tokens to ensure your processes are secure against attackers attempting to make unauthenticated requests to them.
-
-### Connect the Lotus Worker
-
-On the machine that will run `lotus-worker`, set the `MINER_API_INFO` environment variable to `TOKEN:MINER_NODE_MULTIADDR`, where `TOKEN` is the token we created above, and `MINER_NODE_MULTIADDR` is the `multiaddr` of the **Lotus Miner** API that was set in `config.toml`.
-
-Once this is set, run:
-
-```sh
-lotus-worker run
-```
-
-If you are running multiple workers on the same host, you will need to specify the `--listen` flag and ensure each worker is on a different port.
-
-To check that the **Lotus Worker** is connected to your **Lotus Miner**, run `lotus-miner sealing workers` and check that the remote worker count has increased.
-
-```sh
-why@computer ~/lotus> lotus-miner sealing workers
-Worker 0, host computer
- CPU: [ ] 0 core(s) in use
- RAM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
- VMEM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
- GPU: GeForce RTX 2080, not used
-
-Worker 1, host othercomputer
- CPU: [ ] 0 core(s) in use
- RAM: [|||||||||||||| ] 23% 14 GiB/62.7 GiB
- VMEM: [|||||||||||||| ] 23% 14 GiB/62.7 GiB
- GPU: GeForce RTX 2080, not used
-```
-
-### Running locally to manually manage process priority
-
-You can also run the **Lotus Worker** on the same machine as your **Lotus Miner**, so that you can manually manage process priority.
-To do so, you must first __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes.
-
-You can then keep the miner API on the local loopback interface and run the worker on the same machine:
-
-```sh
-lotus-worker run
-```
diff --git a/documentation/en/mining-troubleshooting.md b/documentation/en/mining-troubleshooting.md
deleted file mode 100644
index 5aaf9f6ef..000000000
--- a/documentation/en/mining-troubleshooting.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Mining Troubleshooting
-
-## Config: Filecoin Proof Parameters directory
-
-If you want to put the **Filecoin Proof Parameters** in a different directory, use the following environment variable:
-
-```sh
-FIL_PROOFS_PARAMETER_CACHE
-```
-
-## Error: Can't acquire bellman.lock
-
-The **Bellman** lockfile is created to lock a GPU for a process. This bug can occur when this file isn't properly cleaned up:
-
-```sh
-mining block failed: computing election proof: github.com/filecoin-project/lotus/miner.(*Miner).mineOne
-```
-
-This bug occurs when the miner can't acquire the `bellman.lock`. To fix it, stop `lotus-miner` and remove `/tmp/bellman.lock`.
-
-## Error: Failed to get api endpoint
-
-```sh
-lotus-miner info
-# WARN main lotus-storage-miner/main.go:73 failed to get api endpoint: (/Users/myrmidon/.lotusminer) %!w(*errors.errorString=&{API not running (no endpoint)}):
-```
-
-If you see this, that means your **Lotus Miner** isn't ready yet. You need to finish [syncing the chain](https://lotu.sh/en+join-testnet).
-
-## Error: Your computer may not be fast enough
-
-```sh
-CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up
-```
-
-If you see this, your computer is too slow: your blocks are not being included in the chain, and you will not receive any rewards.
-
-## Error: No space left on device
-
-```sh
-lotus-miner sectors pledge
-# No space left on device (os error 28)
-```
-
-If you see this, it means `pledge-sector` wrote too much data to `$TMPDIR`, which by default is on the root partition (this is common on Linux setups). The root partition is usually not the largest one, so you will need to point the environment variable at a different, larger location.
-
-## Error: GPU unused
-
-If you suspect that your GPU is not being used, first make sure it is properly configured as described in the [testing configuration page](hardware-mining.md). Once you've done that (and set the `BELLMAN_CUSTOM_GPU` as appropriate if necessary) you can verify your GPU is being used by running a quick lotus-bench benchmark.
-
-First, to watch GPU utilization, run `nvtop` in one terminal; then, in a separate terminal, run:
-
-```sh
-make bench
-./bench sealing --sector-size=2KiB
-```
-
-This process uses a fair amount of GPU and generally takes ~4 minutes to complete. If you do not see any activity in `nvtop` from lotus during the entire process, it is likely that your GPU is misconfigured.
-
-## Checking Sync Progress
-
-You can use this command to check how far behind you are on syncing:
-
-```sh
-date -d @$(./lotus chain getblock $(./lotus chain head) | jq .Timestamp)
-```
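
If you prefer to do the same arithmetic in Go, the sketch below converts a head block timestamp (the `.Timestamp` field printed by `lotus chain getblock`) into a lag against wall-clock time. The timestamp value shown is a hypothetical example.

```go
package main

import (
	"fmt"
	"time"
)

// syncLag reports how far behind wall-clock time the given head block is,
// based on its Unix timestamp.
func syncLag(headTimestamp int64) time.Duration {
	return time.Since(time.Unix(headTimestamp, 0)).Round(time.Second)
}

func main() {
	// Hypothetical value copied from `lotus chain getblock $(lotus chain head)`.
	fmt.Println("behind by:", syncLag(1598306400))
}
```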
diff --git a/documentation/en/mining.md b/documentation/en/mining.md
deleted file mode 100644
index 32c3c51d2..000000000
--- a/documentation/en/mining.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# Storage Mining
-
-Here are instructions for performing storage mining. For hardware specifications, please read [this](https://lotu.sh/en+hardware-mining).
-
-It is useful to [join the Testnet](https://lotu.sh/en+join-testnet) prior to attempting storage mining for the first time.
-
-## Note: Using the Lotus Miner from China
-
-If you are trying to use `lotus-miner` from China, you should set this **environment variable** on your machine:
-
-```sh
-export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
-```
-
-## Get started
-
-Please ensure that your wallet contains at least one **BLS address** (starting with `t3`); you can check with the following command:
-
-```sh
-lotus wallet list
-```
-
-If you do not have a BLS address, create a new BLS wallet:
-
-```sh
-lotus wallet new bls
-```
-
-With your wallet address:
-
-- Visit the [faucet](http://spacerace.faucet.glif.io/)
-- Paste the address you created under REQUEST.
-- Press the Request button.
-- Run `lotus-miner init --owner=<address> --worker=<address>`
-
-You will have to wait some time for this operation to complete.
-
-## Mining
-
-To mine:
-
-```sh
-lotus-miner run
-```
-
-If you are downloading **Filecoin Proof Parameters**, the download can take some time.
-
-Get information about your miner:
-
-```sh
-lotus-miner info
-# example: miner id `t0111`
-```
-
-**Seal** random data to start producing **PoSts**:
-
-```sh
-lotus-miner sectors pledge
-```
-
-- Warning: On Linux configurations, this command will write data to `$TMPDIR`, which is usually not the largest partition. You should point that variable to a larger partition if possible.
-
-Get **miner power** and **sector usage**:
-
-```sh
-lotus state power
-# returns total power
-
-lotus state power <miner address>
-
-lotus state sectors <miner address>
-```
-
-## Performance tuning
-
-### `FIL_PROOFS_MAXIMIZE_CACHING=1` Environment variable
-
-This env var can be used with `lotus-miner`, `lotus-worker`, and `lotus-bench` to make the precommit1 step faster at the cost of some memory use (1x sector size).
-
-### `FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1` Environment variable
-
-This env var can be used with `lotus-miner`, `lotus-worker`, and `lotus-bench` to enable experimental precommit2 GPU acceleration.
-
-### Setting multiaddresses
-
-Set multiaddresses for the miner to listen on in a miner's `config.toml` file
-(by default, it is located at `~/.lotusminer/config.toml`). The `ListenAddresses` in this file should be interface listen addresses (usually `/ip4/0.0.0.0/tcp/PORT`), and the `AnnounceAddresses` should match the addresses being passed to `set-addrs`.
-
-The addresses passed to the `set-addrs` parameter in the commands below should be currently active and dialable; confirm that they are before entering them.
-
-Once the config file has been updated, set the on-chain record of the miner's listen addresses:
-
-```
-lotus-miner actor set-addrs <multiaddr> ...
-```
-
-This updates the `MinerInfo` object in the miner's actor, which will be looked up
-when a client attempts to make a deal. Any number of addresses can be provided.
-
-Example:
-
-```
-lotus-miner actor set-addrs /ip4/123.123.73.123/tcp/12345 /ip4/223.223.83.223/tcp/23456
-```
-
-## Separate address for windowPoSt messages
-
-WindowPoSt is the mechanism through which storage is verified in Filecoin. It requires miners to submit proofs for all sectors every 24 hours, which requires sending messages to the chain.
-
-Because many other mining-related actions require sending messages to the chain, and not all of them are "high value", it may be desirable to use a separate account to send PoSt messages from. This allows setting lower GasFeeCaps on the lower-value messages without creating head-of-line blocking problems for the PoSt messages in congested chain conditions.
-
-To set this up, first create a new account, and send it some funds for gas fees:
-```sh
-lotus wallet new bls
-t3defg...
-
-lotus send t3defg... 100
-```
-
-Next, add the control address:
-```sh
-lotus-miner actor control set t3defg...
-Add t3defg...
-Pass --really-do-it to actually execute this action
-```
-
-Now actually set the addresses:
-```sh
-lotus-miner actor control set --really-do-it t3defg...
-Add t3defg...
-Message CID: bafy2..
-```
-
-Wait for the message to land on chain:
-```sh
-lotus state wait-msg bafy2..
-...
-Exit Code: 0
-...
-```
-
-Check the miner control address list to make sure the address was set up correctly:
-```sh
-lotus-miner actor control list
-name ID key use balance
-owner t01111 t3abcd... other 300 FIL
-worker t01111 t3abcd... other 300 FIL
-control-0 t02222 t3defg... post 100 FIL
-```
diff --git a/documentation/en/payment-channels.md b/documentation/en/payment-channels.md
deleted file mode 100644
index 7179da916..000000000
--- a/documentation/en/payment-channels.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Payment Channels
-
-Payment channels are used to transfer funds between two actors.
-
-For example, in Lotus a payment channel is created when a client wants to fetch data from a provider.
-The client sends vouchers for the payment channel, and the provider sends data in response.
-
-The payment channel is created on-chain with an initial amount.
-Vouchers allow the client and the provider to exchange funds incrementally off-chain.
-The provider can submit vouchers to chain at any stage.
-Either party to the payment channel can settle the payment channel on chain.
-After a settlement period (currently 12 hours) either party to the payment channel can call collect on chain.
-Collect sends the value of submitted vouchers to the channel recipient (the provider), and refunds the remaining channel balance to the channel creator (the client).
-
-Vouchers have a lane, a nonce and a value, where vouchers with a higher nonce supersede vouchers with a lower nonce in the same lane.
-Each deal is created on a different lane.
-
-Note that payment channels and vouchers can be used for any situation in which two parties need to incrementally transfer value between each other off-chain.
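
To make the lane/nonce rule concrete, here is a minimal sketch (illustrative types only, not the actual Lotus voucher structures) of how a recipient could keep the single best voucher per lane, since a higher nonce supersedes a lower one within the same lane:

```go
package main

import "fmt"

// Voucher is an illustrative stand-in for a payment channel voucher:
// within a lane, a voucher with a higher nonce supersedes one with a lower nonce.
type Voucher struct {
	Lane  uint64
	Nonce uint64
	Value uint64 // attoFIL in the real system; plain units here
}

// bestPerLane keeps, for each lane, the voucher with the highest nonce.
// These are the "best" vouchers the provider would submit on chain.
func bestPerLane(vouchers []Voucher) map[uint64]Voucher {
	best := make(map[uint64]Voucher)
	for _, v := range vouchers {
		if cur, ok := best[v.Lane]; !ok || v.Nonce > cur.Nonce {
			best[v.Lane] = v
		}
	}
	return best
}

func main() {
	received := []Voucher{
		{Lane: 0, Nonce: 1, Value: 2},
		{Lane: 0, Nonce: 2, Value: 4}, // supersedes nonce 1 in lane 0
		{Lane: 1, Nonce: 1, Value: 3}, // a different deal on a different lane
	}
	for lane, v := range bestPerLane(received) {
		fmt.Printf("lane %d: submit voucher nonce=%d value=%d\n", lane, v.Nonce, v.Value)
	}
}
```

Running this prints the single highest-nonce voucher for each lane, which are the vouchers worth submitting before collect.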
-
-## Using the CLI
-
-For example, a client creates a payment channel to a provider with a value of 10 FIL.
-
-```sh
-$ lotus paych add-funds <from address> <to address> 10
-
-```
-
-The client creates a voucher in lane 0 (implied) with nonce 1 (implied) and value 2.
-
-```sh
-$ lotus paych voucher create <channel address> 2
-
-```
-
-The client sends the voucher to the provider and the provider adds the voucher to their local store.
-
-```sh
-$ lotus paych voucher add <channel address> <voucher>
-```
-
-The provider sends some data to the client.
-
-The client creates a voucher in lane 0 (implied) with nonce 2 (implied) and value 4.
-
-```sh
-$ lotus paych voucher create <channel address> 4
-
-```
-
-The client sends the voucher to the provider; the provider adds the voucher and sends back more data, and so on.
-
-The client can add value to the channel after it has been created by calling `paych add-funds` with the same client and provider addresses.
-
-```sh
-$ lotus paych add-funds <from address> <to address> 5
- # Same addresses as above. The channel now has 15 FIL
-```
-
-Once the client has received all their data, they may settle the channel.
-Note that settlement doesn't have to be done immediately.
-For example the client may keep the channel open as long as it wants to continue making deals with the provider.
-
-```sh
-$ lotus paych settle <channel address>
-```
-
-The provider can submit vouchers to chain (note that lotus does this automatically when it sees a settle message appear on chain).
-The provider may have received many vouchers with incrementally higher values.
-The provider should submit the best vouchers. Note that there will be one best voucher for each lane.
-
-```sh
-$ lotus paych voucher best-spendable <channel address>
-
-
-
-
-$ lotus paych voucher submit <channel address> <voucher>
-```
-
-Once the settlement period is over, either the client or provider can call collect to disburse funds.
-
-```sh
-$ lotus paych collect <channel address>
-```
diff --git a/documentation/en/retrieving-data.md b/documentation/en/retrieving-data.md
deleted file mode 100644
index 7cb0e31be..000000000
--- a/documentation/en/retrieving-data.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Retrieving Data
-
-> There are recent bug reports with these instructions. If you happen to encounter any problems, please create a [GitHub issue](https://github.com/filecoin-project/lotus/issues/new) and a maintainer will address the problem as soon as they can.
-
-Here are the operations you can perform after you have stored and sealed a **Data CID** with the **Lotus Miner** in the network.
-
-If you would like to learn how to store a **Data CID** on a miner, read the instructions [here](https://lotu.sh/en+storing-data).
-
-## Find by Data CID
-
-```sh
-lotus client find <Data CID>
-# LOCAL
-# RETRIEVAL <miner>@<peer ID>-<price>-<size>
-```
-
-## Retrieve by Data CID
-
-All fields are required.
-
-```sh
-lotus client retrieve <Data CID> <outfile>
-```
-
-If the outfile does not exist it will be created in the Lotus repository directory.
-
-This command will initiate a **retrieval deal** and write the data to your computer. This process may take 2 to 10 minutes.
diff --git a/documentation/en/setting-a-static-port.md b/documentation/en/setting-a-static-port.md
deleted file mode 100644
index 97ac6528e..000000000
--- a/documentation/en/setting-a-static-port.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Static Ports
-
-Depending on how your network is set up, you may need to set a static port to successfully connect to peers to perform storage deals with your **Lotus Miner**.
-
-## Setup
-
-To change the random **swarm port**, you may edit the `config.toml` file located under `$LOTUS_MINER_PATH`. The default location of this file is `$HOME/.lotusminer`.
-
-To change the port to `1347`:
-
-```toml
-[Libp2p]
- ListenAddresses = ["/ip4/0.0.0.0/tcp/1347", "/ip6/::/tcp/1347"]
-```
-
-After changing the port value, restart your **daemon**.
-
-## Announce Addresses
-
-If the **swarm port** is port-forwarded from another address, it is possible to control what addresses
-are announced to the network.
-
-```toml
-[Libp2p]
- AnnounceAddresses = ["/ip4/<public IP>/tcp/1347"]
-```
-
-If non-empty, this array specifies the swarm addresses to announce to the network. If empty, the daemon will announce inferred swarm addresses.
-
-Similarly, it is possible to set `NoAnnounceAddresses` with an array of addresses to not announce to the network.
-
-## Ubuntu's Uncomplicated Firewall
-
-Open the firewall port manually:
-
-```sh
-ufw allow 1347/tcp
-```
-
-Or open and modify the profile located at `/etc/ufw/applications.d/lotus-daemon`:
-
-```ini
-[Lotus Daemon]
-title=Lotus Daemon
-description=Lotus Daemon firewall rules
-ports=1347/tcp
-```
-
-Then run these commands:
-
-```sh
-ufw update lotus-daemon
-ufw allow lotus-daemon
-```
diff --git a/documentation/en/setup-troubleshooting.md b/documentation/en/setup-troubleshooting.md
deleted file mode 100644
index a1c78b51b..000000000
--- a/documentation/en/setup-troubleshooting.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Setup Troubleshooting
-
-## Config: Clearing data
-
-Here is a command that will delete your chain data, stored wallets, stored data and any miners you have set up:
-
-```sh
-rm -rf ~/.lotus ~/.lotusminer
-```
-
-This command usually resolves any issues with running `lotus`, but it is not always required for updates. In the future, we will share information about when an update requires resetting your chain data and miners.
-
-## Error: Failed to connect bootstrap peer
-
-```sh
-WARN peermgr peermgr/peermgr.go:131 failed to connect to bootstrap peer: failed to dial <peer ID>: all dials failed
- * [/ip4/147.75.80.17/tcp/1347] failed to negotiate security protocol: connected to wrong peer
-```
-
-- Try running the build steps again and make sure that you have the latest code from GitHub.
-
-```sh
-ERROR hello hello/hello.go:81 other peer has different genesis!
-```
-
-- Try deleting your file system's `~/.lotus` directory. Check that it exists with `ls ~/.lotus`.
-
-```sh
-- repo is already locked
-```
-
-- You already have another lotus daemon running.
-
-## Config: Open files limit
-
-On most systems you can check the open files limit with:
-
-```sh
-ulimit -n
-```
-
-You can also modify this number with the `ulimit` command, which controls the resources available to the shell and any processes it starts. If the number is below 10000, raise it with the following command before starting the Lotus daemon:
-
-```sh
-ulimit -n 10000
-```
diff --git a/documentation/en/storing-data-troubleshooting.md b/documentation/en/storing-data-troubleshooting.md
deleted file mode 100644
index c8a0254fa..000000000
--- a/documentation/en/storing-data-troubleshooting.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Storage Troubleshooting
-
-## Error: Routing: not found
-
-```sh
-WARN main lotus/main.go:72 routing: not found
-```
-
-- This miner is offline.
-
-## Error: Failed to start deal
-
-```sh
-WARN main lotus/main.go:72 failed to start deal: computing commP failed: generating CommP: Piece must be at least 127 bytes
-```
-
-- There is a minimum file size of 127 bytes.
-
-## Error: 0kb file response during retrieval
-
-In order to retrieve a file, it must be sealed. Miners can check sealing progress with this command:
-
-```sh
-lotus-miner sectors list
-```
-
-When sealing is complete, `pSet: NO` will become `pSet: YES`. From now on the **Data CID** is [retrievable](https://lotu.sh/en+retrieving-data) from the **Lotus Miner**.
diff --git a/documentation/en/storing-data.md b/documentation/en/storing-data.md
deleted file mode 100644
index 67d2b1533..000000000
--- a/documentation/en/storing-data.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Storing Data
-
-> There are recent bug reports with these instructions. If you happen to encounter any problems, please create a [GitHub issue](https://github.com/filecoin-project/lotus/issues/new) and a maintainer will address the problem as soon as they can.
-
-Here are instructions for how to store data on the **Lotus Testnet**.
-
-## Adding a file locally
-
-Adding a file locally allows you to make miner deals on the **Lotus Testnet**.
-
-```sh
-lotus client import ./your-example-file.txt
-```
-
-Upon success, this command will return a **Data CID**.
-
-## List your local files
-
-To see a list of your local files by `CID`, `name`, `size` in bytes, and `status`, run:
-
-```sh
-lotus client local
-```
-
-An example of the output:
-
-```sh
-bafkreierupr5ioxn4obwly4i2a5cd2rwxqi6kwmcyyylifxjsmos7hrgpe Development/sample-1.txt 2332 ok
-bafkreieuk7h4zs5alzpdyhlph4lxkefowvwdho3a3pml6j7dam5mipzaii Development/sample-2.txt 30618 ok
-```
-
-## Make a Miner Deal on Lotus Testnet
-
-Get a list of all miners that can store data:
-
-```sh
-lotus state list-miners
-```
-
-Get the requirements of a miner you wish to store data with:
-
-```sh
-lotus client query-ask <miner address>
-```
-
-Store a **Data CID** with a miner:
-
-```sh
-lotus client deal <Data CID> <miner address> <price> <duration>
-```
-
-Check the status of a deal:
-
-```sh
-lotus client list-deals
-```
-
-- The `duration`, which determines how long the miner will keep your file hosted, is specified in blocks; each block corresponds to 25 seconds. A quick conversion helper is sketched below.
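
As a quick sanity check on that conversion (assuming the 25-second block time stated above), a hypothetical helper for turning a storage period into a block count could look like this:

```go
package main

import (
	"fmt"
	"time"
)

const blockTime = 25 * time.Second // block time stated in this guide

// durationToBlocks converts a wall-clock storage period into a deal duration in blocks.
func durationToBlocks(d time.Duration) int64 {
	return int64(d / blockTime)
}

func main() {
	// e.g. storing data for 30 days:
	fmt.Println(durationToBlocks(30 * 24 * time.Hour)) // prints 103680
}
```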
-
-Upon success, this command will return a **Deal CID**.
-
-The miner will need to **seal** the file before it can be retrieved. If the **Lotus Miner** is not running on a machine designed for sealing, the process will take a very long time.
diff --git a/documentation/en/storing-ipfs-integration.md b/documentation/en/storing-ipfs-integration.md
deleted file mode 100644
index 041364380..000000000
--- a/documentation/en/storing-ipfs-integration.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# IPFS Integration
-
-Lotus supports making deals with data stored in IPFS, without having to re-import it into Lotus.
-
-To enable this integration, you need to have an IPFS daemon running in the background.
-Then, open up `~/.lotus/config.toml` (or, if you manually set `LOTUS_PATH`, look under that directory),
-find the `[Client]` section, and set `UseIpfs` to `true`.
-
-```toml
-[Client]
-UseIpfs = true
-```
-
-After restarting the lotus daemon, you should be able to make deals with data in your IPFS node:
-
-```sh
-$ ipfs add -r SomeData
-QmSomeData
-$ ./lotus client deal QmSomeData t01000 0.0000000001 80000
-```
diff --git a/documentation/en/updating-lotus.md b/documentation/en/updating-lotus.md
deleted file mode 100644
index 862cea136..000000000
--- a/documentation/en/updating-lotus.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Updating Lotus
-
-If you installed Lotus on your machine, you can upgrade to the latest version by doing the following:
-
-```sh
-# get the latest
-git pull origin master
-
-# clean and remake the binaries
-make clean && make build
-
-# install binaries in the correct location
-make install # or sudo make install if necessary
-```
diff --git a/extern/fil-blst b/extern/fil-blst
new file mode 160000
index 000000000..5f93488fc
--- /dev/null
+++ b/extern/fil-blst
@@ -0,0 +1 @@
+Subproject commit 5f93488fc0dbfb450f2355269f18fc67010d59bb
diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi
index 777a6fbf4..f640612a1 160000
--- a/extern/filecoin-ffi
+++ b/extern/filecoin-ffi
@@ -1 +1 @@
-Subproject commit 777a6fbf4446b1112adfd4fa5dd88e0c88974122
+Subproject commit f640612a1a1f7a2dd8b3a49e1531db0aa0f63447
diff --git a/extern/oni b/extern/oni
new file mode 160000
index 000000000..dbee44e4f
--- /dev/null
+++ b/extern/oni
@@ -0,0 +1 @@
+Subproject commit dbee44e4f940a502971f17116ccbba61ceaf2674
diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go
index 06c823bb8..31a1a3690 100644
--- a/extern/sector-storage/faults.go
+++ b/extern/sector-storage/faults.go
@@ -8,8 +8,8 @@ import (
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
// FaultTracker TODO: Track things more actively
diff --git a/extern/sector-storage/ffiwrapper/basicfs/fs.go b/extern/sector-storage/ffiwrapper/basicfs/fs.go
index ae17273e9..00aa49b98 100644
--- a/extern/sector-storage/ffiwrapper/basicfs/fs.go
+++ b/extern/sector-storage/ffiwrapper/basicfs/fs.go
@@ -6,7 +6,7 @@ import (
"path/filepath"
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
diff --git a/extern/sector-storage/ffiwrapper/config.go b/extern/sector-storage/ffiwrapper/config.go
index 707fc6746..ca32b1191 100644
--- a/extern/sector-storage/ffiwrapper/config.go
+++ b/extern/sector-storage/ffiwrapper/config.go
@@ -3,7 +3,7 @@ package ffiwrapper
import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
type Config struct {
diff --git a/extern/sector-storage/ffiwrapper/partialfile.go b/extern/sector-storage/ffiwrapper/partialfile.go
index 597e33105..e19930ac1 100644
--- a/extern/sector-storage/ffiwrapper/partialfile.go
+++ b/extern/sector-storage/ffiwrapper/partialfile.go
@@ -10,7 +10,7 @@ import (
"golang.org/x/xerrors"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
diff --git a/extern/sector-storage/ffiwrapper/sealer.go b/extern/sector-storage/ffiwrapper/sealer.go
index c97557a37..c1b558d9a 100644
--- a/extern/sector-storage/ffiwrapper/sealer.go
+++ b/extern/sector-storage/ffiwrapper/sealer.go
@@ -1,7 +1,7 @@
package ffiwrapper
import (
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log/v2"
)
diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go
index d4f796dcb..d75501838 100644
--- a/extern/sector-storage/ffiwrapper/sealer_cgo.go
+++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go
@@ -17,7 +17,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
commcid "github.com/filecoin-project/go-fil-commcid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
@@ -410,7 +410,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
return false, xerrors.Errorf("closing partial file: %w", err)
}
- return false, nil
+ return true, nil
}
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
@@ -546,34 +546,37 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
defer done()
pf, err := openPartialFile(maxPieceSize, paths.Unsealed)
- if xerrors.Is(err, os.ErrNotExist) {
- return xerrors.Errorf("opening partial file: %w", err)
- }
+ if err == nil {
+ var at uint64
+ for sr.HasNext() {
+ r, err := sr.NextRun()
+ if err != nil {
+ _ = pf.Close()
+ return err
+ }
- var at uint64
- for sr.HasNext() {
- r, err := sr.NextRun()
- if err != nil {
- _ = pf.Close()
+ offset := at
+ at += r.Len
+ if !r.Val {
+ continue
+ }
+
+ err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded())
+ if err != nil {
+ _ = pf.Close()
+ return xerrors.Errorf("free partial file range: %w", err)
+ }
+ }
+
+ if err := pf.Close(); err != nil {
return err
}
-
- offset := at
- at += r.Len
- if !r.Val {
- continue
- }
-
- err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded())
- if err != nil {
- _ = pf.Close()
- return xerrors.Errorf("free partial file range: %w", err)
+ } else {
+ if !xerrors.Is(err, os.ErrNotExist) {
+ return xerrors.Errorf("opening partial file: %w", err)
}
}
- if err := pf.Close(); err != nil {
- return err
- }
}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage)
diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go
index b484b391f..bb26adb77 100644
--- a/extern/sector-storage/ffiwrapper/sealer_test.go
+++ b/extern/sector-storage/ffiwrapper/sealer_test.go
@@ -15,6 +15,8 @@ import (
"testing"
"time"
+ saproof "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
@@ -22,7 +24,7 @@ import (
"golang.org/x/xerrors"
paramfetch "github.com/filecoin-project/go-paramfetch"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
ffi "github.com/filecoin-project/filecoin-ffi"
@@ -91,7 +93,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
t.Fatalf("%+v", err)
}
- ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{
+ ok, err := ProofVerifier.VerifySeal(saproof.SealVerifyInfo{
SectorID: s.id,
SealedCID: s.cids.Sealed,
SealProof: sealProofType,
@@ -166,50 +168,34 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
}
}
-func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time {
- /*randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
+func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
+ randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
- sis := make([]abi.SectorInfo, len(seals))
+ sis := make([]saproof.SectorInfo, len(seals))
for i, s := range seals {
- sis[i] = abi.SectorInfo{
- RegisteredProof: sealProofType,
- SectorNumber: s.id.Number,
- SealedCID: s.cids.Sealed,
+ sis[i] = saproof.SectorInfo{
+ SealProof: sealProofType,
+ SectorNumber: s.id.Number,
+ SealedCID: s.cids.Sealed,
}
}
- candidates, err := sealer.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{})
- if err != nil {
- t.Fatalf("%+v", err)
- }*/
-
- fmt.Println("skipping post")
-
- genCandidates := time.Now()
-
- /*if len(candidates) != 1 {
- t.Fatal("expected 1 candidate")
+ proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].id.Miner, sis, randomness)
+ if len(skipped) > 0 {
+ require.Error(t, err)
+ require.EqualValues(t, skipped, skp)
+ return
}
- candidatesPrime := make([]abi.PoStCandidate, len(candidates))
- for idx := range candidatesPrime {
- candidatesPrime[idx] = candidates[idx].Candidate
- }
-
- proofs, err := sealer.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime)
if err != nil {
t.Fatalf("%+v", err)
}
- ePoStChallengeCount := ElectionPostChallengeCount(uint64(len(sis)), 0)
-
- ok, err := ProofVerifier.VerifyElectionPost(context.TODO(), abi.PoStVerifyInfo{
- Randomness: randomness,
- Candidates: candidatesPrime,
- Proofs: proofs,
- EligibleSectors: sis,
- Prover: seals[0].id.Miner,
- ChallengeCount: ePoStChallengeCount,
+ ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), saproof.WindowPoStVerifyInfo{
+ Randomness: randomness,
+ Proofs: proofs,
+ ChallengedSectors: sis,
+ Prover: seals[0].id.Miner,
})
if err != nil {
t.Fatalf("%+v", err)
@@ -217,8 +203,21 @@ func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time {
if !ok {
t.Fatal("bad post")
}
- */
- return genCandidates
+}
+
+func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) {
+ paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, stores.FTSealed, 0, stores.PathStorage)
+ require.NoError(t, err)
+ defer done()
+
+ log.Infof("corrupt %s", paths.Sealed)
+ f, err := os.OpenFile(paths.Sealed, os.O_RDWR, 0664)
+ require.NoError(t, err)
+
+ _, err = f.WriteAt(bytes.Repeat([]byte{'d'}, 2048), 0)
+ require.NoError(t, err)
+
+ require.NoError(t, f.Close())
}
func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
@@ -246,6 +245,10 @@ func TestDownloadParams(t *testing.T) {
}
func TestSealAndVerify(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
@@ -297,11 +300,11 @@ func TestSealAndVerify(t *testing.T) {
commit := time.Now()
- genCandidiates := post(t, sb, s)
+ post(t, sb, nil, s)
epost := time.Now()
- post(t, sb, s)
+ post(t, sb, nil, s)
if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil {
t.Fatalf("%+v", err)
@@ -311,11 +314,14 @@ func TestSealAndVerify(t *testing.T) {
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
- fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(commit).String())
- fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String())
+ fmt.Printf("EPoSt: %s\n", epost.Sub(commit).String())
}
func TestSealPoStNoCommit(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
@@ -368,16 +374,19 @@ func TestSealPoStNoCommit(t *testing.T) {
t.Fatal(err)
}
- genCandidiates := post(t, sb, s)
+ post(t, sb, nil, s)
epost := time.Now()
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
- fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(precommit).String())
- fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String())
+ fmt.Printf("EPoSt: %s\n", epost.Sub(precommit).String())
}
-func TestSealAndVerify2(t *testing.T) {
+func TestSealAndVerify3(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
@@ -417,22 +426,32 @@ func TestSealAndVerify2(t *testing.T) {
si1 := abi.SectorID{Miner: miner, Number: 1}
si2 := abi.SectorID{Miner: miner, Number: 2}
+ si3 := abi.SectorID{Miner: miner, Number: 3}
s1 := seal{id: si1}
s2 := seal{id: si2}
+ s3 := seal{id: si3}
- wg.Add(2)
+ wg.Add(3)
go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
time.Sleep(100 * time.Millisecond)
go s2.precommit(t, sb, si2, wg.Done) //nolint: staticcheck
+ time.Sleep(100 * time.Millisecond)
+ go s3.precommit(t, sb, si3, wg.Done) //nolint: staticcheck
wg.Wait()
- wg.Add(2)
+ wg.Add(3)
go s1.commit(t, sb, wg.Done) //nolint: staticcheck
go s2.commit(t, sb, wg.Done) //nolint: staticcheck
+ go s3.commit(t, sb, wg.Done) //nolint: staticcheck
wg.Wait()
- post(t, sb, s1, s2)
+ post(t, sb, nil, s1, s2, s3)
+
+ corrupt(t, sb, si1)
+ corrupt(t, sb, si2)
+
+ post(t, sb, []abi.SectorID{si1, si2}, s1, s2, s3)
}
func BenchmarkWriteWithAlignment(b *testing.B) {
diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go
index a634134ee..318dbd2b0 100644
--- a/extern/sector-storage/ffiwrapper/types.go
+++ b/extern/sector-storage/ffiwrapper/types.go
@@ -4,9 +4,11 @@ import (
"context"
"io"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/ipfs/go-cid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
@@ -33,9 +35,9 @@ type Storage interface {
}
type Verifier interface {
- VerifySeal(abi.SealVerifyInfo) (bool, error)
- VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error)
- VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error)
+ VerifySeal(proof.SealVerifyInfo) (bool, error)
+ VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error)
+ VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
}
diff --git a/extern/sector-storage/ffiwrapper/unseal_ranges.go b/extern/sector-storage/ffiwrapper/unseal_ranges.go
index 2e5119994..4519fc21e 100644
--- a/extern/sector-storage/ffiwrapper/unseal_ranges.go
+++ b/extern/sector-storage/ffiwrapper/unseal_ranges.go
@@ -5,7 +5,7 @@ import (
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go
index de6fc0849..d6c0ae35f 100644
--- a/extern/sector-storage/ffiwrapper/verifier_cgo.go
+++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go
@@ -5,9 +5,11 @@ package ffiwrapper
import (
"context"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
ffi "github.com/filecoin-project/filecoin-ffi"
@@ -16,7 +18,7 @@ import (
"go.opencensus.io/trace"
)
-func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
+func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
if err != nil {
@@ -30,7 +32,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return ffi.GenerateWinningPoSt(minerID, privsectors, randomness)
}
-func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
+func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
if err != nil {
@@ -38,11 +40,24 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s
}
defer done()
- proof, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness)
- return proof, skipped, err
+ if len(skipped) > 0 {
+ return nil, skipped, xerrors.Errorf("pubSectorToPriv skipped some sectors")
+ }
+
+ proof, faulty, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness)
+
+ var faultyIDs []abi.SectorID
+ for _, f := range faulty {
+ faultyIDs = append(faultyIDs, abi.SectorID{
+ Miner: minerID,
+ Number: f,
+ })
+ }
+
+ return proof, faultyIDs, err
}
-func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
+func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
fmap := map[abi.SectorNumber]struct{}{}
for _, fault := range faults {
fmap[fault] = struct{}{}
@@ -95,11 +110,11 @@ type proofVerifier struct{}
var ProofVerifier = proofVerifier{}
-func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) {
+func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) {
return ffi.VerifySeal(info)
}
-func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
+func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
defer span.End()
@@ -107,7 +122,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoSt
return ffi.VerifyWinningPoSt(info)
}
-func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
+func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
defer span.End()
diff --git a/extern/sector-storage/fr32/fr32.go b/extern/sector-storage/fr32/fr32.go
index b7248db7e..17e6a1142 100644
--- a/extern/sector-storage/fr32/fr32.go
+++ b/extern/sector-storage/fr32/fr32.go
@@ -5,7 +5,7 @@ import (
"runtime"
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
var MTTresh = uint64(32 << 20)
diff --git a/extern/sector-storage/fr32/fr32_ffi_cmp_test.go b/extern/sector-storage/fr32/fr32_ffi_cmp_test.go
index 2a602424a..3d5679095 100644
--- a/extern/sector-storage/fr32/fr32_ffi_cmp_test.go
+++ b/extern/sector-storage/fr32/fr32_ffi_cmp_test.go
@@ -12,7 +12,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
)
diff --git a/extern/sector-storage/fr32/fr32_test.go b/extern/sector-storage/fr32/fr32_test.go
index e27e7b1e3..415134272 100644
--- a/extern/sector-storage/fr32/fr32_test.go
+++ b/extern/sector-storage/fr32/fr32_test.go
@@ -9,7 +9,7 @@ import (
"testing"
ffi "github.com/filecoin-project/filecoin-ffi"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go
index 8a1bbe087..20f3e9b31 100644
--- a/extern/sector-storage/fr32/readers.go
+++ b/extern/sector-storage/fr32/readers.go
@@ -6,7 +6,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
type unpadReader struct {
diff --git a/extern/sector-storage/fr32/readers_test.go b/extern/sector-storage/fr32/readers_test.go
index e87a776ef..706af5fee 100644
--- a/extern/sector-storage/fr32/readers_test.go
+++ b/extern/sector-storage/fr32/readers_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
)
diff --git a/extern/sector-storage/fr32/utils.go b/extern/sector-storage/fr32/utils.go
index 9f4093c40..26c348f4f 100644
--- a/extern/sector-storage/fr32/utils.go
+++ b/extern/sector-storage/fr32/utils.go
@@ -3,7 +3,7 @@ package fr32
import (
"math/bits"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
func subPieces(in abi.UnpaddedPieceSize) []abi.UnpaddedPieceSize {
diff --git a/extern/sector-storage/localworker.go b/extern/sector-storage/localworker.go
index 773ef2d3b..b1193a2e2 100644
--- a/extern/sector-storage/localworker.go
+++ b/extern/sector-storage/localworker.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
storage2 "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@@ -26,6 +26,7 @@ var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stor
type WorkerConfig struct {
SealProof abi.RegisteredSealProof
TaskTypes []sealtasks.TaskType
+ NoSwap bool
}
type LocalWorker struct {
@@ -33,6 +34,7 @@ type LocalWorker struct {
storage stores.Store
localStore *stores.Local
sindex stores.SectorIndex
+ noSwap bool
acceptTasks map[sealtasks.TaskType]struct{}
}
@@ -50,6 +52,7 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local,
storage: store,
localStore: local,
sindex: sindex,
+ noSwap: wcfg.NoSwap,
acceptTasks: acceptTasks,
}
@@ -275,11 +278,16 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
}
+ memSwap := mem.VirtualTotal
+ if l.noSwap {
+ memSwap = 0
+ }
+
return storiface.WorkerInfo{
Hostname: hostname,
Resources: storiface.WorkerResources{
MemPhysical: mem.Total,
- MemSwap: mem.VirtualTotal,
+ MemSwap: memSwap,
MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
CPUs: uint64(runtime.NumCPU()),
GPUs: gpus,
diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go
index 300958e39..73a5eb51e 100644
--- a/extern/sector-storage/manager.go
+++ b/extern/sector-storage/manager.go
@@ -12,7 +12,7 @@ import (
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@@ -203,30 +203,26 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.Path
}
}
-func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
+func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {
+
+ // acquire a lock purely for reading unsealed sectors
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
- return xerrors.Errorf("acquiring sector lock: %w", err)
+ if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTNone); err != nil {
+ returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
+ return
}
// passing 0 spt because we only need it when allowFetch is true
best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
if err != nil {
- return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
+ returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
+ return
}
- var selector WorkerSelector
- if len(best) == 0 { // new
- selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
- } else { // append to existing
- selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
- }
-
- var readOk bool
-
- if len(best) > 0 {
+ foundUnsealed = len(best) > 0
+ if foundUnsealed { // append to existing
// There is unsealed sector, see if we can read from it
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
@@ -236,12 +232,27 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return err
})
if err != nil {
- return xerrors.Errorf("reading piece from sealed sector: %w", err)
+ returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err)
}
+ } else {
+ selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
+ }
+ return
+}
- if readOk {
- return nil
- }
+func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
+ foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size)
+ if err != nil {
+ return err
+ }
+ if readOk {
+ return nil
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
+ return xerrors.Errorf("acquiring unseal sector lock: %w", err)
}
unsealFetch := func(ctx context.Context, worker Worker) error {
@@ -249,7 +260,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return xerrors.Errorf("copy sealed/cache sector data: %w", err)
}
- if len(best) > 0 {
+ if foundUnsealed {
if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil {
return xerrors.Errorf("copy unsealed sector data: %w", err)
}
@@ -257,6 +268,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return nil
}
+ if unsealed == cid.Undef {
+ return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size)
+ }
err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)
})
@@ -274,7 +288,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return xerrors.Errorf("reading piece from sealed sector: %w", err)
}
- if readOk {
+ if !readOk {
return xerrors.Errorf("failed to read unsealed piece")
}
diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go
index 13ad9f8bf..ee704cb5a 100644
--- a/extern/sector-storage/manager_test.go
+++ b/extern/sector-storage/manager_test.go
@@ -16,7 +16,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/google/uuid"
logging "github.com/ipfs/go-log"
diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go
index 4afe5f096..64207e66d 100644
--- a/extern/sector-storage/mock/mock.go
+++ b/extern/sector-storage/mock/mock.go
@@ -9,8 +9,10 @@ import (
"math/rand"
"sync"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
commcid "github.com/filecoin-project/go-fil-commcid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
@@ -64,8 +66,9 @@ const (
)
type sectorState struct {
- pieces []cid.Cid
- failed bool
+ pieces []cid.Cid
+ failed bool
+ corrupted bool
state int
@@ -249,6 +252,18 @@ func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
return nil
}
+func (mgr *SectorMgr) MarkCorrupted(sid abi.SectorID, corrupted bool) error {
+ mgr.lk.Lock()
+ defer mgr.lk.Unlock()
+ ss, ok := mgr.sectors[sid]
+ if !ok {
+ return fmt.Errorf("no such sector in storage")
+ }
+
+ ss.corrupted = corrupted
+ return nil
+}
+
func opFinishWait(ctx context.Context) {
val, ok := ctx.Value("opfinish").(chan struct{})
if !ok {
@@ -265,14 +280,16 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
}
}
-func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
+func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
}
-func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
- si := make([]abi.SectorInfo, 0, len(sectorInfo))
+func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
+ si := make([]proof.SectorInfo, 0, len(sectorInfo))
var skipped []abi.SectorID
+ var err error
+
for _, info := range sectorInfo {
sid := abi.SectorID{
Miner: minerID,
@@ -281,17 +298,22 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
_, found := mgr.sectors[sid]
- if found && !mgr.sectors[sid].failed {
+ if found && !mgr.sectors[sid].failed && !mgr.sectors[sid].corrupted {
si = append(si, info)
} else {
skipped = append(skipped, sid)
+ err = xerrors.Errorf("skipped some sectors")
}
}
+ if err != nil {
+ return nil, skipped, err
+ }
+
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
}
-func generateFakePoStProof(sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) []byte {
+func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte {
hasher := sha256.New()
_, _ = hasher.Write(randomness)
for _, info := range sectorInfo {
@@ -304,13 +326,13 @@ func generateFakePoStProof(sectorInfo []abi.SectorInfo, randomness abi.PoStRando
}
-func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
+func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof {
wp, err := rpt(sectorInfo[0].SealProof)
if err != nil {
panic(err)
}
- return []abi.PoStProof{
+ return []proof.PoStProof{
{
PoStProof: wp,
ProofBytes: generateFakePoStProof(sectorInfo, randomness),
@@ -384,7 +406,7 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealP
return bad, nil
}
-func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
+func (m mockVerif) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
if len(svi.Proof) != 32 { // Real ones are longer, but this should be fine
return false, nil
}
@@ -398,11 +420,11 @@ func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
return true, nil
}
-func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
+func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
return true, nil
}
-func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
+func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
diff --git a/extern/sector-storage/mock/mock_test.go b/extern/sector-storage/mock/mock_test.go
index c7d43e8b9..47c060f66 100644
--- a/extern/sector-storage/mock/mock_test.go
+++ b/extern/sector-storage/mock/mock_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
func TestOpFinish(t *testing.T) {
diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/resources.go
index 2fa797267..6b531e82b 100644
--- a/extern/sector-storage/resources.go
+++ b/extern/sector-storage/resources.go
@@ -1,7 +1,7 @@
package sectorstorage
import (
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)
@@ -10,14 +10,38 @@ type Resources struct {
MinMemory uint64 // What Must be in RAM for decent perf
MaxMemory uint64 // Memory required (swap + ram)
- Threads int // -1 = multithread
- CanGPU bool
+ MaxParallelism int // -1 = multithread
+ CanGPU bool
BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads)
}
-func (r Resources) MultiThread() bool {
- return r.Threads == -1
+/*
+
+ Percent of threads to allocate to parallel tasks
+
+ 12 * 0.92 = 11
+ 16 * 0.92 = 14
+ 24 * 0.92 = 22
+ 32 * 0.92 = 29
+ 64 * 0.92 = 58
+ 128 * 0.92 = 117
+
+*/
+var ParallelNum uint64 = 92
+var ParallelDenom uint64 = 100
+
+// TODO: Take NUMA into account
+func (r Resources) Threads(wcpus uint64) uint64 {
+ if r.MaxParallelism == -1 {
+ n := (wcpus * ParallelNum) / ParallelDenom
+ if n == 0 {
+ return wcpus
+ }
+ return n
+ }
+
+ return uint64(r.MaxParallelism)
}
var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
@@ -26,7 +50,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 30,
MinMemory: 8 << 30,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 1 << 30,
},
@@ -34,7 +58,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 4 << 30,
MinMemory: 4 << 30,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 1 << 30,
},
@@ -42,7 +66,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 1 << 30,
},
@@ -50,7 +74,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 2 << 10,
},
@@ -58,7 +82,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 8 << 20,
},
@@ -68,7 +92,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 128 << 30,
MinMemory: 112 << 30,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 10 << 20,
},
@@ -76,7 +100,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 64 << 30,
MinMemory: 56 << 30,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 10 << 20,
},
@@ -84,7 +108,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30,
MinMemory: 768 << 20,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 1 << 20,
},
@@ -92,7 +116,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 2 << 10,
},
@@ -100,35 +124,35 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
- Threads: 1,
+ MaxParallelism: 1,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTPreCommit2: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
- MaxMemory: 64 << 30,
- MinMemory: 64 << 30,
+ MaxMemory: 30 << 30,
+ MinMemory: 30 << 30,
- Threads: -1,
- CanGPU: true,
+ MaxParallelism: -1,
+ CanGPU: true,
- BaseMinMemory: 60 << 30,
+ BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
- MaxMemory: 32 << 30,
- MinMemory: 32 << 30,
+ MaxMemory: 15 << 30,
+ MinMemory: 15 << 30,
- Threads: -1,
- CanGPU: true,
+ MaxParallelism: -1,
+ CanGPU: true,
- BaseMinMemory: 30 << 30,
+ BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30,
- Threads: -1,
+ MaxParallelism: -1,
BaseMinMemory: 1 << 30,
},
@@ -136,7 +160,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
- Threads: -1,
+ MaxParallelism: -1,
BaseMinMemory: 2 << 10,
},
@@ -144,7 +168,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
- Threads: -1,
+ MaxParallelism: -1,
BaseMinMemory: 8 << 20,
},
@@ -154,7 +178,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
- Threads: 0,
+ MaxParallelism: 0,
BaseMinMemory: 1 << 30,
},
@@ -162,7 +186,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
- Threads: 0,
+ MaxParallelism: 0,
BaseMinMemory: 1 << 30,
},
@@ -170,7 +194,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
- Threads: 0,
+ MaxParallelism: 0,
BaseMinMemory: 1 << 30,
},
@@ -178,7 +202,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
- Threads: 0,
+ MaxParallelism: 0,
BaseMinMemory: 2 << 10,
},
@@ -186,7 +210,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
- Threads: 0,
+ MaxParallelism: 0,
BaseMinMemory: 8 << 20,
},
@@ -196,8 +220,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 190 << 30, // TODO: Confirm
MinMemory: 60 << 30,
- Threads: -1,
- CanGPU: true,
+ MaxParallelism: -1,
+ CanGPU: true,
BaseMinMemory: 64 << 30, // params
},
@@ -205,8 +229,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory
MinMemory: 30 << 30,
- Threads: -1,
- CanGPU: true,
+ MaxParallelism: -1,
+ CanGPU: true,
BaseMinMemory: 32 << 30, // params
},
@@ -214,8 +238,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30,
- Threads: 1, // This is fine
- CanGPU: true,
+ MaxParallelism: 1, // This is fine
+ CanGPU: true,
BaseMinMemory: 10 << 30,
},
@@ -223,8 +247,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
- Threads: 1,
- CanGPU: true,
+ MaxParallelism: 1,
+ CanGPU: true,
BaseMinMemory: 2 << 10,
},
@@ -232,8 +256,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
- Threads: 1,
- CanGPU: true,
+ MaxParallelism: 1,
+ CanGPU: true,
BaseMinMemory: 8 << 20,
},
@@ -243,8 +267,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
- Threads: 0,
- CanGPU: false,
+ MaxParallelism: 0,
+ CanGPU: false,
BaseMinMemory: 0,
},
@@ -252,8 +276,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
- Threads: 0,
- CanGPU: false,
+ MaxParallelism: 0,
+ CanGPU: false,
BaseMinMemory: 0,
},
@@ -261,8 +285,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
- Threads: 0,
- CanGPU: false,
+ MaxParallelism: 0,
+ CanGPU: false,
BaseMinMemory: 0,
},
@@ -270,8 +294,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
- Threads: 0,
- CanGPU: false,
+ MaxParallelism: 0,
+ CanGPU: false,
BaseMinMemory: 0,
},
@@ -279,8 +303,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
- Threads: 0,
- CanGPU: false,
+ MaxParallelism: 0,
+ CanGPU: false,
BaseMinMemory: 0,
},
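Editor's note on the hunks above: this file renames Resources.Threads to MaxParallelism and replaces the old all-or-nothing MultiThread() check with a Threads(wcpus) helper, so a task marked -1 now claims ParallelNum/ParallelDenom (92%) of a worker's CPUs instead of every CPU. A minimal standalone sketch of that arithmetic, copied from the hunk so it can be run on its own:

package main

import "fmt"

// Same 92% rule introduced in resources.go above.
var ParallelNum uint64 = 92
var ParallelDenom uint64 = 100

type Resources struct{ MaxParallelism int }

func (r Resources) Threads(wcpus uint64) uint64 {
	if r.MaxParallelism == -1 {
		n := (wcpus * ParallelNum) / ParallelDenom
		if n == 0 {
			return wcpus // very small workers keep every CPU
		}
		return n
	}
	return uint64(r.MaxParallelism)
}

func main() {
	multi := Resources{MaxParallelism: -1} // e.g. a PreCommit2-style task
	single := Resources{MaxParallelism: 1} // e.g. an AddPiece-style task
	fmt.Println(multi.Threads(32), single.Threads(32)) // prints: 29 1
}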
diff --git a/extern/sector-storage/roprov.go b/extern/sector-storage/roprov.go
index fe58a8445..2b009c63b 100644
--- a/extern/sector-storage/roprov.go
+++ b/extern/sector-storage/roprov.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
)
diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go
index 831a2615f..8b8ef6d46 100644
--- a/extern/sector-storage/sched.go
+++ b/extern/sector-storage/sched.go
@@ -10,7 +10,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go
index 623472a20..d6dae577b 100644
--- a/extern/sector-storage/sched_resources.go
+++ b/extern/sector-storage/sched_resources.go
@@ -28,12 +28,7 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
a.gpuUsed = r.CanGPU
- if r.MultiThread() {
- a.cpuUse += wr.CPUs
- } else {
- a.cpuUse += uint64(r.Threads)
- }
-
+ a.cpuUse += r.Threads(wr.CPUs)
a.memUsedMin += r.MinMemory
a.memUsedMax += r.MaxMemory
}
@@ -42,12 +37,7 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
if r.CanGPU {
a.gpuUsed = false
}
- if r.MultiThread() {
- a.cpuUse -= wr.CPUs
- } else {
- a.cpuUse -= uint64(r.Threads)
- }
-
+ a.cpuUse -= r.Threads(wr.CPUs)
a.memUsedMin -= r.MinMemory
a.memUsedMax -= r.MaxMemory
}
@@ -68,16 +58,9 @@ func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, call
return false
}
- if needRes.MultiThread() {
- if a.cpuUse > 0 {
- log.Debugf("sched: not scheduling on worker %d for %s; multicore process needs %d threads, %d in use, target %d", wid, caller, res.CPUs, a.cpuUse, res.CPUs)
- return false
- }
- } else {
- if a.cpuUse+uint64(needRes.Threads) > res.CPUs {
- log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads, a.cpuUse, res.CPUs)
- return false
- }
+ if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs {
+ log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
+ return false
}
if len(res.GPUs) > 0 && needRes.CanGPU {
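With the Threads helper in place, the scheduler's CPU accounting in add/free/canHandleRequest collapses to a single comparison: a request fits when cpuUse + needRes.Threads(res.CPUs) does not exceed res.CPUs. A small worked example with invented worker sizes (the 92% rule is inlined; see the standalone sketch after resources.go above):

package main

import "fmt"

// Inlined copy of the 92% rule so this example is self-contained.
func threads(maxParallelism int, wcpus uint64) uint64 {
	if maxParallelism == -1 {
		if n := wcpus * 92 / 100; n > 0 {
			return n
		}
		return wcpus
	}
	return uint64(maxParallelism)
}

func main() {
	const workerCPUs uint64 = 16
	var cpuUse uint64 = 2 // two single-threaded tasks already accounted for

	need := threads(-1, workerCPUs)        // a multithreaded task asks for 14 threads
	fmt.Println(cpuUse+need <= workerCPUs) // true: 2+14 <= 16, so it can be scheduled

	cpuUse += need                      // add() reserves those threads
	fmt.Println(cpuUse+1 <= workerCPUs) // false: no room left for another 1-thread task
}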
diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go
index 4c39370a0..579a6d913 100644
--- a/extern/sector-storage/sched_test.go
+++ b/extern/sector-storage/sched_test.go
@@ -14,7 +14,7 @@ import (
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
@@ -290,6 +290,9 @@ func TestSched(t *testing.T) {
}
testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
+ ParallelNum = 1
+ ParallelDenom = 1
+
return func(t *testing.T) {
index := stores.NewIndex()
diff --git a/extern/sector-storage/selector_alloc.go b/extern/sector-storage/selector_alloc.go
index ca4b99bfc..b891383fb 100644
--- a/extern/sector-storage/selector_alloc.go
+++ b/extern/sector-storage/selector_alloc.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
diff --git a/extern/sector-storage/selector_existing.go b/extern/sector-storage/selector_existing.go
index 1e97db539..fb161f085 100644
--- a/extern/sector-storage/selector_existing.go
+++ b/extern/sector-storage/selector_existing.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
diff --git a/extern/sector-storage/selector_task.go b/extern/sector-storage/selector_task.go
index 5c0d65bb1..807b53103 100644
--- a/extern/sector-storage/selector_task.go
+++ b/extern/sector-storage/selector_task.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
diff --git a/extern/sector-storage/stores/filetype.go b/extern/sector-storage/stores/filetype.go
index 50417d968..90cc1d160 100644
--- a/extern/sector-storage/stores/filetype.go
+++ b/extern/sector-storage/stores/filetype.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
const (
diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go
index 256dc9651..e2bd7e4ee 100644
--- a/extern/sector-storage/stores/index.go
+++ b/extern/sector-storage/stores/index.go
@@ -12,8 +12,8 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
)
var HeartbeatInterval = 10 * time.Second
diff --git a/extern/sector-storage/stores/index_locks.go b/extern/sector-storage/stores/index_locks.go
index 8bf15b950..32c963a41 100644
--- a/extern/sector-storage/stores/index_locks.go
+++ b/extern/sector-storage/stores/index_locks.go
@@ -6,7 +6,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
type sectorLock struct {
diff --git a/extern/sector-storage/stores/index_locks_test.go b/extern/sector-storage/stores/index_locks_test.go
index 5039f8815..1c550d3ca 100644
--- a/extern/sector-storage/stores/index_locks_test.go
+++ b/extern/sector-storage/stores/index_locks_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
var aSector = abi.SectorID{
diff --git a/extern/sector-storage/stores/interface.go b/extern/sector-storage/stores/interface.go
index d94f28e83..875754fc5 100644
--- a/extern/sector-storage/stores/interface.go
+++ b/extern/sector-storage/stores/interface.go
@@ -3,8 +3,8 @@ package stores
import (
"context"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type PathType string
diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go
index b308f5d86..50968e7bd 100644
--- a/extern/sector-storage/stores/local.go
+++ b/extern/sector-storage/stores/local.go
@@ -13,7 +13,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
)
@@ -30,10 +30,15 @@ type StoragePath struct {
// LocalStorageMeta [path]/sectorstore.json
type LocalStorageMeta struct {
- ID ID
+ ID ID
+
+ // A high weight means data is more likely to be stored in this path
Weight uint64 // 0 = readonly
- CanSeal bool
+ // Intermediate data for the sealing process will be stored here
+ CanSeal bool
+
+ // Finalized sectors that will be proved over time will be stored here
CanStore bool
}
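The new comments on LocalStorageMeta spell out what each field of [path]/sectorstore.json controls. A hedged sketch of what one such file could contain, produced by marshalling a trimmed copy of the struct (the ID value and weight are invented; the real struct uses the stores.ID type for ID):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of stores.LocalStorageMeta, limited to the fields shown above.
type LocalStorageMeta struct {
	ID       string
	Weight   uint64 // 0 = readonly
	CanSeal  bool   // intermediate sealing data may be placed here
	CanStore bool   // finalized, long-lived sectors may be placed here
}

func main() {
	meta := LocalStorageMeta{
		ID:       "fe9200eb-example-uuid", // hypothetical
		Weight:   10,                      // preferred over lower-weight paths
		CanSeal:  true,
		CanStore: false, // a fast scratch disk used only while sealing
	}
	out, _ := json.MarshalIndent(meta, "", "  ")
	fmt.Println(string(out)) // shape of a sectorstore.json entry
}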
diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go
index a88e3b947..b9d241b5f 100644
--- a/extern/sector-storage/stores/remote.go
+++ b/extern/sector-storage/stores/remote.go
@@ -18,7 +18,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/hashicorp/go-multierror"
files "github.com/ipfs/go-ipfs-files"
diff --git a/extern/sector-storage/storiface/ffi.go b/extern/sector-storage/storiface/ffi.go
index 6e16018f0..95d400e52 100644
--- a/extern/sector-storage/storiface/ffi.go
+++ b/extern/sector-storage/storiface/ffi.go
@@ -3,7 +3,7 @@ package storiface
import (
"errors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
var ErrSectorNotFound = errors.New("sector not found")
diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go
index 37e4aad1d..25e3175bd 100644
--- a/extern/sector-storage/storiface/worker.go
+++ b/extern/sector-storage/storiface/worker.go
@@ -3,8 +3,8 @@ package storiface
import (
"time"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type WorkerInfo struct {
diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go
index 858b76f7c..8f27401f0 100644
--- a/extern/sector-storage/testworker_test.go
+++ b/extern/sector-storage/testworker_test.go
@@ -6,7 +6,7 @@ import (
"github.com/ipfs/go-cid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
diff --git a/extern/sector-storage/work_tracker.go b/extern/sector-storage/work_tracker.go
index fe176a7f7..5dc12802c 100644
--- a/extern/sector-storage/work_tracker.go
+++ b/extern/sector-storage/work_tracker.go
@@ -8,7 +8,7 @@ import (
"github.com/ipfs/go-cid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
diff --git a/extern/sector-storage/zerocomm/zerocomm.go b/extern/sector-storage/zerocomm/zerocomm.go
index 9b59723a0..9855a5821 100644
--- a/extern/sector-storage/zerocomm/zerocomm.go
+++ b/extern/sector-storage/zerocomm/zerocomm.go
@@ -4,7 +4,7 @@ import (
"math/bits"
commcid "github.com/filecoin-project/go-fil-commcid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
diff --git a/extern/sector-storage/zerocomm/zerocomm_test.go b/extern/sector-storage/zerocomm/zerocomm_test.go
index f5f508796..393f61d64 100644
--- a/extern/sector-storage/zerocomm/zerocomm_test.go
+++ b/extern/sector-storage/zerocomm/zerocomm_test.go
@@ -7,7 +7,7 @@ import (
"testing"
commcid "github.com/filecoin-project/go-fil-commcid"
- abi "github.com/filecoin-project/specs-actors/actors/abi"
+ abi "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go
index b07626187..78765d7b4 100644
--- a/extern/storage-sealing/cbor_gen.go
+++ b/extern/storage-sealing/cbor_gen.go
@@ -6,7 +6,7 @@ import (
"fmt"
"io"
- abi "github.com/filecoin-project/specs-actors/actors/abi"
+ abi "github.com/filecoin-project/go-state-types/abi"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go
index 3a59ea059..1010d31b2 100644
--- a/extern/storage-sealing/checks.go
+++ b/extern/storage-sealing/checks.go
@@ -4,14 +4,18 @@ import (
"bytes"
"context"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
)
// TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting
@@ -91,8 +95,15 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t
return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)}
}
- if height-(si.TicketEpoch+SealRandomnessLookback) > SealRandomnessLookbackLimit(si.SectorType) {
- return &ErrExpiredTicket{xerrors.Errorf("ticket expired: seal height: %d, head: %d", si.TicketEpoch+SealRandomnessLookback, height)}
+ nv, err := api.StateNetworkVersion(ctx, tok)
+ if err != nil {
+ return &ErrApi{xerrors.Errorf("calling StateNetworkVersion: %w", err)}
+ }
+
+ msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType)
+
+ if height-(si.TicketEpoch+policy.SealRandomnessLookback) > msd {
+ return &ErrExpiredTicket{xerrors.Errorf("ticket expired: seal height: %d, head: %d", si.TicketEpoch+policy.SealRandomnessLookback, height)}
}
pci, err := api.StateSectorPreCommitInfo(ctx, maddr, si.SectorNumber, tok)
@@ -137,8 +148,8 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
return &ErrNoPrecommit{xerrors.Errorf("precommit info not found on-chain")}
}
- if pci.PreCommitEpoch+miner.PreCommitChallengeDelay != si.SeedEpoch {
- return &ErrBadSeed{xerrors.Errorf("seed epoch doesn't match on chain info: %d != %d", pci.PreCommitEpoch+miner.PreCommitChallengeDelay, si.SeedEpoch)}
+ if pci.PreCommitEpoch+policy.GetPreCommitChallengeDelay() != si.SeedEpoch {
+ return &ErrBadSeed{xerrors.Errorf("seed epoch doesn't match on chain info: %d != %d", pci.PreCommitEpoch+policy.GetPreCommitChallengeDelay(), si.SeedEpoch)}
}
buf := new(bytes.Buffer)
@@ -168,7 +179,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
log.Warn("on-chain sealed CID doesn't match!")
}
- ok, err := m.verif.VerifySeal(abi.SealVerifyInfo{
+ ok, err := m.verif.VerifySeal(proof0.SealVerifyInfo{
SectorID: m.minerSector(si.SectorNumber),
SealedCID: pci.Info.SealedCID,
SealProof: spt,
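checkPrecommit now sizes the ticket-expiry window from the network version: the prove-commit duration comes from the actors policy package rather than the removed MaxSealDuration table (see constants.go below). A hedged, self-contained sketch of that lookup and comparison, with invented epoch numbers and assuming the lotus module is importable:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/policy"
)

func main() {
	// Prove-commit window for the newest network version and a 32GiB sector.
	msd := policy.GetMaxProveCommitDuration(
		actors.VersionForNetwork(build.NewestNetworkVersion),
		abi.RegisteredSealProof_StackedDrg32GiBV1,
	)

	// The ticket-expiry check from checkPrecommit, with invented epochs.
	height := abi.ChainEpoch(100000)     // hypothetical chain head
	ticketEpoch := abi.ChainEpoch(90000) // hypothetical ticket epoch
	expired := height-(ticketEpoch+policy.SealRandomnessLookback) > msd
	fmt.Println(msd, expired)
}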
diff --git a/extern/storage-sealing/constants.go b/extern/storage-sealing/constants.go
index 565a38c8e..d6aba1814 100644
--- a/extern/storage-sealing/constants.go
+++ b/extern/storage-sealing/constants.go
@@ -1,17 +1,4 @@
package sealing
-import (
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-)
-
-// Epochs
-const SealRandomnessLookback = miner.ChainFinality
-
-// Epochs
-func SealRandomnessLookbackLimit(spt abi.RegisteredSealProof) abi.ChainEpoch {
- return miner.MaxSealDuration[spt]
-}
-
// Epochs
const InteractivePoRepConfidence = 6
diff --git a/extern/storage-sealing/events.go b/extern/storage-sealing/events.go
index ba6d2a860..298063147 100644
--- a/extern/storage-sealing/events.go
+++ b/extern/storage-sealing/events.go
@@ -3,7 +3,7 @@ package sealing
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
// `curH`-`ts.Height` = `confidence`
diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go
index 4b8266e8b..0d2e766fd 100644
--- a/extern/storage-sealing/fsm.go
+++ b/extern/storage-sealing/fsm.go
@@ -12,8 +12,8 @@ import (
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-state-types/abi"
statemachine "github.com/filecoin-project/go-statemachine"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface{}, uint64, error) {
@@ -189,6 +189,12 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
state.Log = append(state.Log, l)
}
+ if m.notifee != nil {
+ defer func(before SectorInfo) {
+ m.notifee(before, *state)
+ }(*state) // take safe-ish copy of the before state (except for nested pointers)
+ }
+
p := fsmPlanners[state.State]
if p == nil {
return nil, 0, xerrors.Errorf("planner for state %s not found", state.State)
diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go
index 8649e6c5e..3e597d761 100644
--- a/extern/storage-sealing/fsm_events.go
+++ b/extern/storage-sealing/fsm_events.go
@@ -1,12 +1,12 @@
package sealing
import (
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-storage/storage"
)
diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go
index c67decbeb..51fd2a37b 100644
--- a/extern/storage-sealing/fsm_test.go
+++ b/extern/storage-sealing/fsm_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
@@ -27,6 +27,7 @@ type test struct {
}
func TestHappyPath(t *testing.T) {
+ var notif []struct{ before, after SectorInfo }
ma, _ := address.NewIDAddress(55151)
m := test{
s: &Sealing{
@@ -34,6 +35,9 @@ func TestHappyPath(t *testing.T) {
stats: SectorStats{
bySector: map[abi.SectorID]statSectorState{},
},
+ notifee: func(before, after SectorInfo) {
+ notif = append(notif, struct{ before, after SectorInfo }{before, after})
+ },
},
t: t,
state: &SectorInfo{State: Packing},
@@ -68,6 +72,16 @@ func TestHappyPath(t *testing.T) {
m.planSingle(SectorFinalized{})
require.Equal(m.t, m.state.State, Proving)
+
+ expected := []SectorState{Packing, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
+ for i, n := range notif {
+ if n.before.State != expected[i] {
+ t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
+ }
+ if n.after.State != expected[i+1] {
+ t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State)
+ }
+ }
}
func TestSeedRevert(t *testing.T) {
diff --git a/extern/storage-sealing/garbage.go b/extern/storage-sealing/garbage.go
index 4b95c1b67..caf371806 100644
--- a/extern/storage-sealing/garbage.go
+++ b/extern/storage-sealing/garbage.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
diff --git a/extern/storage-sealing/nullreader.go b/extern/storage-sealing/nullreader.go
index ea6dfddb0..5987a4145 100644
--- a/extern/storage-sealing/nullreader.go
+++ b/extern/storage-sealing/nullreader.go
@@ -3,8 +3,8 @@ package sealing
import (
"io"
+ "github.com/filecoin-project/go-state-types/abi"
nr "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type NullReader struct {
diff --git a/extern/storage-sealing/precommit_policy.go b/extern/storage-sealing/precommit_policy.go
index 1521dfb05..0b774b56f 100644
--- a/extern/storage-sealing/precommit_policy.go
+++ b/extern/storage-sealing/precommit_policy.go
@@ -3,8 +3,11 @@ package sealing
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/go-state-types/abi"
)
type PreCommitPolicy interface {
@@ -13,6 +16,7 @@ type PreCommitPolicy interface {
type Chain interface {
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
+ StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
}
// BasicPreCommitPolicy satisfies PreCommitPolicy. It has two modes:
@@ -50,7 +54,7 @@ func NewBasicPreCommitPolicy(api Chain, duration abi.ChainEpoch, provingBoundary
func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...Piece) (abi.ChainEpoch, error) {
_, epoch, err := p.api.ChainHead(ctx)
if err != nil {
- return 0, nil
+ return 0, err
}
var end *abi.ChainEpoch
diff --git a/extern/storage-sealing/precommit_policy_test.go b/extern/storage-sealing/precommit_policy_test.go
index 9f9267d65..52814167a 100644
--- a/extern/storage-sealing/precommit_policy_test.go
+++ b/extern/storage-sealing/precommit_policy_test.go
@@ -4,12 +4,15 @@ import (
"context"
"testing"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/build"
+
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
commcid "github.com/filecoin-project/go-fil-commcid"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)
@@ -18,6 +21,10 @@ type fakeChain struct {
h abi.ChainEpoch
}
+func (f *fakeChain) StateNetworkVersion(ctx context.Context, tok sealing.TipSetToken) (network.Version, error) {
+ return build.NewestNetworkVersion, nil
+}
+
func (f *fakeChain) ChainHead(ctx context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
return []byte{1, 2, 3}, f.h, nil
}
diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go
index e48679cc7..1ba53661a 100644
--- a/extern/storage-sealing/sealing.go
+++ b/extern/storage-sealing/sealing.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
@@ -16,18 +17,20 @@ import (
"github.com/filecoin-project/go-address"
padreader "github.com/filecoin-project/go-padreader"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
statemachine "github.com/filecoin-project/go-statemachine"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
)
const SectorStorePrefix = "/sectors"
+var ErrTooManySectorsSealing = xerrors.New("too many sectors sealing")
+
var log = logging.Logger("sectors")
type SectorLocation struct {
@@ -48,10 +51,10 @@ type SealingAPI interface {
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error)
StateMinerSectorSize(context.Context, address.Address, TipSetToken) (abi.SectorSize, error)
StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
- StateMinerDeadlines(ctx context.Context, maddr address.Address, tok TipSetToken) ([]*miner.Deadline, error)
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
+ StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
@@ -59,6 +62,8 @@ type SealingAPI interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
}
+type SectorStateNotifee func(before, after SectorInfo)
+
type Sealing struct {
api SealingAPI
feeCfg FeeConfig
@@ -77,6 +82,8 @@ type Sealing struct {
upgradeLk sync.Mutex
toUpgrade map[abi.SectorNumber]struct{}
+ notifee SectorStateNotifee
+
stats SectorStats
getConfig GetSealingConfigFunc
@@ -99,7 +106,7 @@ type UnsealedSectorInfo struct {
pieceSizes []abi.UnpaddedPieceSize
}
-func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc) *Sealing {
+func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing {
s := &Sealing{
api: api,
feeCfg: fc,
@@ -116,6 +123,9 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
},
toUpgrade: map[abi.SectorNumber]struct{}{},
+
+ notifee: notifee,
+
getConfig: gc,
stats: SectorStats{
@@ -280,7 +290,7 @@ func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
if cfg.MaxSealingSectorsForDeals > 0 {
if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals {
- return 0, xerrors.Errorf("too many sectors sealing")
+ return 0, ErrTooManySectorsSealing
}
}
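sealing.New gains a SectorStateNotifee argument, and the deferred hook added to plan() in fsm.go calls it with the before/after SectorInfo on every processed event. A minimal sketch of a notifee a caller could hand to the constructor (illustrative logging only; it assumes the lotus module is importable):

package main

import (
	"fmt"

	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

// printTransitions matches the SectorStateNotifee shape func(before, after SectorInfo)
// and would be passed to sealing.New as its new final argument.
func printTransitions(before, after sealing.SectorInfo) {
	if before.State != after.State {
		fmt.Printf("sector %d: %s -> %s\n", after.SectorNumber, before.State, after.State)
	}
}

func main() {
	// Fabricate one transition, the same way fsm_test.go records them.
	printTransitions(
		sealing.SectorInfo{SectorNumber: 1, State: sealing.Packing},
		sealing.SectorInfo{SectorNumber: 1, State: sealing.PreCommit1},
	)
}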
diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go
index 4e674603d..10b96e504 100644
--- a/extern/storage-sealing/sector_state.go
+++ b/extern/storage-sealing/sector_state.go
@@ -2,6 +2,38 @@ package sealing
type SectorState string
+var ExistSectorStateList = map[SectorState]struct{}{
+ Empty: {},
+ WaitDeals: {},
+ Packing: {},
+ PreCommit1: {},
+ PreCommit2: {},
+ PreCommitting: {},
+ PreCommitWait: {},
+ WaitSeed: {},
+ Committing: {},
+ SubmitCommit: {},
+ CommitWait: {},
+ FinalizeSector: {},
+ Proving: {},
+ FailedUnrecoverable: {},
+ SealPreCommit1Failed: {},
+ SealPreCommit2Failed: {},
+ PreCommitFailed: {},
+ ComputeProofFailed: {},
+ CommitFailed: {},
+ PackingFailed: {},
+ FinalizeFailed: {},
+ DealsExpired: {},
+ RecoverDealIDs: {},
+ Faulty: {},
+ FaultReported: {},
+ FaultedFinal: {},
+ Removing: {},
+ RemoveFailed: {},
+ Removed: {},
+}
+
const (
UndefinedSectorState SectorState = ""
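ExistSectorStateList enumerates every valid SectorState, which lets callers check an externally supplied state name against the map before acting on it. A small sketch of that lookup (the second key is a deliberately bogus string):

package main

import (
	"fmt"

	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

func main() {
	// A known state is present in the map.
	_, ok := sealing.ExistSectorStateList[sealing.Proving]
	fmt.Println(ok) // true

	// An arbitrary string can be converted and validated before use.
	_, ok = sealing.ExistSectorStateList[sealing.SectorState("NotAState")]
	fmt.Println(ok) // false
}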
diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go
index e313fd712..d22830253 100644
--- a/extern/storage-sealing/states_failed.go
+++ b/extern/storage-sealing/states_failed.go
@@ -6,11 +6,12 @@ import (
"golang.org/x/xerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-statemachine"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
@@ -37,16 +38,16 @@ func (m *Sealing) checkPreCommitted(ctx statemachine.Context, sector SectorInfo)
tok, _, err := m.api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleSealPrecommit1Failed(%d): temp error: %+v", sector.SectorNumber, err)
- return nil, true
+ return nil, false
}
info, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSealPrecommit1Failed(%d): temp error: %+v", sector.SectorNumber, err)
- return nil, true
+ return nil, false
}
- return info, false
+ return info, true
}
func (m *Sealing) handleSealPrecommit1Failed(ctx statemachine.Context, sector SectorInfo) error {
@@ -62,7 +63,7 @@ func (m *Sealing) handleSealPrecommit2Failed(ctx statemachine.Context, sector Se
return err
}
- if sector.PreCommit2Fails > 1 {
+ if sector.PreCommit2Fails > 3 {
return ctx.Send(SectorRetrySealPreCommit1{})
}
@@ -107,7 +108,7 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI
}
if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil {
- if sector.PreCommitMessage != nil {
+ if sector.PreCommitMessage == nil {
log.Warn("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber)
return ctx.Send(SectorPreCommitLanded{TipSet: tok})
}
diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go
index 7693f26ad..96589bcd2 100644
--- a/extern/storage-sealing/states_sealing.go
+++ b/extern/storage-sealing/states_sealing.go
@@ -4,15 +4,18 @@ import (
"bytes"
"context"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-statemachine"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/specs-storage/storage"
)
@@ -56,7 +59,7 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se
return nil, 0, nil
}
- ticketEpoch := epoch - SealRandomnessLookback
+ ticketEpoch := epoch - policy.SealRandomnessLookback
buf := new(bytes.Buffer)
if err := m.maddr.MarshalCBOR(buf); err != nil {
return nil, 0, err
@@ -180,7 +183,14 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
// Sectors must last _at least_ MinSectorExpiration + MaxSealDuration.
// TODO: The "+10" allows the pre-commit to take 10 blocks to be accepted.
- if minExpiration := height + miner.MaxSealDuration[sector.SectorType] + miner.MinSectorExpiration + 10; expiration < minExpiration {
+ nv, err := m.api.StateNetworkVersion(ctx.Context(), tok)
+ if err != nil {
+ return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)})
+ }
+
+ msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType)
+
+ if minExpiration := height + msd + miner.MinSectorExpiration + 10; expiration < minExpiration {
expiration = minExpiration
}
// TODO: enforce a reasonable _maximum_ sector lifetime?
@@ -253,7 +263,7 @@ func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInf
func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.api.ChainHead(ctx.Context())
if err != nil {
- log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+ log.Errorf("handleWaitSeed: api error, not proceeding: %+v", err)
return nil
}
@@ -265,7 +275,7 @@ func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) er
return ctx.Send(SectorChainPreCommitFailed{error: xerrors.Errorf("precommit info not found on chain")})
}
- randHeight := pci.PreCommitEpoch + miner.PreCommitChallengeDelay
+ randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay()
err = m.events.ChainAt(func(ectx context.Context, _ TipSetToken, curH abi.ChainEpoch) error {
// in case of null blocks the randomness can land after the tipset we
@@ -356,12 +366,12 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
}
+ enc := new(bytes.Buffer)
params := &miner.ProveCommitSectorParams{
SectorNumber: sector.SectorNumber,
Proof: sector.Proof,
}
- enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("could not serialize commit sector parameters: %w", err)})
}
diff --git a/extern/storage-sealing/stats.go b/extern/storage-sealing/stats.go
index 871c962c1..78630c216 100644
--- a/extern/storage-sealing/stats.go
+++ b/extern/storage-sealing/stats.go
@@ -3,7 +3,7 @@ package sealing
import (
"sync"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
type statSectorState int
diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go
index 99cce7714..046271a7f 100644
--- a/extern/storage-sealing/types.go
+++ b/extern/storage-sealing/types.go
@@ -6,10 +6,10 @@ import (
"github.com/ipfs/go-cid"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-storage/storage"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
@@ -82,7 +82,7 @@ type SectorInfo struct {
CommR *cid.Cid
Proof []byte
- PreCommitInfo *miner.SectorPreCommitInfo
+ PreCommitInfo *miner0.SectorPreCommitInfo
PreCommitDeposit big.Int
PreCommitMessage *cid.Cid
PreCommitTipSet TipSetToken
diff --git a/extern/storage-sealing/types_test.go b/extern/storage-sealing/types_test.go
index c11cc66b7..fc56620dc 100644
--- a/extern/storage-sealing/types_test.go
+++ b/extern/storage-sealing/types_test.go
@@ -7,7 +7,7 @@ import (
"gotest.tools/assert"
cborutil "github.com/filecoin-project/go-cbor-util"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
)
diff --git a/extern/storage-sealing/upgrade_queue.go b/extern/storage-sealing/upgrade_queue.go
index 870f60dbb..78a78fc45 100644
--- a/extern/storage-sealing/upgrade_queue.go
+++ b/extern/storage-sealing/upgrade_queue.go
@@ -3,11 +3,12 @@ package sealing
import (
"context"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
)
func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool {
@@ -67,6 +68,8 @@ func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreC
params.ReplaceSectorDeadline = loc.Deadline
params.ReplaceSectorPartition = loc.Partition
+ log.Infof("replacing sector %d with %d", *replace, params.SectorNumber)
+
ri, err := m.api.StateSectorGetInfo(ctx, m.maddr, *replace, nil)
if err != nil {
log.Errorf("error calling StateSectorGetInfo for replaced sector: %+v", err)
diff --git a/extern/storage-sealing/utils.go b/extern/storage-sealing/utils.go
index b507907fb..dadef227d 100644
--- a/extern/storage-sealing/utils.go
+++ b/extern/storage-sealing/utils.go
@@ -3,7 +3,7 @@ package sealing
import (
"math/bits"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
)
func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) {
diff --git a/extern/storage-sealing/utils_test.go b/extern/storage-sealing/utils_test.go
index 1d6b6c515..e346b6dc9 100644
--- a/extern/storage-sealing/utils_test.go
+++ b/extern/storage-sealing/utils_test.go
@@ -3,7 +3,7 @@ package sealing
import (
"testing"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/assert"
)
diff --git a/extern/test-vectors b/extern/test-vectors
index 84da0a5ea..a8f968ade 160000
--- a/extern/test-vectors
+++ b/extern/test-vectors
@@ -1 +1 @@
-Subproject commit 84da0a5ea1256a6e66bcbf73542c93e4916d6356
+Subproject commit a8f968adeba1995f161f7be0048188affc425079
diff --git a/gen/main.go b/gen/main.go
index e062f6a2e..d5874af2c 100644
--- a/gen/main.go
+++ b/gen/main.go
@@ -7,7 +7,7 @@ import (
gen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/blocksync"
+ "github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/hello"
"github.com/filecoin-project/lotus/paychmgr"
@@ -26,6 +26,8 @@ func main() {
types.BlockMsg{},
types.ExpTipSet{},
types.BeaconEntry{},
+ types.StateRoot{},
+ types.StateInfo0{},
)
if err != nil {
fmt.Println(err)
@@ -63,15 +65,14 @@ func main() {
os.Exit(1)
}
- err = gen.WriteTupleEncodersToFile("./chain/blocksync/cbor_gen.go", "blocksync",
- blocksync.Request{},
- blocksync.Response{},
- blocksync.CompactedMessages{},
- blocksync.BSTipSet{},
+ err = gen.WriteTupleEncodersToFile("./chain/exchange/cbor_gen.go", "exchange",
+ exchange.Request{},
+ exchange.Response{},
+ exchange.CompactedMessages{},
+ exchange.BSTipSet{},
)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
-
}
diff --git a/genesis/types.go b/genesis/types.go
index 13349def2..79656feac 100644
--- a/genesis/types.go
+++ b/genesis/types.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
diff --git a/go.mod b/go.mod
index 109f8110b..e9687c7b8 100644
--- a/go.mod
+++ b/go.mod
@@ -2,66 +2,71 @@ module github.com/filecoin-project/lotus
go 1.14
-replace github.com/supranational/blst => github.com/supranational/blst v0.1.2-alpha.1
-
require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
github.com/BurntSushi/toml v0.3.1
github.com/GeertJohan/go.rice v1.0.0
github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee
+ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129
+ github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b
github.com/coreos/go-systemd/v22 v22.0.0
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
- github.com/dgraph-io/badger/v2 v2.0.3
+ github.com/dgraph-io/badger/v2 v2.2007.2
github.com/docker/go-units v0.4.0
- github.com/drand/drand v1.0.3-0.20200714175734-29705eaf09d4
- github.com/drand/kyber v1.1.1
+ github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32
+ github.com/drand/kyber v1.1.2
github.com/dustin/go-humanize v1.0.0
github.com/elastic/go-sysinfo v1.3.0
github.com/fatih/color v1.8.0
- github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
- github.com/filecoin-project/go-address v0.0.3
- github.com/filecoin-project/go-bitfield v0.2.0
+ github.com/filecoin-project/go-address v0.0.4
+ github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
+ github.com/filecoin-project/go-bitfield v0.2.1
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
- github.com/filecoin-project/go-data-transfer v0.6.3
+ github.com/filecoin-project/go-data-transfer v0.6.7
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
- github.com/filecoin-project/go-fil-markets v0.5.9
+ github.com/filecoin-project/go-fil-markets v0.7.1
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52
github.com/filecoin-project/go-multistore v0.0.3
- github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6
+ github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
- github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370
+ github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab
+ github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
- github.com/filecoin-project/specs-actors v0.9.3
- github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401
- github.com/filecoin-project/statediff v0.0.1
- github.com/filecoin-project/test-vectors v0.0.0-20200903223506-84da0a5ea125
+ github.com/filecoin-project/specs-actors v0.9.12
+ github.com/filecoin-project/specs-actors/v2 v2.0.1
+ github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796
+ github.com/filecoin-project/test-vectors/schema v0.0.4
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
github.com/go-kit/kit v0.10.0
+ github.com/go-ole/go-ole v1.2.4 // indirect
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.2
+ github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
github.com/hashicorp/go-multierror v1.1.0
github.com/hashicorp/golang-lru v0.5.4
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
+ github.com/ipfs/bbloom v0.0.4
github.com/ipfs/go-bitswap v0.2.20
github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834
github.com/ipfs/go-cid v0.0.7
github.com/ipfs/go-cidutil v0.0.2
- github.com/ipfs/go-datastore v0.4.4
+ github.com/ipfs/go-datastore v0.4.5
github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e
github.com/ipfs/go-ds-leveldb v0.4.2
github.com/ipfs/go-ds-measure v0.1.0
+ github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459
github.com/ipfs/go-filestore v1.0.0
github.com/ipfs/go-fs-lock v0.0.6
- github.com/ipfs/go-graphsync v0.1.2
+ github.com/ipfs/go-graphsync v0.2.1
github.com/ipfs/go-ipfs-blockstore v1.0.1
github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-ds-help v1.0.0
@@ -78,8 +83,8 @@ require (
github.com/ipfs/go-path v0.0.7
github.com/ipfs/go-unixfs v0.2.4
github.com/ipfs/interface-go-ipfs-core v0.2.3
- github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae
- github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef
+ github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4
+ github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.7.0
github.com/libp2p/go-eventbus v0.2.1
@@ -89,10 +94,10 @@ require (
github.com/libp2p/go-libp2p-discovery v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.8.3
github.com/libp2p/go-libp2p-mplex v0.2.4
- github.com/libp2p/go-libp2p-noise v0.1.1
+ github.com/libp2p/go-libp2p-noise v0.1.2
github.com/libp2p/go-libp2p-peerstore v0.2.6
- github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de
- github.com/libp2p/go-libp2p-quic-transport v0.8.0
+ github.com/libp2p/go-libp2p-pubsub v0.3.6
+ github.com/libp2p/go-libp2p-quic-transport v0.8.2
github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-swarm v0.2.8
@@ -107,17 +112,19 @@ require (
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.14
github.com/opentracing/opentracing-go v1.2.0
+ github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a
github.com/raulk/clock v1.1.0
github.com/stretchr/testify v1.6.1
github.com/supranational/blst v0.1.1
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.2.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
- github.com/whyrusleeping/cbor-gen v0.0.0-20200814224545-656e08ce49ee
+ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.opencensus.io v0.22.4
+ go.uber.org/dig v1.10.0 // indirect
go.uber.org/fx v1.9.0
go.uber.org/multierr v1.5.0
go.uber.org/zap v1.15.0
@@ -125,13 +132,19 @@ require (
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
+ gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
+ launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
)
+replace github.com/filecoin-project/lotus => ./
+
replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
-replace github.com/dgraph-io/badger/v2 => github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794
-
replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
+
+replace github.com/supranational/blst => ./extern/fil-blst/blst
+
+replace github.com/filecoin-project/fil-blst => ./extern/fil-blst
diff --git a/go.sum b/go.sum
index ea7b3711e..bab4e929d 100644
--- a/go.sum
+++ b/go.sum
@@ -8,20 +8,14 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
contrib.go.opencensus.io/exporter/jaeger v0.1.0 h1:WNc9HbA38xEQmsI40Tjd/MNU/g8byN2Of7lwIjv0Jdc=
contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA=
contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg=
@@ -118,6 +112,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA=
+github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -131,6 +127,14 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b h1:OKALTB609+19AM7wsO0k8yMwAqjEIppcnYvyIhA+ZlQ=
+github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ=
+github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I=
+github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -158,7 +162,6 @@ github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=
github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
-github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -173,8 +176,10 @@ github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhY
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg=
github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
-github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794 h1:PIPH4SLjYXMMlX/cQqV7nIRatv7556yqUfWY+KBjrtQ=
-github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
+github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
+github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
@@ -186,12 +191,12 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/drand/bls12-381 v0.3.2 h1:RImU8Wckmx8XQx1tp1q04OV73J9Tj6mmpQLYDP7V1XE=
github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y=
-github.com/drand/drand v1.0.3-0.20200714175734-29705eaf09d4 h1:+Rov3bfUriGWFR/lUVXnpimx+HMr9BXRC4by0BxuQ8k=
-github.com/drand/drand v1.0.3-0.20200714175734-29705eaf09d4/go.mod h1:SnqWL9jksIMK63UKkfmWI6f9PDN8ROoCgg+Z4zWk7hg=
+github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32 h1:sU+51aQRaDxg0KnjQg19KuYRIxDBEUHffBAICSnBys8=
+github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32/go.mod h1:0sQEVg+ngs1jaDPVIiEgY0lbENWJPaUlWxGHEaSmKVM=
github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw=
github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw=
-github.com/drand/kyber v1.1.1 h1:mwCY2XGRB+Qc1MPfrnRuVuXELkPhcq/r9yMoJIcDhHI=
-github.com/drand/kyber v1.1.1/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw=
+github.com/drand/kyber v1.1.2 h1:faemqlaFyLrbBSjZGRzzu5SG/do+uTYpHlnrJIHbAhQ=
+github.com/drand/kyber v1.1.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw=
github.com/drand/kyber-bls12381 v0.1.0 h1:/P4C65VnyEwxzR5ZYYVMNzY1If+aYBrdUU5ukwh7LQw=
github.com/drand/kyber-bls12381 v0.1.0/go.mod h1:N1emiHpm+jj7kMlxEbu3MUyOiooTgNySln564cgD9mk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -217,79 +222,66 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
-github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef h1:MtQRSnJLsQOOlmsd/Ua5KWXimpxcaa715h6FUh/eJPY=
-github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef/go.mod h1:SMj5VK1pYgqC8FXVEtOBRTc+9AIrYu+C+K3tAXi2Rk8=
-github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
-github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8=
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
-github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
-github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
+github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w=
+github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
+github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
-github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 h1:K6t4Hrs+rwUxBz2xg88Bdqeh4k5/rycQFdPseZhRyfE=
-github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
-github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw=
-github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
-github.com/filecoin-project/go-bitfield v0.0.3/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
-github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
-github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
+github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU=
+github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q=
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
+github.com/filecoin-project/go-bitfield v0.2.1 h1:S6Uuqcspqu81sWJ0He4OAfFLm1tSwPdVjtKTkl5m/xQ=
+github.com/filecoin-project/go-bitfield v0.2.1/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
-github.com/filecoin-project/go-data-transfer v0.6.1/go.mod h1:uRYBRKVBVM12CSusBtVrzDHkVw/3DKZpkxKJVP1Ydas=
-github.com/filecoin-project/go-data-transfer v0.6.3 h1:7TLwm8nuodHYD/uiwJjKc/PGRR+LwqM8jmlZqgWuUfY=
-github.com/filecoin-project/go-data-transfer v0.6.3/go.mod h1:PmBKVXkhh67/tnEdJXQwDHl5mT+7Tbcwe1NPninqhnM=
-github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
+github.com/filecoin-project/go-data-transfer v0.6.7 h1:Kacr5qz2YWtd3sensU6aXFtES7joeapVDeXApeUD35I=
+github.com/filecoin-project/go-data-transfer v0.6.7/go.mod h1:C++k1U6+jMQODOaen5OPDo9XQbth9Yq3ie94vNjBJbk=
+github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
+github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
-github.com/filecoin-project/go-fil-markets v0.5.6-0.20200814234959-80b1788108ac/go.mod h1:umicPCaN99ysHTiYOmwhuLxTFbOwcsI+mdw/t96vvM4=
-github.com/filecoin-project/go-fil-markets v0.5.8 h1:uwl0QNUVmmSlUQfxshpj21Dmhh6WKTQNhnb1GMfdp18=
-github.com/filecoin-project/go-fil-markets v0.5.8/go.mod h1:6ZX1vbZbnukbVQ8tCB/MmEizuW/bmRX7SpGAltU3KVg=
-github.com/filecoin-project/go-fil-markets v0.5.9 h1:iIO17UfIjUCiB37TRwgiBwAyfJJwHb8e8uAfu7F37gc=
-github.com/filecoin-project/go-fil-markets v0.5.9/go.mod h1:/cb1IoaiHhwFEWyIAPm9yN6Z+MiPujFZBT8BGH7LwB8=
-github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200817153016-2ea5cbaf5ec0/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
+github.com/filecoin-project/go-fil-markets v0.7.1 h1:e0NlpSnaeGyDUhCOzevjcxkSA54kt9BzlXpLRgduUFI=
+github.com/filecoin-project/go-fil-markets v0.7.1/go.mod h1:5Pt4DXQqUoUrp9QzlSdlYTpItXxwAtqKrxRWQ6hAOqk=
+github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
+github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
+github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
+github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI=
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52 h1:FXtCp0ybqdQL9knb3OGDpkNTaBbPxgkqPeWKotUwkH0=
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI=
github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
-github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs=
-github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE=
-github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
-github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
+github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg=
+github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
-github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
+github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
+github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
+github.com/filecoin-project/go-state-types v0.0.0-20200905071437-95828685f9df/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
+github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I=
+github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200714194326-a77c3ae20989/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
-github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370 h1:Jbburj7Ih2iaJ/o5Q9A+EAeTabME6YII7FLi9SKUf5c=
-github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
+github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
+github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
-github.com/filecoin-project/lotus v0.4.3-0.20200820203717-d1718369a182/go.mod h1:biFZPQ/YyQGfkHUmHMiaNf2hnD6zm1+OAXPQYQ61Zkg=
-github.com/filecoin-project/lotus v0.5.8-0.20200903221953-ada5e6ae68cf/go.mod h1:wxuzS4ozpCFThia18G+J5P0Jp/DSiq9ezzJF1yvZuP4=
-github.com/filecoin-project/sector-storage v0.0.0-20200712023225-1d67dcfa3c15/go.mod h1:salgVdX7qeXFo/xaiEQE29J4pPkjn71T0kt0n+VDBzo=
-github.com/filecoin-project/sector-storage v0.0.0-20200730050024-3ee28c3b6d9a/go.mod h1:oOawOl9Yk+qeytLzzIryjI8iRbqo+qzS6EEeElP4PWA=
-github.com/filecoin-project/sector-storage v0.0.0-20200810171746-eac70842d8e0 h1:E1fZ27fhKK05bhZItfTwqr1i05vXnEZJznQFEYwEEUU=
-github.com/filecoin-project/sector-storage v0.0.0-20200810171746-eac70842d8e0/go.mod h1:oOawOl9Yk+qeytLzzIryjI8iRbqo+qzS6EEeElP4PWA=
-github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
-github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
-github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
-github.com/filecoin-project/specs-actors v0.7.3-0.20200716231407-60a2ae96d2e6/go.mod h1:JOMUa7EijvpOO4ofD1yeHNmqohkmmnhTvz/IpB6so4c=
-github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw=
-github.com/filecoin-project/specs-actors v0.8.7-0.20200811203034-272d022c1923/go.mod h1:hukRu6vKQrrS7Nt+fC/ql4PqWLSfmAWNshD/VDtARZU=
-github.com/filecoin-project/specs-actors v0.9.2/go.mod h1:YasnVUOUha0DN5wB+twl+V8LlDKVNknRG00kTJpsfFA=
-github.com/filecoin-project/specs-actors v0.9.3 h1:Fi75G/UQ7R4eiIwnN+S6bBQ9LqKivyJdw62jJzTi6aE=
-github.com/filecoin-project/specs-actors v0.9.3/go.mod h1:YasnVUOUha0DN5wB+twl+V8LlDKVNknRG00kTJpsfFA=
-github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
-github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401 h1:jLzN1hwO5WpKPu8ASbW8fs1FUCsOWNvoBXzQhv+8/E8=
-github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
-github.com/filecoin-project/statediff v0.0.1 h1:lym6d5wNnzr+5Uc/6RRWx1hgwb+tCKn2mFIK0Eb1Q18=
-github.com/filecoin-project/statediff v0.0.1/go.mod h1:qNWauolLFEzOiA4LNWermBRVNbaZHfPcPevumZeh+hE=
-github.com/filecoin-project/storage-fsm v0.0.0-20200805013058-9d9ea4e6331f/go.mod h1:1CGbd11KkHuyWPT+xwwCol1zl/jnlpiKD2L4fzKxaiI=
+github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
+github.com/filecoin-project/specs-actors v0.9.7/go.mod h1:wM2z+kwqYgXn5Z7scV1YHLyd1Q1cy0R8HfTIWQ0BFGU=
+github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk=
+github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
+github.com/filecoin-project/specs-actors/v2 v2.0.1 h1:bf08x6tqCDfClzrv2q/rmt/A/UbBOy1KgaoogQEcLhU=
+github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
+github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk=
+github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
+github.com/filecoin-project/test-vectors/schema v0.0.4 h1:QTRd0gb/NP4ZOTM7Dib5U3xE1/ToGDKnYLfxkC3t/m8=
+github.com/filecoin-project/test-vectors/schema v0.0.4/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
@@ -302,6 +294,9 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0=
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8=
+github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
@@ -352,7 +347,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -360,7 +354,6 @@ github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -372,6 +365,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws=
+github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -388,9 +383,7 @@ github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8v
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
@@ -409,7 +402,6 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -431,9 +423,10 @@ github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE=
-github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k=
-github.com/hannahhoward/cbor-gen-for v0.0.0-20200723175505-5892b522820a h1:wfqh5oiHXvn3Rk54xy8Cwqh+HnYihGnjMNzdNb3/ld0=
-github.com/hannahhoward/cbor-gen-for v0.0.0-20200723175505-5892b522820a/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8=
+github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ=
+github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE=
+github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q=
+github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8=
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY=
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
@@ -482,7 +475,6 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3
github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0=
github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs=
github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
-github.com/ipfs/go-bitswap v0.2.8/go.mod h1:2Yjog0GMdH8+AsxkE0DI9D2mANaUTxbVVav0pPoZoug=
github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc=
github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo=
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
@@ -517,6 +509,8 @@ github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13X
github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8=
github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg=
+github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
@@ -535,18 +529,15 @@ github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9
github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ=
github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY=
+github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 h1:W3YMLEvOXqdW+sYMiguhWP6txJwQvIQqhvpU8yAMGQs=
+github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g=
github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0=
github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM=
-github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0=
github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM=
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
-github.com/ipfs/go-graphsync v0.1.1 h1:bFDAYS0Z48yd8ROPI6f/zIVmJxaDLA6m8cVuJPKC5fE=
-github.com/ipfs/go-graphsync v0.1.1/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
-github.com/ipfs/go-graphsync v0.1.2 h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg=
-github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA=
-github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE=
-github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg=
+github.com/ipfs/go-graphsync v0.2.1 h1:MdehhqBSuTI2LARfKLkpYnt0mUrqHs/mtuDnESXHBfU=
+github.com/ipfs/go-graphsync v0.2.1/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10=
github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
@@ -578,7 +569,6 @@ github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAz
github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
-github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg=
github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA=
@@ -652,16 +642,14 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo=
github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg=
github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA=
github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs=
-github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae h1:OV9dxl8iPMCOD8Vi/hvFwRh3JWPXqmkYSVxWr9JnEzM=
-github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae/go.mod h1:2mvxpu4dKRnuH3mj5u6KW/tmRSCcXvy/KYiJ4nC6h4c=
-github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA=
+github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY=
+github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
-github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef h1:/yPelt/0CuzZsmRkYzBBnJ499JnAOGaIaAXHujx96ic=
-github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
-github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE=
+github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA=
+github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
-github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b h1:ZtlW6pubN17TDaStlxgrwEXXwwUfJaXu9RobwczXato=
-github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
+github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es=
+github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
@@ -769,7 +757,6 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD
github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM=
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
-github.com/libp2p/go-libp2p v0.10.3/go.mod h1:0ER6iPSaPeQjryNgOnm9bLNpMJCYmuw54xJXsVR17eE=
github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE=
github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE=
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
@@ -831,7 +818,6 @@ github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskW
github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
-github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ=
github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI=
@@ -857,7 +843,6 @@ github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFP
github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg=
github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=
github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90=
-github.com/libp2p/go-libp2p-metrics v0.0.1 h1:yumdPC/P2VzINdmcKZd0pciSUCpou+s0lwYCjBbzQZU=
github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08=
github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I=
github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo=
@@ -878,9 +863,10 @@ github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLK
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRhPwSMGpQ=
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
+github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk=
+github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE=
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es=
-github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=
github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
@@ -897,18 +883,15 @@ github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD
github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s=
-github.com/libp2p/go-libp2p-protocol v0.1.0 h1:HdqhEyhg0ToCaxgMhnOmUO8snQtt/kQlcjVk3UoJU3c=
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
-github.com/libp2p/go-libp2p-pubsub v0.3.5-0.20200820194335-bfc96c2cd081/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
-github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de h1:Dl0B0x6u+OSKXAa1DeB6xHFsUOBAhjrXJ10zykVSN6Q=
-github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
+github.com/libp2p/go-libp2p-pubsub v0.3.6 h1:9oO8W7qIWCYQYyz5z8nUsPcb3rrFehBlkbqvbSVjBxY=
+github.com/libp2p/go-libp2p-pubsub v0.3.6/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
-github.com/libp2p/go-libp2p-quic-transport v0.7.1/go.mod h1:TD31to4E5exogR/GWHClXCfkktigjAl5rXSt7HoxNvY=
-github.com/libp2p/go-libp2p-quic-transport v0.8.0 h1:mHA94K2+TD0e9XtjWx/P5jGGZn0GdQ4OFYwNllagv4E=
-github.com/libp2p/go-libp2p-quic-transport v0.8.0/go.mod h1:F2FG/6Bzz0U6essUVxDzE0s9CrY4XGLbl7QEmDNvU7A=
+github.com/libp2p/go-libp2p-quic-transport v0.8.2 h1:FDaXBCBJ1e5hY6gnWEJ4NbYyLk8eezr4J6AY3q3KqwM=
+github.com/libp2p/go-libp2p-quic-transport v0.8.2/go.mod h1:L+e0q15ZNaYm3seHgbsXjWP8kXLEqz+elLWKk9l8DhM=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg=
@@ -1041,9 +1024,8 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
-github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
-github.com/lucas-clemente/quic-go v0.18.0 h1:JhQDdqxdwdmGdKsKgXi1+coHRoGhvU6z0rNzOJqZ/4o=
-github.com/lucas-clemente/quic-go v0.18.0/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg=
+github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys=
+github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@@ -1066,10 +1048,8 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -1088,7 +1068,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
@@ -1371,9 +1350,8 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -1381,8 +1359,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/supranational/blst v0.1.2-alpha.1 h1:v0UqVlvbRNZIaSeMPr+T01kvTUq1h0EZuZ6gnDR1Mlg=
-github.com/supranational/blst v0.1.2-alpha.1/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@@ -1421,20 +1397,19 @@ github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMU
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY=
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM=
-github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
-github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg=
github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
+github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
-github.com/whyrusleeping/cbor-gen v0.0.0-20200814224545-656e08ce49ee h1:U7zWWvvAjT76EiuWPSOiZlQDnaQYPxPoxugTtTAcJK0=
-github.com/whyrusleeping/cbor-gen v0.0.0-20200814224545-656e08ce49ee/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
+github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU=
+github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
@@ -1457,12 +1432,7 @@ github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7c
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
-github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345 h1:IJVAwIctqDFOrO0C2qzksXmANviyHJzrklU27e1ltzE=
-github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345/go.mod h1:D7hA8H5pyQx7Y5Em7IWx1R4vNJzfon3gpG9nxjkITjQ=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829 h1:wb7xrDzfkLgPHsSEBm+VSx6aDdi64VtV0xvP0E6j8bk=
github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I=
@@ -1472,7 +1442,6 @@ github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8=
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs=
go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw=
@@ -1503,8 +1472,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/dig v1.8.0 h1:1rR6hnL/bu1EVcjnRDN5kx1vbIjEJDTGhSQ2B3ddpcI=
-go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
+go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY=
+go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY=
go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw=
go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo=
@@ -1522,8 +1491,6 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU=
go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
@@ -1548,13 +1515,10 @@ golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
@@ -1566,10 +1530,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1581,9 +1544,8 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -1627,11 +1589,8 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -1689,7 +1648,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1705,20 +1663,14 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200427175716-29b57079015a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y=
@@ -1762,23 +1714,14 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d h1:F3OmlXCzYtG9YE6tXDnUOlJBzVzHF8EcmZ1yTJlcgIk=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@@ -1799,11 +1742,8 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.25.0 h1:LodzhlzZEUfhXzNUMIfVlf9Gr6Ua5MMtoFWh7+f47qA=
-google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1829,12 +1769,7 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8=
@@ -1855,10 +1790,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200617041141-9a465503579e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1907,9 +1840,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
diff --git a/journal/fs.go b/journal/fs.go
new file mode 100644
index 000000000..57774d3aa
--- /dev/null
+++ b/journal/fs.go
@@ -0,0 +1,136 @@
+package journal
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
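+// RFC3339nocolon is an RFC3339-like timestamp layout with the colons removed,
+// so that it can safely be used in journal file names on any filesystem.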
+const RFC3339nocolon = "2006-01-02T150405Z0700"
+
+// fsJournal is a basic journal backed by files on a filesystem.
+type fsJournal struct {
+ EventTypeRegistry
+
+ dir string
+ sizeLimit int64
+
+ fi *os.File
+ fSize int64
+
+ incoming chan *Event
+
+ closing chan struct{}
+ closed chan struct{}
+}
+
+// OpenFSJournal constructs a rolling filesystem journal, with a default
+// per-file size limit of 1GiB.
+func OpenFSJournal(lr repo.LockedRepo, disabled DisabledEvents) (Journal, error) {
+ dir := filepath.Join(lr.Path(), "journal")
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return nil, fmt.Errorf("failed to make directory %s for file journal: %w", dir, err)
+ }
+
+ f := &fsJournal{
+ EventTypeRegistry: NewEventTypeRegistry(disabled),
+ dir: dir,
+ sizeLimit: 1 << 30,
+ incoming: make(chan *Event, 32),
+ closing: make(chan struct{}),
+ closed: make(chan struct{}),
+ }
+
+ if err := f.rollJournalFile(); err != nil {
+ return nil, err
+ }
+
+ go f.runLoop()
+
+ return f, nil
+}
+
+func (f *fsJournal) RecordEvent(evtType EventType, supplier func() interface{}) {
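+ // Recover from panics raised by the supplier function, as the Journal
+ // interface requires; a panicking supplier must never take down the caller.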
+ defer func() {
+ if r := recover(); r != nil {
+ log.Warnf("recovered from panic while recording journal event; type=%s, err=%v", evtType, r)
+ }
+ }()
+
+ if !evtType.Enabled() {
+ return
+ }
+
+ je := &Event{
+ EventType: evtType,
+ Timestamp: build.Clock.Now(),
+ Data: supplier(),
+ }
+ select {
+ case f.incoming <- je:
+ case <-f.closing:
+ log.Warnw("journal closed but tried to log event", "event", je)
+ }
+}
+
+func (f *fsJournal) Close() error {
+ close(f.closing)
+ <-f.closed
+ return nil
+}
+
+func (f *fsJournal) putEvent(evt *Event) error {
+ b, err := json.Marshal(evt)
+ if err != nil {
+ return err
+ }
+ n, err := f.fi.Write(append(b, '\n'))
+ if err != nil {
+ return err
+ }
+
+ f.fSize += int64(n)
+
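+ // Roll over to a fresh journal file once the size limit is reached; a failed
+ // roll is ignored here so that recording an event never fails the caller.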
+ if f.fSize >= f.sizeLimit {
+ _ = f.rollJournalFile()
+ }
+
+ return nil
+}
+
+func (f *fsJournal) rollJournalFile() error {
+ if f.fi != nil {
+ _ = f.fi.Close()
+ }
+
+ nfi, err := os.Create(filepath.Join(f.dir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon))))
+ if err != nil {
+ return xerrors.Errorf("failed to open journal file: %w", err)
+ }
+
+ f.fi = nfi
+ f.fSize = 0
+ return nil
+}
+
+func (f *fsJournal) runLoop() {
+ defer close(f.closed)
+
+ for {
+ select {
+ case je := <-f.incoming:
+ if err := f.putEvent(je); err != nil {
+ log.Errorw("failed to write out journal event", "event", je, "err", err)
+ }
+ case <-f.closing:
+ _ = f.fi.Close()
+ return
+ }
+ }
+}
diff --git a/journal/global.go b/journal/global.go
new file mode 100644
index 000000000..b4d0e0a1b
--- /dev/null
+++ b/journal/global.go
@@ -0,0 +1,9 @@
+package journal
+
+var (
+ // J is the globally accessible Journal. It starts out as a NilJournal and,
+ // early in the Lotus initialization routine, is replaced with whichever
+ // Journal is configured (by default, the filesystem journal). Components
+ // can safely record in the journal by calling journal.J.RecordEvent(...).
+ J Journal = NilJournal() // nolint
+)
diff --git a/journal/journal.go b/journal/journal.go
deleted file mode 100644
index f043d37d6..000000000
--- a/journal/journal.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package journal
-
-import (
- "encoding/json"
- "fmt"
- "os"
- "path/filepath"
- "time"
-
- logging "github.com/ipfs/go-log"
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/lotus/build"
-)
-
-func InitializeSystemJournal(dir string) error {
- if err := os.MkdirAll(dir, 0755); err != nil {
- return err
- }
- j, err := OpenFSJournal(dir)
- if err != nil {
- return err
- }
- currentJournal = j
- return nil
-}
-
-func Add(sys string, val interface{}) {
- if currentJournal == nil {
- log.Warn("no journal configured")
- return
- }
- currentJournal.AddEntry(sys, val)
-}
-
-var log = logging.Logger("journal")
-
-var currentJournal Journal
-
-type Journal interface {
- AddEntry(system string, obj interface{})
- Close() error
-}
-
-// fsJournal is a basic journal backed by files on a filesystem
-type fsJournal struct {
- fi *os.File
- fSize int64
-
- journalDir string
-
- incoming chan *JournalEntry
- journalSizeLimit int64
-
- closing chan struct{}
-}
-
-func OpenFSJournal(dir string) (Journal, error) {
- fsj := &fsJournal{
- journalDir: dir,
- incoming: make(chan *JournalEntry, 32),
- journalSizeLimit: 1 << 30,
- closing: make(chan struct{}),
- }
-
- if err := fsj.rollJournalFile(); err != nil {
- return nil, err
- }
-
- go fsj.runLoop()
-
- return fsj, nil
-}
-
-type JournalEntry struct {
- System string
- Timestamp time.Time
- Val interface{}
-}
-
-func (fsj *fsJournal) putEntry(je *JournalEntry) error {
- b, err := json.Marshal(je)
- if err != nil {
- return err
- }
- n, err := fsj.fi.Write(append(b, '\n'))
- if err != nil {
- return err
- }
-
- fsj.fSize += int64(n)
-
- if fsj.fSize >= fsj.journalSizeLimit {
- return fsj.rollJournalFile()
- }
-
- return nil
-}
-
-const RFC3339nocolon = "2006-01-02T150405Z0700"
-
-func (fsj *fsJournal) rollJournalFile() error {
- if fsj.fi != nil {
- err := fsj.fi.Close()
- if err != nil {
- return err
- }
- }
-
- nfi, err := os.Create(filepath.Join(fsj.journalDir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon))))
- if err != nil {
- return xerrors.Errorf("failed to open journal file: %w", err)
- }
-
- fsj.fi = nfi
- fsj.fSize = 0
- return nil
-}
-
-func (fsj *fsJournal) runLoop() {
- for {
- select {
- case je := <-fsj.incoming:
- if err := fsj.putEntry(je); err != nil {
- log.Errorw("failed to write out journal entry", "entry", je, "err", err)
- }
- case <-fsj.closing:
- if err := fsj.fi.Close(); err != nil {
- log.Errorw("failed to close journal", "err", err)
- }
- return
- }
- }
-}
-
-func (fsj *fsJournal) AddEntry(system string, obj interface{}) {
- je := &JournalEntry{
- System: system,
- Timestamp: build.Clock.Now(),
- Val: obj,
- }
- select {
- case fsj.incoming <- je:
- case <-fsj.closing:
- log.Warnw("journal closed but tried to log event", "entry", je)
- }
-}
-
-func (fsj *fsJournal) Close() error {
- close(fsj.closing)
- return nil
-}
diff --git a/journal/nil.go b/journal/nil.go
new file mode 100644
index 000000000..fa72fa373
--- /dev/null
+++ b/journal/nil.go
@@ -0,0 +1,16 @@
+package journal
+
+type nilJournal struct{}
+
+// nilj is a singleton nil journal.
+var nilj Journal = &nilJournal{}
+
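+// NilJournal returns a no-op journal that discards all events. It is the
+// default value of journal.J until a real journal is installed.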
+func NilJournal() Journal {
+ return nilj
+}
+
+func (n *nilJournal) RegisterEventType(_, _ string) EventType { return EventType{} }
+
+func (n *nilJournal) RecordEvent(_ EventType, _ func() interface{}) {}
+
+func (n *nilJournal) Close() error { return nil }
diff --git a/journal/registry.go b/journal/registry.go
new file mode 100644
index 000000000..6ab5b5fb1
--- /dev/null
+++ b/journal/registry.go
@@ -0,0 +1,57 @@
+package journal
+
+import "sync"
+
+// EventTypeRegistry is a component that constructs tracked EventType tokens,
+// for usage with a Journal.
+type EventTypeRegistry interface {
+
+ // RegisterEventType introduces a new event type to a journal, and
+ // returns an EventType token that components can later use to check whether
+ // journaling for that type is enabled/suppressed, and to tag journal
+ // entries appropriately.
+ RegisterEventType(system, event string) EventType
+}
+
+// eventTypeRegistry is an embeddable mixin that takes care of tracking disabled
+// event types, and returning initialized/safe EventTypes when requested.
+type eventTypeRegistry struct {
+ sync.Mutex
+
+ m map[string]EventType
+}
+
+var _ EventTypeRegistry = (*eventTypeRegistry)(nil)
+
+func NewEventTypeRegistry(disabled DisabledEvents) EventTypeRegistry {
+ ret := &eventTypeRegistry{
+ m: make(map[string]EventType, len(disabled)+32), // + extra capacity.
+ }
+
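+ // Pre-seed the registry with the disabled event types, marked as constructed
+ // ("safe") but not enabled, so that later registrations of the same type
+ // return the disabled token.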
+ for _, et := range disabled {
+ et.enabled, et.safe = false, true
+ ret.m[et.System+":"+et.Event] = et
+ }
+
+ return ret
+}
+
+func (d *eventTypeRegistry) RegisterEventType(system, event string) EventType {
+ d.Lock()
+ defer d.Unlock()
+
+ key := system + ":" + event
+ if et, ok := d.m[key]; ok {
+ return et
+ }
+
+ et := EventType{
+ System: system,
+ Event: event,
+ enabled: true,
+ safe: true,
+ }
+
+ d.m[key] = et
+ return et
+}
diff --git a/journal/registry_test.go b/journal/registry_test.go
new file mode 100644
index 000000000..bce3d3f17
--- /dev/null
+++ b/journal/registry_test.go
@@ -0,0 +1,49 @@
+package journal
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestDisabledEvents(t *testing.T) {
+ req := require.New(t)
+
+ test := func(dis DisabledEvents) func(*testing.T) {
+ return func(t *testing.T) {
+ registry := NewEventTypeRegistry(dis)
+
+ reg1 := registry.RegisterEventType("system1", "disabled1")
+ reg2 := registry.RegisterEventType("system1", "disabled2")
+
+ req.False(reg1.Enabled())
+ req.False(reg2.Enabled())
+ req.True(reg1.safe)
+ req.True(reg2.safe)
+
+ reg3 := registry.RegisterEventType("system3", "enabled3")
+ req.True(reg3.Enabled())
+ req.True(reg3.safe)
+ }
+ }
+
+ t.Run("direct", test(DisabledEvents{
+ EventType{System: "system1", Event: "disabled1"},
+ EventType{System: "system1", Event: "disabled2"},
+ }))
+
+ dis, err := ParseDisabledEvents("system1:disabled1,system1:disabled2")
+ req.NoError(err)
+
+ t.Run("parsed", test(dis))
+
+ dis, err = ParseDisabledEvents(" system1:disabled1 , system1:disabled2 ")
+ req.NoError(err)
+
+ t.Run("parsed_spaces", test(dis))
+}
+
+func TestParseDisableEvents(t *testing.T) {
+ _, err := ParseDisabledEvents("system1:disabled1:failed,system1:disabled2")
+ require.Error(t, err)
+}
diff --git a/journal/types.go b/journal/types.go
new file mode 100644
index 000000000..5b51ed4c8
--- /dev/null
+++ b/journal/types.go
@@ -0,0 +1,102 @@
+package journal
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ logging "github.com/ipfs/go-log"
+)
+
+var log = logging.Logger("journal")
+
+var (
+ // DefaultDisabledEvents lists the journal events disabled by
+ // default, usually because they are considered noisy.
+ DefaultDisabledEvents = DisabledEvents{
+ EventType{System: "mpool", Event: "add"},
+ EventType{System: "mpool", Event: "remove"},
+ }
+)
+
+// DisabledEvents is the set of event types whose journaling is suppressed.
+type DisabledEvents []EventType
+
+// ParseDisabledEvents parses a string of the form: "system1:event1,system1:event2[,...]"
+// into a DisabledEvents object, returning an error if the string failed to parse.
+//
+// It sanitizes strings via strings.TrimSpace.
+func ParseDisabledEvents(s string) (DisabledEvents, error) {
+ s = strings.TrimSpace(s) // sanitize
+ evts := strings.Split(s, ",")
+ ret := make(DisabledEvents, 0, len(evts))
+ for _, evt := range evts {
+ evt = strings.TrimSpace(evt) // sanitize
+ s := strings.Split(evt, ":")
+ if len(s) != 2 {
+ return nil, fmt.Errorf("invalid event type: %s", s)
+ }
+ ret = append(ret, EventType{System: s[0], Event: s[1]})
+ }
+ return ret, nil
+}
+
+// EventType represents the signature of an event.
+type EventType struct {
+ System string
+ Event string
+
+ // enabled stores whether this event type is enabled.
+ enabled bool
+
+ // safe is a sentinel marker that's set to true if this EventType was
+ // constructed correctly (via Journal#RegisterEventType).
+ safe bool
+}
+
+func (et EventType) String() string {
+ return et.System + ":" + et.Event
+}
+
+// Enabled returns whether this event type is enabled in the journaling
+// subsystem. Users are advised to check this before actually attempting to
+// add a journal entry, as it helps bypass object construction for events that
+// would be discarded anyway.
+//
+// All event types are enabled by default, and specific event types can only
+// be disabled at Journal construction time.
+func (et EventType) Enabled() bool {
+ return et.safe && et.enabled
+}
+
+// Journal represents an audit trail of system actions.
+//
+// Every entry is tagged with a timestamp, a system name, and an event name.
+// The supplied data can be any type, as long as it is JSON serializable,
+// including structs, map[string]interface{}, or primitive types.
+//
+// For cleanliness and type safety, we recommend using typed events. See the
+// *Evt struct types in this package for more info.
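+//
+// Minimal usage sketch (the event type and payload names are illustrative):
+//
+//   et := journal.J.RegisterEventType("mysystem", "myevent")
+//   journal.J.RecordEvent(et, func() interface{} { return MyEvt{Msg: "hello"} })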
+type Journal interface {
+ EventTypeRegistry
+
+ // RecordEvent records this event to the journal, if and only if the
+ // EventType is enabled. If so, it calls the supplier function to obtain
+ // the payload to record.
+ //
+ // Implementations MUST recover from panics raised by the supplier function.
+ RecordEvent(evtType EventType, supplier func() interface{})
+
+ // Close closes this journal for further writing.
+ Close() error
+}
+
+// Event represents a journal entry.
+//
+// See godocs on Journal for more information.
+type Event struct {
+ EventType
+
+ Timestamp time.Time
+ Data interface{}
+}
diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go
new file mode 100644
index 000000000..1555577f3
--- /dev/null
+++ b/lib/backupds/datastore.go
@@ -0,0 +1,189 @@
+package backupds
+
+import (
+ "crypto/sha256"
+ "io"
+ "sync"
+
+ logging "github.com/ipfs/go-log/v2"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+)
+
+var log = logging.Logger("backupds")
+
+type Datastore struct {
+ child datastore.Batching
+
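+ // backupLk is held for writing while Backup runs, and for reading by every
+ // mutating operation (Put, Delete, Sync, batch Commit, Close), so a backup
+ // observes a consistent snapshot of the child datastore.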
+ backupLk sync.RWMutex
+}
+
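+// Wrap wraps a datastore so that a consistent Backup of its contents can be
+// taken; mutating operations are blocked for the duration of a backup.
+// Minimal usage sketch (illustrative only):
+//
+//   bds := backupds.Wrap(child)
+//   // ... serve reads and writes through bds as usual ...
+//   if err := bds.Backup(w); err != nil { /* handle error */ }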
+func Wrap(child datastore.Batching) *Datastore {
+ return &Datastore{
+ child: child,
+ }
+}
+
+// Backup writes a datastore dump into the provided writer, encoded as
+// [indefinite-length array of [key, value] tuples, checksum].
+func (d *Datastore) Backup(out io.Writer) error {
+ scratch := make([]byte, 9)
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, out, cbg.MajArray, 2); err != nil {
+ return xerrors.Errorf("writing tuple header: %w", err)
+ }
+
+ hasher := sha256.New()
+ hout := io.MultiWriter(hasher, out)
+
+ // write KVs
+ {
+ // write indefinite length array header
+ if _, err := hout.Write([]byte{0x9f}); err != nil {
+ return xerrors.Errorf("writing header: %w", err)
+ }
+
+ d.backupLk.Lock()
+ defer d.backupLk.Unlock()
+
+ log.Info("Starting datastore backup")
+ defer log.Info("Datastore backup done")
+
+ qr, err := d.child.Query(query.Query{})
+ if err != nil {
+ return xerrors.Errorf("query: %w", err)
+ }
+ defer func() {
+ if err := qr.Close(); err != nil {
+ log.Errorf("query close error: %+v", err)
+ return
+ }
+ }()
+
+ for result := range qr.Next() {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, hout, cbg.MajArray, 2); err != nil {
+ return xerrors.Errorf("writing tuple header: %w", err)
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, hout, cbg.MajByteString, uint64(len([]byte(result.Key)))); err != nil {
+ return xerrors.Errorf("writing key header: %w", err)
+ }
+
+ if _, err := hout.Write([]byte(result.Key)[:]); err != nil {
+ return xerrors.Errorf("writing key: %w", err)
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, hout, cbg.MajByteString, uint64(len(result.Value))); err != nil {
+ return xerrors.Errorf("writing value header: %w", err)
+ }
+
+ if _, err := hout.Write(result.Value[:]); err != nil {
+ return xerrors.Errorf("writing value: %w", err)
+ }
+ }
+
+ // array break
+ if _, err := hout.Write([]byte{0xff}); err != nil {
+ return xerrors.Errorf("writing array 'break': %w", err)
+ }
+ }
+
+ // Write the checksum
+ {
+ sum := hasher.Sum(nil)
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, hout, cbg.MajByteString, uint64(len(sum))); err != nil {
+ return xerrors.Errorf("writing checksum header: %w", err)
+ }
+
+ if _, err := hout.Write(sum[:]); err != nil {
+ return xerrors.Errorf("writing checksum: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// The methods below proxy to the underlying datastore; mutating operations additionally take the backup read-lock.
+
+func (d *Datastore) Get(key datastore.Key) (value []byte, err error) {
+ return d.child.Get(key)
+}
+
+func (d *Datastore) Has(key datastore.Key) (exists bool, err error) {
+ return d.child.Has(key)
+}
+
+func (d *Datastore) GetSize(key datastore.Key) (size int, err error) {
+ return d.child.GetSize(key)
+}
+
+func (d *Datastore) Query(q query.Query) (query.Results, error) {
+ return d.child.Query(q)
+}
+
+func (d *Datastore) Put(key datastore.Key, value []byte) error {
+ d.backupLk.RLock()
+ defer d.backupLk.RUnlock()
+
+ return d.child.Put(key, value)
+}
+
+func (d *Datastore) Delete(key datastore.Key) error {
+ d.backupLk.RLock()
+ defer d.backupLk.RUnlock()
+
+ return d.child.Delete(key)
+}
+
+func (d *Datastore) Sync(prefix datastore.Key) error {
+ d.backupLk.RLock()
+ defer d.backupLk.RUnlock()
+
+ return d.child.Sync(prefix)
+}
+
+func (d *Datastore) Close() error {
+ d.backupLk.RLock()
+ defer d.backupLk.RUnlock()
+
+ return d.child.Close()
+}
+
+func (d *Datastore) Batch() (datastore.Batch, error) {
+ b, err := d.child.Batch()
+ if err != nil {
+ return nil, err
+ }
+
+ return &bbatch{
+ b: b,
+ rlk: d.backupLk.RLocker(),
+ }, nil
+}
+
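+// bbatch wraps a child batch: Put and Delete buffer as usual, and only Commit
+// takes the backup read-lock, so a batch lands either entirely before or
+// entirely after a concurrent Backup, never in the middle of one.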
+type bbatch struct {
+ b datastore.Batch
+ rlk sync.Locker
+}
+
+func (b *bbatch) Put(key datastore.Key, value []byte) error {
+ return b.b.Put(key, value)
+}
+
+func (b *bbatch) Delete(key datastore.Key) error {
+ return b.b.Delete(key)
+}
+
+func (b *bbatch) Commit() error {
+ b.rlk.Lock()
+ defer b.rlk.Unlock()
+
+ return b.b.Commit()
+}
+
+var _ datastore.Batch = &bbatch{}
+var _ datastore.Batching = &Datastore{}
diff --git a/lib/backupds/read.go b/lib/backupds/read.go
new file mode 100644
index 000000000..f9a433637
--- /dev/null
+++ b/lib/backupds/read.go
@@ -0,0 +1,100 @@
+package backupds
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "io"
+
+ "github.com/ipfs/go-datastore"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+)
+
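+// ReadBackup reads a backup stream produced by Datastore.Backup, invoking cb
+// for every key/value pair and verifying the trailing SHA-256 checksum over
+// the key/value section.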
+func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) error {
+ scratch := make([]byte, 9)
+
+ if _, err := r.Read(scratch[:1]); err != nil {
+ return xerrors.Errorf("reading array header: %w", err)
+ }
+
+ if scratch[0] != 0x82 {
+ return xerrors.Errorf("expected array(2) header byte 0x82, got %x", scratch[0])
+ }
+
+ hasher := sha256.New()
+ hr := io.TeeReader(r, hasher)
+
+ if _, err := hr.Read(scratch[:1]); err != nil {
+ return xerrors.Errorf("reading array header: %w", err)
+ }
+
+ if scratch[0] != 0x9f {
+ return xerrors.Errorf("expected indefinite length array header byte 0x9f, got %x", scratch[0])
+ }
+
+ for {
+ if _, err := hr.Read(scratch[:1]); err != nil {
+ return xerrors.Errorf("reading tuple header: %w", err)
+ }
+
+ if scratch[0] == 0xff {
+ break
+ }
+
+ if scratch[0] != 0x82 {
+ return xerrors.Errorf("expected array(2) header 0x82, got %x", scratch[0])
+ }
+
+ keyb, err := cbg.ReadByteArray(hr, 1<<40)
+ if err != nil {
+ return xerrors.Errorf("reading key: %w", err)
+ }
+ key := datastore.NewKey(string(keyb))
+
+ value, err := cbg.ReadByteArray(hr, 1<<40)
+ if err != nil {
+ return xerrors.Errorf("reading value: %w", err)
+ }
+
+ if err := cb(key, value); err != nil {
+ return err
+ }
+ }
+
+ sum := hasher.Sum(nil)
+
+ expSum, err := cbg.ReadByteArray(r, 32)
+ if err != nil {
+ return xerrors.Errorf("reading expected checksum: %w", err)
+ }
+
+ if !bytes.Equal(sum, expSum) {
+ return xerrors.Errorf("checksum didn't match; expected %x, got %x", expSum, sum)
+ }
+
+ return nil
+}
+
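+// RestoreInto replays a backup stream into the destination datastore, writing
+// all entries through a single batch and committing it at the end.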
+func RestoreInto(r io.Reader, dest datastore.Batching) error {
+ batch, err := dest.Batch()
+ if err != nil {
+ return xerrors.Errorf("creating batch: %w", err)
+ }
+
+ err = ReadBackup(r, func(key datastore.Key, value []byte) error {
+ if err := batch.Put(key, value); err != nil {
+ return xerrors.Errorf("put key: %w", err)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return xerrors.Errorf("reading backup: %w", err)
+ }
+
+ if err := batch.Commit(); err != nil {
+ return xerrors.Errorf("committing batch: %w", err)
+ }
+
+ return nil
+}
diff --git a/lib/peermgr/peermgr.go b/lib/peermgr/peermgr.go
index 80b05e8ce..2f9d34674 100644
--- a/lib/peermgr/peermgr.go
+++ b/lib/peermgr/peermgr.go
@@ -10,7 +10,10 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
"go.opencensus.io/stats"
"go.uber.org/fx"
+ "go.uber.org/multierr"
+ "golang.org/x/xerrors"
+ "github.com/libp2p/go-libp2p-core/event"
host "github.com/libp2p/go-libp2p-core/host"
net "github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-core/peer"
@@ -50,12 +53,17 @@ type PeerMgr struct {
h host.Host
dht *dht.IpfsDHT
- notifee *net.NotifyBundle
+ notifee *net.NotifyBundle
+ filPeerEmitter event.Emitter
done chan struct{}
}
-func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes.BootstrapPeers) *PeerMgr {
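+// NewFilPeer is published on the host's event bus whenever the peer manager
+// learns about a new Filecoin peer (see AddFilecoinPeer).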
+type NewFilPeer struct {
+ Id peer.ID
+}
+
+func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes.BootstrapPeers) (*PeerMgr, error) {
pm := &PeerMgr{
h: h,
dht: dht,
@@ -69,10 +77,18 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
done: make(chan struct{}),
}
+ emitter, err := h.EventBus().Emitter(new(NewFilPeer))
+ if err != nil {
+ return nil, xerrors.Errorf("creating NewFilPeer emitter: %w", err)
+ }
+ pm.filPeerEmitter = emitter
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
- return pm.Stop(ctx)
+ return multierr.Combine(
+ pm.filPeerEmitter.Close(),
+ pm.Stop(ctx),
+ )
},
})
@@ -84,10 +100,11 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
h.Network().Notify(pm.notifee)
- return pm
+ return pm, nil
}
func (pmgr *PeerMgr) AddFilecoinPeer(p peer.ID) {
+ _ = pmgr.filPeerEmitter.Emit(NewFilPeer{Id: p}) //nolint:errcheck
pmgr.peersLk.Lock()
defer pmgr.peersLk.Unlock()
pmgr.peers[p] = time.Duration(0)
diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go
index 3f4d5c604..617c6495e 100644
--- a/lib/rpcenc/reader.go
+++ b/lib/rpcenc/reader.go
@@ -19,8 +19,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
var log = logging.Logger("rpcenc")
diff --git a/lib/sigs/bls/init.go b/lib/sigs/bls/init.go
index c63cf0b65..42633eee8 100644
--- a/lib/sigs/bls/init.go
+++ b/lib/sigs/bls/init.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
blst "github.com/supranational/blst/bindings/go"
diff --git a/lib/sigs/secp/init.go b/lib/sigs/secp/init.go
index 1285b19b6..674bbbb28 100644
--- a/lib/sigs/secp/init.go
+++ b/lib/sigs/secp/init.go
@@ -5,7 +5,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-crypto"
- crypto2 "github.com/filecoin-project/specs-actors/actors/crypto"
+ crypto2 "github.com/filecoin-project/go-state-types/crypto"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/lotus/lib/sigs"
diff --git a/lib/sigs/sigs.go b/lib/sigs/sigs.go
index 4a4fd7340..1f56846a8 100644
--- a/lib/sigs/sigs.go
+++ b/lib/sigs/sigs.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/crypto"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
diff --git a/lib/tablewriter/tablewriter.go b/lib/tablewriter/tablewriter.go
index d77611390..cd045710e 100644
--- a/lib/tablewriter/tablewriter.go
+++ b/lib/tablewriter/tablewriter.go
@@ -12,6 +12,7 @@ import (
type Column struct {
Name string
SeparateLine bool
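+ // Lines counts how many values have been written to this column; columns
+ // left at zero are skipped entirely when flushing.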
+ Lines int
}
type TableWriter struct {
@@ -50,6 +51,7 @@ cloop:
for i, column := range w.cols {
if column.Name == col {
byColID[i] = fmt.Sprint(val)
+ w.cols[i].Lines++
continue cloop
}
}
@@ -58,6 +60,7 @@ cloop:
w.cols = append(w.cols, Column{
Name: col,
SeparateLine: false,
+ Lines: 1,
})
}
@@ -77,7 +80,11 @@ func (w *TableWriter) Flush(out io.Writer) error {
w.rows = append([]map[int]string{header}, w.rows...)
- for col := range w.cols {
+ for col, c := range w.cols {
+ if c.Lines == 0 {
+ continue
+ }
+
for _, row := range w.rows {
val, found := row[col]
if !found {
@@ -94,9 +101,13 @@ func (w *TableWriter) Flush(out io.Writer) error {
cols := make([]string, len(w.cols))
for ci, col := range w.cols {
+ if col.Lines == 0 {
+ continue
+ }
+
e, _ := row[ci]
pad := colLengths[ci] - cliStringLength(e) + 2
- if !col.SeparateLine {
+ if !col.SeparateLine && col.Lines > 0 {
e = e + strings.Repeat(" ", pad)
if _, err := fmt.Fprint(out, e); err != nil {
return err
diff --git a/lotuspond/front/src/chain/methodgen.go b/lotuspond/front/src/chain/methodgen.go
index 3197803fe..5a00d5e6e 100644
--- a/lotuspond/front/src/chain/methodgen.go
+++ b/lotuspond/front/src/chain/methodgen.go
@@ -4,12 +4,11 @@ import (
"encoding/json"
"io/ioutil"
"os"
- "reflect"
- "github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
- "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/stmgr"
)
func main() {
@@ -17,6 +16,7 @@ func main() {
panic(err) // note: must run in lotuspond/front/src/chain
}
+ // TODO: ActorUpgrade: this is going to be a problem.
names := map[string]string{
"system": "fil/1/system",
"init": "fil/1/init",
@@ -42,33 +42,25 @@ func main() {
}
}
- methods := map[cid.Cid]interface{}{
- // builtin.SystemActorCodeID: builtin.MethodsSystem - apparently it doesn't have methods
- builtin.InitActorCodeID: builtin.MethodsInit,
- builtin.CronActorCodeID: builtin.MethodsCron,
- builtin.AccountActorCodeID: builtin.MethodsAccount,
- builtin.StoragePowerActorCodeID: builtin.MethodsPower,
- builtin.StorageMinerActorCodeID: builtin.MethodsMiner,
- builtin.StorageMarketActorCodeID: builtin.MethodsMarket,
- builtin.PaymentChannelActorCodeID: builtin.MethodsPaych,
- builtin.MultisigActorCodeID: builtin.MethodsMultisig,
- builtin.RewardActorCodeID: builtin.MethodsReward,
- builtin.VerifiedRegistryActorCodeID: builtin.MethodsVerifiedRegistry,
- }
-
out := map[string][]string{}
- for c, methods := range methods {
+
+ for c, methods := range stmgr.MethodsMap {
cmh, err := multihash.Decode(c.Hash())
if err != nil {
panic(err)
}
- rt := reflect.TypeOf(methods)
- nf := rt.NumField()
+ name := string(cmh.Digest)
+ remaining := len(methods)
- out[string(cmh.Digest)] = append(out[string(cmh.Digest)], "Send")
- for i := 0; i < nf; i++ {
- out[string(cmh.Digest)] = append(out[string(cmh.Digest)], rt.Field(i).Name)
+ // iterate over actor methods in order.
+ for i := abi.MethodNum(0); remaining > 0; i++ {
+ m, ok := methods[i]
+ if !ok {
+ continue
+ }
+ out[name] = append(out[name], m.Name)
+ remaining--
}
}
diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json
index ad1076c84..b271bfae5 100644
--- a/lotuspond/front/src/chain/methods.json
+++ b/lotuspond/front/src/chain/methods.json
@@ -23,7 +23,8 @@
"AddSigner",
"RemoveSigner",
"SwapSigner",
- "ChangeNumApprovalsThreshold"
+ "ChangeNumApprovalsThreshold",
+ "LockBalance"
],
"fil/1/paymentchannel": [
"Send",
@@ -86,6 +87,10 @@
"SubmitPoRepForBulkVerify",
"CurrentTotalPower"
],
+ "fil/1/system": [
+ "Send",
+ "Constructor"
+ ],
"fil/1/verifiedregistry": [
"Send",
"Constructor",
@@ -94,5 +99,108 @@
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
+ ],
+ "fil/2/account": [
+ "Send",
+ "Constructor",
+ "PubkeyAddress"
+ ],
+ "fil/2/cron": [
+ "Send",
+ "Constructor",
+ "EpochTick"
+ ],
+ "fil/2/init": [
+ "Send",
+ "Constructor",
+ "Exec"
+ ],
+ "fil/2/multisig": [
+ "Send",
+ "Constructor",
+ "Propose",
+ "Approve",
+ "Cancel",
+ "AddSigner",
+ "RemoveSigner",
+ "SwapSigner",
+ "ChangeNumApprovalsThreshold",
+ "LockBalance"
+ ],
+ "fil/2/paymentchannel": [
+ "Send",
+ "Constructor",
+ "UpdateChannelState",
+ "Settle",
+ "Collect"
+ ],
+ "fil/2/reward": [
+ "Send",
+ "Constructor",
+ "AwardBlockReward",
+ "ThisEpochReward",
+ "UpdateNetworkKPI"
+ ],
+ "fil/2/storagemarket": [
+ "Send",
+ "Constructor",
+ "AddBalance",
+ "WithdrawBalance",
+ "PublishStorageDeals",
+ "VerifyDealsForActivation",
+ "ActivateDeals",
+ "OnMinerSectorsTerminate",
+ "ComputeDataCommitment",
+ "CronTick"
+ ],
+ "fil/2/storageminer": [
+ "Send",
+ "Constructor",
+ "ControlAddresses",
+ "ChangeWorkerAddress",
+ "ChangePeerID",
+ "SubmitWindowedPoSt",
+ "PreCommitSector",
+ "ProveCommitSector",
+ "ExtendSectorExpiration",
+ "TerminateSectors",
+ "DeclareFaults",
+ "DeclareFaultsRecovered",
+ "OnDeferredCronEvent",
+ "CheckSectorProven",
+ "ApplyRewards",
+ "ReportConsensusFault",
+ "WithdrawBalance",
+ "ConfirmSectorProofsValid",
+ "ChangeMultiaddrs",
+ "CompactPartitions",
+ "CompactSectorNumbers",
+ "ConfirmUpdateWorkerKey",
+ "RepayDebt",
+ "ChangeOwnerAddress"
+ ],
+ "fil/2/storagepower": [
+ "Send",
+ "Constructor",
+ "CreateMiner",
+ "UpdateClaimedPower",
+ "EnrollCronEvent",
+ "OnEpochTickEnd",
+ "UpdatePledgeTotal",
+ "SubmitPoRepForBulkVerify",
+ "CurrentTotalPower"
+ ],
+ "fil/2/system": [
+ "Send",
+ "Constructor"
+ ],
+ "fil/2/verifiedregistry": [
+ "Send",
+ "Constructor",
+ "AddVerifier",
+ "RemoveVerifier",
+ "AddVerifiedClient",
+ "UseBytes",
+ "RestoreBytes"
]
}
\ No newline at end of file
diff --git a/lotuspond/spawn.go b/lotuspond/spawn.go
index f4e8decee..ce01b115e 100644
--- a/lotuspond/spawn.go
+++ b/lotuspond/spawn.go
@@ -11,24 +11,22 @@ import (
"sync/atomic"
"time"
- "github.com/filecoin-project/lotus/chain/types"
-
+ "github.com/google/uuid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
"github.com/filecoin-project/lotus/genesis"
)
func init() {
- miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
}
func (api *api) Spawn() (nodeInfo, error) {
@@ -71,6 +69,8 @@ func (api *api) Spawn() (nodeInfo, error) {
Meta: (&genesis.AccountMeta{Owner: genm.Owner}).ActorMeta(),
})
template.VerifregRootKey = gen.DefaultVerifregRootkeyActor
+ template.RemainderAccount = gen.DefaultRemainderAccountActor
+ template.NetworkName = "pond-" + uuid.New().String()
tb, err := json.Marshal(&template)
if err != nil {
@@ -190,7 +190,7 @@ func (api *api) SpawnStorage(fullNodeRepo string) (nodeInfo, error) {
mux := newWsMux()
- cmd = exec.Command("./lotus-miner", "run", "--api", fmt.Sprintf("%d", 2500+id), "--nosync")
+ cmd = exec.Command("./lotus-miner", "run", "--miner-api", fmt.Sprintf("%d", 2500+id), "--nosync")
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile, mux.errpw)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile, mux.outpw)
cmd.Env = append(os.Environ(), "LOTUS_MINER_PATH="+dir, "LOTUS_PATH="+fullNodeRepo)
@@ -250,7 +250,7 @@ func (api *api) RestartNode(id int32) (nodeInfo, error) {
var cmd *exec.Cmd
if nd.meta.Storage {
- cmd = exec.Command("./lotus-miner", "run", "--api", fmt.Sprintf("%d", 2500+id), "--nosync")
+ cmd = exec.Command("./lotus-miner", "run", "--miner-api", fmt.Sprintf("%d", 2500+id), "--nosync")
} else {
cmd = exec.Command("./lotus", "daemon", "--api", fmt.Sprintf("%d", 2500+id))
}
diff --git a/markets/journal.go b/markets/journal.go
new file mode 100644
index 000000000..a73d28011
--- /dev/null
+++ b/markets/journal.go
@@ -0,0 +1,76 @@
+package markets
+
+import (
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/go-fil-markets/storagemarket"
+
+ "github.com/filecoin-project/lotus/journal"
+)
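+
+// The journalers in this file are intended to be hooked into the markets
+// event subscriptions (storage/retrieval client and provider), translating
+// each event notification into a typed journal entry recorded via the
+// global journal.J.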
+
+type StorageClientEvt struct {
+ Event string
+ Deal storagemarket.ClientDeal
+}
+
+type StorageProviderEvt struct {
+ Event string
+ Deal storagemarket.MinerDeal
+}
+
+type RetrievalClientEvt struct {
+ Event string
+ Deal retrievalmarket.ClientDealState
+}
+
+type RetrievalProviderEvt struct {
+ Event string
+ Deal retrievalmarket.ProviderDealState
+}
+
+// StorageClientJournaler records journal events from the storage client.
+func StorageClientJournaler(evtType journal.EventType) func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
+ return func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
+ journal.J.RecordEvent(evtType, func() interface{} {
+ return StorageClientEvt{
+ Event: storagemarket.ClientEvents[event],
+ Deal: deal,
+ }
+ })
+ }
+}
+
+// StorageProviderJournaler records journal events from the storage provider.
+func StorageProviderJournaler(evtType journal.EventType) func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
+ return func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
+ journal.J.RecordEvent(evtType, func() interface{} {
+ return StorageProviderEvt{
+ Event: storagemarket.ProviderEvents[event],
+ Deal: deal,
+ }
+ })
+ }
+}
+
+// RetrievalClientJournaler records journal events from the retrieval client.
+func RetrievalClientJournaler(evtType journal.EventType) func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
+ return func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
+ journal.J.RecordEvent(evtType, func() interface{} {
+ return RetrievalClientEvt{
+ Event: retrievalmarket.ClientEvents[event],
+ Deal: deal,
+ }
+ })
+ }
+}
+
+// RetrievalProviderJournaler records journal events from the retrieval provider.
+func RetrievalProviderJournaler(evtType journal.EventType) func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
+ return func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
+ journal.J.RecordEvent(evtType, func() interface{} {
+ return RetrievalProviderEvt{
+ Event: retrievalmarket.ProviderEvents[event],
+ Deal: deal,
+ }
+ })
+ }
+}
diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go
index 6f386dbba..a8e1c20aa 100644
--- a/markets/loggers/loggers.go
+++ b/markets/loggers/loggers.go
@@ -3,7 +3,7 @@ package marketevents
import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log/v2"
)
@@ -29,6 +29,17 @@ func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrieval
log.Infow("retrieval event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
}
+// ReadyLogger returns a function to log the results of module initialization
+func ReadyLogger(module string) func(error) {
+ return func(err error) {
+ if err != nil {
+ log.Errorw("module initialization error", "module", module, "err", err)
+ } else {
+ log.Infow("module ready", "module", module)
+ }
+ }
+}
+
type RetrievalEvent struct {
Event retrievalmarket.ClientEvent
Status retrievalmarket.DealStatus
diff --git a/markets/retrievaladapter/client.go b/markets/retrievaladapter/client.go
index e57a11bd1..1bef23e12 100644
--- a/markets/retrievaladapter/client.go
+++ b/markets/retrievaladapter/client.go
@@ -3,34 +3,29 @@ package retrievaladapter
import (
"context"
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
-
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multiaddr"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl/full"
payapi "github.com/filecoin-project/lotus/node/impl/paych"
- "github.com/filecoin-project/lotus/paychmgr"
)
type retrievalClientNode struct {
chainAPI full.ChainAPI
- pmgr *paychmgr.Manager
payAPI payapi.PaychAPI
stateAPI full.StateAPI
}
// NewRetrievalClientNode returns a new node adapter for a retrieval client that talks to the
// Lotus Node
-func NewRetrievalClientNode(pmgr *paychmgr.Manager, payAPI payapi.PaychAPI, chainAPI full.ChainAPI, stateAPI full.StateAPI) retrievalmarket.RetrievalClientNode {
- return &retrievalClientNode{pmgr: pmgr, payAPI: payAPI, chainAPI: chainAPI, stateAPI: stateAPI}
+func NewRetrievalClientNode(payAPI payapi.PaychAPI, chainAPI full.ChainAPI, stateAPI full.StateAPI) retrievalmarket.RetrievalClientNode {
+ return &retrievalClientNode{payAPI: payAPI, chainAPI: chainAPI, stateAPI: stateAPI}
}
// GetOrCreatePaymentChannel sets up a new payment channel if one does not exist
@@ -39,14 +34,18 @@ func NewRetrievalClientNode(pmgr *paychmgr.Manager, payAPI payapi.PaychAPI, chai
func (rcn *retrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) {
// TODO: respect the provided TipSetToken (a serialized TipSetKey) when
// querying the chain
- return rcn.pmgr.GetPaych(ctx, clientAddress, minerAddress, clientFundsAvailable)
+ ci, err := rcn.payAPI.PaychGet(ctx, clientAddress, minerAddress, clientFundsAvailable)
+ if err != nil {
+ return address.Undef, cid.Undef, err
+ }
+ return ci.Channel, ci.WaitSentinel, nil
}
// AllocateLane creates a lane within a payment channel so that calls to
// CreatePaymentVoucher will automatically make vouchers only for the difference
// in total
-func (rcn *retrievalClientNode) AllocateLane(paymentChannel address.Address) (uint64, error) {
- return rcn.pmgr.AllocateLane(paymentChannel)
+func (rcn *retrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) {
+ return rcn.payAPI.PaychAllocateLane(ctx, paymentChannel)
}
// CreatePaymentVoucher creates a new payment voucher in the given lane for a
@@ -60,7 +59,7 @@ func (rcn *retrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymen
return nil, err
}
if voucher.Voucher == nil {
- return nil, xerrors.Errorf("Could not create voucher - shortfall: %d", voucher.Shortfall)
+ return nil, retrievalmarket.NewShortfallError(voucher.Shortfall)
}
return voucher.Voucher, nil
}
@@ -74,15 +73,23 @@ func (rcn *retrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSet
return head.Key().Bytes(), head.Height(), nil
}
-// WaitForPaymentChannelAddFunds waits messageCID to appear on chain. If it doesn't appear within
-// defaultMsgWaitTimeout it returns error
-func (rcn *retrievalClientNode) WaitForPaymentChannelAddFunds(messageCID cid.Cid) error {
- _, err := rcn.payAPI.PaychMgr.GetPaychWaitReady(context.TODO(), messageCID)
- return err
+func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context, messageCID cid.Cid) (address.Address, error) {
+ return rcn.payAPI.PaychGetWaitReady(ctx, messageCID)
}
-func (rcn *retrievalClientNode) WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) {
- return rcn.payAPI.PaychMgr.GetPaychWaitReady(context.TODO(), messageCID)
+func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) {
+
+ channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel)
+ if err != nil {
+ return retrievalmarket.ChannelAvailableFunds{}, err
+ }
+ return retrievalmarket.ChannelAvailableFunds{
+ ConfirmedAmt: channelAvailableFunds.ConfirmedAmt,
+ PendingAmt: channelAvailableFunds.PendingAmt,
+ PendingWaitSentinel: channelAvailableFunds.PendingWaitSentinel,
+ QueuedAmt: channelAvailableFunds.QueuedAmt,
+ VoucherReedeemedAmt: channelAvailableFunds.VoucherReedeemedAmt,
+ }, nil
}
func (rcn *retrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, encodedTs shared.TipSetToken) ([]multiaddr.Multiaddr, error) {
diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go
index f22a31ccc..674ec4793 100644
--- a/markets/retrievaladapter/provider.go
+++ b/markets/retrievaladapter/provider.go
@@ -5,6 +5,7 @@ import (
"io"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@@ -13,8 +14,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go
index 03e5b5353..824693dac 100644
--- a/markets/storageadapter/client.go
+++ b/markets/storageadapter/client.go
@@ -6,7 +6,9 @@ import (
"bytes"
"context"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
"golang.org/x/xerrors"
@@ -14,7 +16,11 @@ import (
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/market"
@@ -24,12 +30,6 @@ import (
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/impl/full"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- samarket "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/ipfs/go-cid"
)
@@ -97,37 +97,14 @@ func (c *ClientNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Sign
return err == nil, err
}
-func (c *ClientNodeAdapter) ListClientDeals(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) ([]storagemarket.StorageDeal, error) {
- tsk, err := types.TipSetKeyFromBytes(encodedTs)
- if err != nil {
- return nil, err
- }
-
- allDeals, err := c.StateMarketDeals(ctx, tsk)
- if err != nil {
- return nil, err
- }
-
- var out []storagemarket.StorageDeal
-
- for _, deal := range allDeals {
- storageDeal := utils.FromOnChainDeal(deal.Proposal, deal.State)
- if storageDeal.Client == addr {
- out = append(out, storageDeal)
- }
- }
-
- return out, nil
-}
-
// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients.
func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) {
// (Provider Node API)
smsg, err := c.MpoolPushMessage(ctx, &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: miner0.StorageMarketActorAddr,
From: addr,
Value: amount,
- Method: builtin.MethodsMarket.AddBalance,
+ Method: miner0.MethodsMarket.AddBalance,
}, nil)
if err != nil {
return cid.Undef, err
@@ -161,7 +138,7 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor
pubmsg, err := c.cs.GetMessage(*deal.PublishMessage)
if err != nil {
- return 0, xerrors.Errorf("getting deal pubsish message: %w", err)
+ return 0, xerrors.Errorf("getting deal publish message: %w", err)
}
mi, err := stmgr.StateMinerInfo(ctx, c.sm, c.cs.GetHeaviestTipSet(), deal.Proposal.Provider)
@@ -178,15 +155,15 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor
return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider)
}
- if pubmsg.To != builtin.StorageMarketActorAddr {
+ if pubmsg.To != miner0.StorageMarketActorAddr {
return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To)
}
- if pubmsg.Method != builtin.MethodsMarket.PublishStorageDeals {
+ if pubmsg.Method != miner0.MethodsMarket.PublishStorageDeals {
return 0, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method)
}
- var params samarket.PublishStorageDealsParams
+ var params market0.PublishStorageDealsParams
if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil {
return 0, err
}
@@ -218,7 +195,7 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor
return 0, xerrors.Errorf("deal publish failed: exit=%d", ret.ExitCode)
}
- var res samarket.PublishStorageDealsReturn
+ var res market0.PublishStorageDealsReturn
if err := res.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil {
return 0, err
}
@@ -296,7 +273,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider
}
switch msg.Method {
- case builtin.MethodsMiner.PreCommitSector:
+ case miner0.MethodsMiner.PreCommitSector:
var params miner.SectorPreCommitInfo
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return true, false, xerrors.Errorf("unmarshal pre commit: %w", err)
@@ -311,7 +288,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider
}
return true, false, nil
- case builtin.MethodsMiner.ProveCommitSector:
+ case miner0.MethodsMiner.ProveCommitSector:
var params miner.ProveCommitSectorParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err)
@@ -351,6 +328,11 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a
// Called immediately to check if the deal has already expired or been slashed
checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) {
+ if ts == nil {
+ // keep listening for events
+ return false, true, nil
+ }
+
// Check if the deal has already expired
if sd.Proposal.EndEpoch <= ts.Height() {
onDealExpired(nil)
@@ -428,7 +410,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a
return nil
}
-func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal samarket.DealProposal) (*samarket.ClientDealProposal, error) {
+func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal market0.DealProposal) (*market0.ClientDealProposal, error) {
// TODO: output spec signed proposal
buf, err := cborutil.Dump(&proposal)
if err != nil {
@@ -445,7 +427,7 @@ func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Add
return nil, err
}
- return &samarket.ClientDealProposal{
+ return &market0.ClientDealProposal{
Proposal: proposal,
ClientSignature: *sig,
}, nil
@@ -456,37 +438,6 @@ func (c *ClientNodeAdapter) GetDefaultWalletAddress(ctx context.Context) (addres
return addr, err
}
-func (c *ClientNodeAdapter) ValidateAskSignature(ctx context.Context, ask *storagemarket.SignedStorageAsk, encodedTs shared.TipSetToken) (bool, error) {
- tsk, err := types.TipSetKeyFromBytes(encodedTs)
- if err != nil {
- return false, err
- }
-
- mi, err := c.StateMinerInfo(ctx, ask.Ask.Miner, tsk)
- if err != nil {
- return false, xerrors.Errorf("failed to get worker for miner in ask: %w", err)
- }
-
- sigb, err := cborutil.Dump(ask.Ask)
- if err != nil {
- return false, xerrors.Errorf("failed to re-serialize ask")
- }
-
- ts, err := c.ChainGetTipSet(ctx, tsk)
- if err != nil {
- return false, xerrors.Errorf("failed to load tipset")
- }
-
- m, err := c.StateManager.ResolveToKeyAddress(ctx, mi.Worker, ts)
-
- if err != nil {
- return false, xerrors.Errorf("failed to resolve miner to key address")
- }
-
- err = sigs.Verify(ask.Signature, m, sigb)
- return err == nil, err
-}
-
func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
head, err := c.ChainHead(ctx)
if err != nil {
@@ -496,12 +447,12 @@ func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToke
return head.Key().Bytes(), head.Height(), nil
}
-func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, err error) error) error {
+func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error {
receipt, err := c.StateWaitMsg(ctx, mcid, build.MessageConfidence)
if err != nil {
- return cb(0, nil, err)
+ return cb(0, nil, cid.Undef, err)
}
- return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, nil)
+ return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil)
}
func (c *ClientNodeAdapter) GetMinerInfo(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) {
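Note on the WaitForMessage change above: the callback now also receives finalCid — the CID of the message that actually executed on chain (receipt.Message), which can differ from the pushed CID if the message was replaced. A minimal sketch of a callback written against the new signature; onMsg is an illustrative name, and the imports are the same go-state-types, go-cid and xerrors packages already used in this file.

package example

import (
    "github.com/filecoin-project/go-state-types/exitcode"
    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"
)

// onMsg matches the new callback shape used by WaitForMessage above. finalCid
// is the CID of the message that actually landed on chain, which may differ
// from the pushed CID when the message was replaced.
func onMsg(code exitcode.ExitCode, ret []byte, finalCid cid.Cid, err error) error {
    if err != nil {
        return err // the wait itself failed; no receipt available
    }
    if code != exitcode.Ok {
        return xerrors.Errorf("message %s failed with exit code %d", finalCid, code)
    }
    _ = ret // decode the return value here if the method produces one
    return nil
}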
diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go
index 1a6627529..b1071adcd 100644
--- a/markets/storageadapter/provider.go
+++ b/markets/storageadapter/provider.go
@@ -6,35 +6,40 @@ import (
"bytes"
"context"
"io"
+ "time"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/markets/utils"
+ "github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage/sectorblocks"
)
+var addPieceRetryWait = 5 * time.Minute
+var addPieceRetryTimeout = 6 * time.Hour
var log = logging.Logger("storageadapter")
type ProviderNodeAdapter struct {
@@ -45,14 +50,24 @@ type ProviderNodeAdapter struct {
secb *sectorblocks.SectorBlocks
ev *events.Events
+
+ publishSpec, addBalanceSpec *api.MessageSendSpec
}
-func NewProviderNodeAdapter(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
- return &ProviderNodeAdapter{
- FullNode: full,
- dag: dag,
- secb: secb,
- ev: events.NewEvents(context.TODO(), full),
+func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
+ return func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
+ na := &ProviderNodeAdapter{
+ FullNode: full,
+
+ dag: dag,
+ secb: secb,
+ ev: events.NewEvents(context.TODO(), full),
+ }
+ if fc != nil {
+ na.publishSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxPublishDealsFee)}
+ na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)}
+ }
+ return na
}
}
@@ -64,8 +79,8 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark
return cid.Undef, err
}
- params, err := actors.SerializeParams(&market.PublishStorageDealsParams{
- Deals: []market.ClientDealProposal{deal.ClientDealProposal},
+ params, err := actors.SerializeParams(&market0.PublishStorageDealsParams{
+ Deals: []market0.ClientDealProposal{deal.ClientDealProposal},
})
if err != nil {
@@ -74,12 +89,12 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark
// TODO: We may want this to happen after fetching data
smsg, err := n.MpoolPushMessage(ctx, &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: market.Address,
From: mi.Worker,
Value: types.NewInt(0),
- Method: builtin.MethodsMarket.PublishStorageDeals,
+ Method: builtin0.MethodsMarket.PublishStorageDeals,
Params: params,
- }, nil)
+ }, n.publishSpec)
if err != nil {
return cid.Undef, err
}
@@ -91,7 +106,7 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema
return nil, xerrors.Errorf("deal.PublishCid can't be nil")
}
- p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sealing.DealInfo{
+ sdInfo := sealing.DealInfo{
DealID: deal.DealID,
PublishCid: deal.PublishCid,
DealSchedule: sealing.DealSchedule{
@@ -99,7 +114,25 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema
EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch,
},
KeepUnsealed: deal.FastRetrieval,
- })
+ }
+
+ p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo)
+ curTime := time.Now()
+ for time.Since(curTime) < addPieceRetryTimeout {
+ if !xerrors.Is(err, sealing.ErrTooManySectorsSealing) {
+ if err != nil {
+ log.Errorf("failed to addPiece for deal %d, err: %w", deal.DealID, err)
+ }
+ break
+ }
+ select {
+ case <-time.After(addPieceRetryWait):
+ p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo)
+ case <-ctx.Done():
+ return nil, xerrors.New("context expired while waiting to retry AddPiece")
+ }
+ }
+
if err != nil {
return nil, xerrors.Errorf("AddPiece failed: %s", err)
}
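The retry logic introduced above boils down to: call AddPiece once, and while it keeps failing with sealing.ErrTooManySectorsSealing retry every addPieceRetryWait (5 minutes) until addPieceRetryTimeout (6 hours) has elapsed or the context is cancelled. A standalone sketch of that bounded-retry shape, with illustrative names (doWork, errBusy) standing in for the sealing calls:

package example

import (
    "context"
    "errors"
    "time"
)

var errBusy = errors.New("too busy") // stands in for sealing.ErrTooManySectorsSealing

// retryWhileBusy mirrors the loop above: it retries doWork only while the
// error is errBusy, waits `wait` between attempts, and gives up once `timeout`
// has elapsed since the first attempt or the context is cancelled.
func retryWhileBusy(ctx context.Context, doWork func() error, wait, timeout time.Duration) error {
    err := doWork()
    start := time.Now()
    for time.Since(start) < timeout {
        if !errors.Is(err, errBusy) {
            break // success, or an error we should not retry
        }
        select {
        case <-time.After(wait):
            err = doWork()
        case <-ctx.Done():
            return errors.New("context expired while waiting to retry")
        }
    }
    return err
}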
@@ -122,28 +155,6 @@ func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Si
return err == nil, err
}
-func (n *ProviderNodeAdapter) ListProviderDeals(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) ([]storagemarket.StorageDeal, error) {
- tsk, err := types.TipSetKeyFromBytes(encodedTs)
- if err != nil {
- return nil, err
- }
- allDeals, err := n.StateMarketDeals(ctx, tsk)
- if err != nil {
- return nil, err
- }
-
- var out []storagemarket.StorageDeal
-
- for _, deal := range allDeals {
- sharedDeal := utils.FromOnChainDeal(deal.Proposal, deal.State)
- if sharedDeal.Provider == addr {
- out = append(out, sharedDeal)
- }
- }
-
- return out, nil
-}
-
func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
@@ -178,11 +189,11 @@ func (n *ProviderNodeAdapter) EnsureFunds(ctx context.Context, addr, wallet addr
func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) {
// (Provider Node API)
smsg, err := n.MpoolPushMessage(ctx, &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: market.Address,
From: addr,
Value: amount,
- Method: builtin.MethodsMarket.AddBalance,
- }, nil)
+ Method: builtin0.MethodsMarket.AddBalance,
+ }, n.addBalanceSpec)
if err != nil {
return cid.Undef, err
}
@@ -303,7 +314,7 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide
}
switch msg.Method {
- case builtin.MethodsMiner.PreCommitSector:
+ case builtin0.MethodsMiner.PreCommitSector:
var params miner.SectorPreCommitInfo
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return true, false, xerrors.Errorf("unmarshal pre commit: %w", err)
@@ -318,7 +329,7 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide
}
return true, false, nil
- case builtin.MethodsMiner.ProveCommitSector:
+ case builtin0.MethodsMiner.ProveCommitSector:
var params miner.ProveCommitSectorParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err)
@@ -355,20 +366,22 @@ func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetTo
return head.Key().Bytes(), head.Height(), nil
}
-func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, err error) error) error {
+func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error {
receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence)
if err != nil {
- return cb(0, nil, err)
+ return cb(0, nil, cid.Undef, err)
}
- return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, nil)
+ return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil)
}
-func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*verifreg.DataCap, error) {
+func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) {
tsk, err := types.TipSetKeyFromBytes(encodedTs)
if err != nil {
return nil, err
}
- return n.StateVerifiedClientStatus(ctx, addr, tsk)
+
+ sp, err := n.StateVerifiedClientStatus(ctx, addr, tsk)
+ return sp, err
}
func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error {
diff --git a/markets/utils/converters.go b/markets/utils/converters.go
index e1089842e..4a3d21140 100644
--- a/markets/utils/converters.go
+++ b/markets/utils/converters.go
@@ -1,10 +1,9 @@
package utils
import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
peer "github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multiaddr"
@@ -31,13 +30,6 @@ func NewStorageProviderInfo(address address.Address, miner address.Address, sect
}
}
-func FromOnChainDeal(prop market.DealProposal, state market.DealState) storagemarket.StorageDeal {
- return storagemarket.StorageDeal{
- DealProposal: prop,
- DealState: state,
- }
-}
-
func ToSharedBalance(bal api.MarketBalance) storagemarket.Balance {
return storagemarket.Balance{
Locked: bal.Locked,
diff --git a/metrics/metrics.go b/metrics/metrics.go
index a6732e8ea..5dd865263 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -30,6 +30,7 @@ var (
var (
LotusInfo = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless)
ChainNodeHeight = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless)
+ ChainNodeHeightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless)
ChainNodeWorkerHeight = stats.Int64("chain/node_worker_height", "Current Height of workers on the node", stats.UnitDimensionless)
MessagePublished = stats.Int64("message/published", "Counter for total locally published messages", stats.UnitDimensionless)
MessageReceived = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
@@ -62,6 +63,10 @@ var (
Measure: ChainNodeHeight,
Aggregation: view.LastValue(),
}
+ ChainNodeHeightExpectedView = &view.View{
+ Measure: ChainNodeHeightExpected,
+ Aggregation: view.LastValue(),
+ }
ChainNodeWorkerHeightView = &view.View{
Measure: ChainNodeWorkerHeight,
Aggregation: view.LastValue(),
@@ -138,6 +143,7 @@ var (
var DefaultViews = append([]*view.View{
InfoView,
ChainNodeHeightView,
+ ChainNodeHeightExpectedView,
ChainNodeWorkerHeightView,
BlockReceivedView,
BlockValidationFailureView,
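The new chain/node_height_expected measure is exported through a LastValue view, i.e. a gauge that only keeps the most recently recorded value. A minimal sketch of recording such a gauge with the same opencensus packages used in this file; the helper names are illustrative.

package example

import (
    "context"

    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
)

// Mirrors the chain/node_height_expected measure registered above.
var heightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless)

// LastValue aggregation makes this behave like a gauge: only the most
// recently recorded value is exported.
var heightExpectedView = &view.View{
    Measure:     heightExpected,
    Aggregation: view.LastValue(),
}

func init() {
    _ = view.Register(heightExpectedView)
}

// recordExpectedHeight overwrites the exported gauge with the latest value.
func recordExpectedHeight(ctx context.Context, expected int64) {
    stats.Record(ctx, heightExpected.M(expected))
}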
diff --git a/miner/miner.go b/miner/miner.go
index 34f1255a9..aebcb7a25 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -9,11 +9,13 @@ import (
"sync"
"time"
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
lru "github.com/hashicorp/golang-lru"
"github.com/filecoin-project/lotus/api"
@@ -31,6 +33,11 @@ import (
var log = logging.Logger("miner")
+// Journal event types.
+const (
+ evtTypeBlockMined = iota
+)
+
// returns a callback reporting whether we mined a blocks in this round
type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error)
@@ -66,6 +73,9 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address,
sf: sf,
minedBlockHeights: arc,
+ evtTypes: [...]journal.EventType{
+ evtTypeBlockMined: journal.J.RegisterEventType("miner", "block_mined"),
+ },
}
}
@@ -85,6 +95,8 @@ type Miner struct {
sf *slashfilter.SlashFilter
minedBlockHeights *lru.ARCCache
+
+ evtTypes [1]journal.EventType
}
func (m *Miner) Address() address.Address {
@@ -127,6 +139,7 @@ func (m *Miner) niceSleep(d time.Duration) bool {
case <-build.Clock.After(d):
return true
case <-m.stop:
+ log.Infow("received interrupt while trying to sleep in mining cycle")
return false
}
}
@@ -136,7 +149,7 @@ func (m *Miner) mine(ctx context.Context) {
defer span.End()
var lastBase MiningBase
-
+minerLoop:
for {
select {
case <-m.stop:
@@ -157,7 +170,9 @@ func (m *Miner) mine(ctx context.Context) {
prebase, err := m.GetBestMiningCandidate(ctx)
if err != nil {
log.Errorf("failed to get best mining candidate: %s", err)
- m.niceSleep(time.Second * 5)
+ if !m.niceSleep(time.Second * 5) {
+ continue minerLoop
+ }
continue
}
@@ -187,24 +202,31 @@ func (m *Miner) mine(ctx context.Context) {
_, err = m.api.BeaconGetEntry(ctx, prebase.TipSet.Height()+prebase.NullRounds+1)
if err != nil {
log.Errorf("failed getting beacon entry: %s", err)
+ if !m.niceSleep(time.Second) {
+ continue minerLoop
+ }
continue
}
base = prebase
}
+ base.NullRounds += injectNulls // testing
+
if base.TipSet.Equals(lastBase.TipSet) && lastBase.NullRounds == base.NullRounds {
log.Warnf("BestMiningCandidate from the previous round: %s (nulls:%d)", lastBase.TipSet.Cids(), lastBase.NullRounds)
- m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second)
+ if !m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second) {
+ continue minerLoop
+ }
continue
}
- base.NullRounds += injectNulls // testing
-
b, err := m.mineOne(ctx, base)
if err != nil {
log.Errorf("mining block failed: %+v", err)
- m.niceSleep(time.Second)
+ if !m.niceSleep(time.Second) {
+ continue minerLoop
+ }
onDone(false, 0, err)
continue
}
@@ -217,12 +239,14 @@ func (m *Miner) mine(ctx context.Context) {
onDone(b != nil, h, nil)
if b != nil {
- journal.Add("blockMined", map[string]interface{}{
- "parents": base.TipSet.Cids(),
- "nulls": base.NullRounds,
- "epoch": b.Header.Height,
- "timestamp": b.Header.Timestamp,
- "cid": b.Header.Cid(),
+ journal.J.RecordEvent(m.evtTypes[evtTypeBlockMined], func() interface{} {
+ return map[string]interface{}{
+ "parents": base.TipSet.Cids(),
+ "nulls": base.NullRounds,
+ "epoch": b.Header.Height,
+ "timestamp": b.Header.Timestamp,
+ "cid": b.Header.Cid(),
+ }
})
btime := time.Unix(int64(b.Header.Timestamp), 0)
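The journal call above now passes a closure to journal.J.RecordEvent, so the payload map is only built when the miner/block_mined event type is enabled. A toy sketch of that lazy-payload pattern; the types below are simplified stand-ins, not the real lotus journal API.

package example

// eventType is a simplified stand-in for journal.EventType.
type eventType struct{ enabled bool }

// recordEvent only calls the supplier when the event type is enabled, so the
// cost of building a payload for a disabled event is a single branch.
func recordEvent(t eventType, supplier func() interface{}) {
    if !t.enabled {
        return
    }
    payload := supplier()
    _ = payload // a real journal would serialize and persist the payload here
}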
@@ -303,6 +327,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error)
}
ltsw, err := m.api.ChainTipSetWeight(ctx, m.lastWork.TipSet.Key())
if err != nil {
+ m.lastWork = nil
return nil, err
}
@@ -338,7 +363,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg,
if mbi == nil {
return nil, nil
}
- if !mbi.HasMinPower {
+ if !mbi.EligibleForMining {
// slashed or just have no power yet
return nil, nil
}
@@ -359,7 +384,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg,
rbase = bvals[len(bvals)-1]
}
- ticket, err := m.computeTicket(ctx, &rbase, base, len(bvals) > 0)
+ ticket, err := m.computeTicket(ctx, &rbase, base)
if err != nil {
return nil, xerrors.Errorf("scratching ticket failed: %w", err)
}
@@ -429,7 +454,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg,
return b, nil
}
-func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, haveNewEntries bool) (*types.Ticket, error) {
+func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase) (*types.Ticket, error) {
mi, err := m.api.StateMinerInfo(ctx, m.address, types.EmptyTSK)
if err != nil {
return nil, err
@@ -444,11 +469,12 @@ func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, bas
return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
}
- if !haveNewEntries {
+ round := base.TipSet.Height() + base.NullRounds + 1
+ if round > build.UpgradeSmokeHeight {
buf.Write(base.TipSet.MinTicket().VRFProof)
}
- input, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_TicketProduction, base.TipSet.Height()+base.NullRounds+1-build.TicketRandomnessLookback, buf.Bytes())
+ input, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return nil, err
}
@@ -464,7 +490,7 @@ func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, bas
}
func (m *Miner) createBlock(base *MiningBase, addr address.Address, ticket *types.Ticket,
- eproof *types.ElectionProof, bvals []types.BeaconEntry, wpostProof []abi.PoStProof, msgs []*types.SignedMessage) (*types.BlockMsg, error) {
+ eproof *types.ElectionProof, bvals []types.BeaconEntry, wpostProof []proof0.PoStProof, msgs []*types.SignedMessage) (*types.BlockMsg, error) {
uts := base.TipSet.MinTimestamp() + build.BlockDelaySecs*(uint64(base.NullRounds)+1)
nheight := base.TipSet.Height() + base.NullRounds + 1
diff --git a/miner/testminer.go b/miner/testminer.go
index 0b1cbb12b..64e3b3a62 100644
--- a/miner/testminer.go
+++ b/miner/testminer.go
@@ -7,10 +7,10 @@ import (
ds "github.com/ipfs/go-datastore"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
- "github.com/filecoin-project/specs-actors/actors/abi"
)
type MineReq struct {
diff --git a/node/builder.go b/node/builder.go
index b653fa429..15f268a31 100644
--- a/node/builder.go
+++ b/node/builder.go
@@ -3,6 +3,7 @@ package node
import (
"context"
"errors"
+ "os"
"time"
logging "github.com/ipfs/go-log"
@@ -19,8 +20,9 @@ import (
"go.uber.org/fx"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-fil-markets/discovery"
+ discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
- "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
@@ -29,11 +31,12 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/beacon"
- "github.com/filecoin-project/lotus/chain/blocksync"
+ "github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/market"
"github.com/filecoin-project/lotus/chain/messagepool"
+ "github.com/filecoin-project/lotus/chain/messagesigner"
"github.com/filecoin-project/lotus/chain/metrics"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -45,6 +48,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/peermgr"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
@@ -68,6 +72,10 @@ import (
"github.com/filecoin-project/lotus/storage/sectorblocks"
)
+// EnvJournalDisabledEvents is the environment variable through which disabled
+// journal events can be customized.
+const EnvJournalDisabledEvents = "LOTUS_JOURNAL_DISABLED_EVENTS"
+
//nolint:deadcode,varcheck
var log = logging.Logger("builder")
@@ -92,11 +100,16 @@ var (
type invoke int
+// Invokes are called in the order they are defined.
//nolint:golint
const (
+ // InitJournal at position 0 initializes the journal global var as soon as
+ // the system starts, so that it's available for all other components.
+ InitJournalKey = invoke(iota)
+
// libp2p
- PstoreAddSelfKeysKey = invoke(iota)
+ PstoreAddSelfKeysKey
StartListeningKey
BootstrapKey
@@ -104,7 +117,7 @@ const (
SetGenesisKey
RunHelloKey
- RunBlockSyncKey
+ RunChainExchangeKey
RunChainGraphsync
RunPeerMgrKey
@@ -124,7 +137,6 @@ const (
HeadMetricsKey
SettlePaymentChannelsKey
RunPeerTaggerKey
- JournalKey
SetApiEndpointKey
@@ -152,11 +164,25 @@ type Settings struct {
func defaults() []Option {
return []Option{
+ // global system journal.
+ Override(new(journal.DisabledEvents), func() journal.DisabledEvents {
+ if env, ok := os.LookupEnv(EnvJournalDisabledEvents); ok {
+ if ret, err := journal.ParseDisabledEvents(env); err == nil {
+ return ret
+ }
+ }
+ // fallback if env variable is not set, or if it failed to parse.
+ return journal.DefaultDisabledEvents
+ }),
+ Override(new(journal.Journal), modules.OpenFilesystemJournal),
+ Override(InitJournalKey, func(j journal.Journal) {
+ journal.J = j // eagerly sets the global journal through fx.Invoke.
+ }),
+
Override(new(helpers.MetricsCtx), context.Background),
Override(new(record.Validator), modules.RecordValidator),
Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)),
Override(new(dtypes.ShutdownChan), make(chan struct{})),
- Override(JournalKey, modules.SetupJournal),
// Filecoin modules
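The provider registered above reads LOTUS_JOURNAL_DISABLED_EVENTS and falls back to journal.DefaultDisabledEvents when the variable is unset or fails to parse. A usage sketch of the same fallback shape, using only the calls visible in the hunk (journal.ParseDisabledEvents, journal.DefaultDisabledEvents); the function name is illustrative.

package example

import (
    "os"

    "github.com/filecoin-project/lotus/journal"
)

// disabledEventsFromEnv mirrors the provider registered above: parse the env
// var if present, otherwise (or on parse error) use the built-in default.
func disabledEventsFromEnv() journal.DisabledEvents {
    if env, ok := os.LookupEnv("LOTUS_JOURNAL_DISABLED_EVENTS"); ok {
        if ret, err := journal.ParseDisabledEvents(env); err == nil {
            return ret
        }
    }
    return journal.DefaultDisabledEvents
}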
@@ -227,26 +253,31 @@ func Online() Option {
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
- Override(new(dtypes.DrandConfig), modules.BuiltinDrandConfig),
+ Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig),
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
Override(new(vm.SyscallBuilder), vm.Syscalls),
Override(new(*store.ChainStore), modules.ChainStore),
- Override(new(*stmgr.StateManager), stmgr.NewStateManager),
+ Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()),
+ Override(new(*stmgr.StateManager), stmgr.NewStateManagerWithUpgradeSchedule),
Override(new(*wallet.LocalWallet), wallet.NewWallet),
Override(new(api.WalletAPI), From(new(*wallet.LocalWallet))),
Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
+ Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker),
Override(new(dtypes.ChainGCBlockstore), modules.ChainGCBlockstore),
- Override(new(dtypes.ChainExchange), modules.ChainExchange),
- Override(new(dtypes.ChainBlockService), modules.ChainBlockservice),
+ Override(new(dtypes.ChainBitswap), modules.ChainBitswap),
+ Override(new(dtypes.ChainBlockService), modules.ChainBlockService),
// Filecoin services
+ // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value.
+ // It will be called implicitly by the Syncer constructor.
+ Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }),
Override(new(*chain.Syncer), modules.NewSyncer),
- Override(new(*blocksync.BlockSync), blocksync.NewClient),
+ Override(new(exchange.Client), exchange.NewClient),
Override(new(*messagepool.MessagePool), modules.MessagePool),
Override(new(modules.Genesis), modules.ErrorGenesis),
@@ -255,19 +286,19 @@ func Online() Option {
Override(new(dtypes.NetworkName), modules.NetworkName),
Override(new(*hello.Service), hello.NewHelloService),
- Override(new(*blocksync.BlockSyncService), blocksync.NewBlockSyncService),
+ Override(new(exchange.Server), exchange.NewServer),
Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr),
Override(new(dtypes.Graphsync), modules.Graphsync),
Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)),
Override(RunHelloKey, modules.RunHello),
- Override(RunBlockSyncKey, modules.RunBlockSync),
+ Override(RunChainExchangeKey, modules.RunChainExchange),
Override(RunPeerMgrKey, modules.RunPeerMgr),
Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks),
- Override(new(*discovery.Local), modules.NewLocalDiscovery),
- Override(new(retrievalmarket.PeerResolver), modules.RetrievalResolver),
+ Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery),
+ Override(new(discovery.PeerResolver), modules.RetrievalResolver),
Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient),
Override(new(dtypes.ClientDatastore), modules.NewClientDatastore),
@@ -275,7 +306,7 @@ func Online() Option {
Override(new(modules.ClientDealFunds), modules.NewClientDealFunds),
Override(new(storagemarket.StorageClient), modules.StorageClient),
Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter),
- Override(new(beacon.RandomBeacon), modules.RandomBeacon),
+ Override(new(beacon.Schedule), modules.RandomSchedule),
Override(new(*paychmgr.Store), paychmgr.NewStore),
Override(new(*paychmgr.Manager), paychmgr.NewManager),
@@ -317,7 +348,7 @@ func Online() Option {
Override(new(dtypes.DealFilter), modules.BasicDealFilter(nil)),
Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds),
Override(new(storagemarket.StorageProvider), modules.StorageProvider),
- Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter),
+ Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)),
Override(HandleRetrievalKey, modules.HandleRetrieval),
Override(GetParamsKey, modules.GetParams),
Override(HandleDealsKey, modules.HandleDeals),
@@ -358,7 +389,7 @@ func StorageMiner(out *api.StorageMiner) Option {
func(s *Settings) error {
resAPI := &impl.StorageMinerAPI{}
- s.invokes[ExtractApiKey] = fx.Extract(resAPI)
+ s.invokes[ExtractApiKey] = fx.Populate(resAPI)
*out = resAPI
return nil
},
@@ -439,6 +470,8 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(dtypes.DealFilter), modules.BasicDealFilter(dealfilter.CliDealFilter(cfg.Dealmaking.Filter))),
),
+ Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees)),
+
Override(new(sectorstorage.SealerConfig), cfg.Storage),
Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)),
)
@@ -481,12 +514,18 @@ func Repo(r repo.Repo) Option {
}
func FullAPI(out *api.FullNode) Option {
- return func(s *Settings) error {
- resAPI := &impl.FullNodeAPI{}
- s.invokes[ExtractApiKey] = fx.Extract(resAPI)
- *out = resAPI
- return nil
- }
+ return Options(
+ func(s *Settings) error {
+ s.nodeType = repo.FullNode
+ return nil
+ },
+ func(s *Settings) error {
+ resAPI := &impl.FullNodeAPI{}
+ s.invokes[ExtractApiKey] = fx.Populate(resAPI)
+ *out = resAPI
+ return nil
+ },
+ )
}
type StopFunc func(context.Context) error
@@ -494,9 +533,8 @@ type StopFunc func(context.Context) error
// New builds and starts new Filecoin node
func New(ctx context.Context, opts ...Option) (StopFunc, error) {
settings := Settings{
- modules: map[interface{}]fx.Option{},
- invokes: make([]fx.Option, _nInvokes),
- nodeType: repo.FullNode,
+ modules: map[interface{}]fx.Option{},
+ invokes: make([]fx.Option, _nInvokes),
}
// apply module options in the right order
@@ -541,6 +579,6 @@ func Test() Option {
return Options(
Unset(RunPeerMgrKey),
Unset(new(*peermgr.PeerMgr)),
- Override(new(beacon.RandomBeacon), testing.RandomBeacon),
+ Override(new(beacon.Schedule), testing.RandomBeacon),
)
}
diff --git a/node/config/def.go b/node/config/def.go
index f5071ada5..63340cfd5 100644
--- a/node/config/def.go
+++ b/node/config/def.go
@@ -62,9 +62,11 @@ type SealingConfig struct {
}
type MinerFeeConfig struct {
- MaxPreCommitGasFee types.FIL
- MaxCommitGasFee types.FIL
- MaxWindowPoStGasFee types.FIL
+ MaxPreCommitGasFee types.FIL
+ MaxCommitGasFee types.FIL
+ MaxWindowPoStGasFee types.FIL
+ MaxPublishDealsFee types.FIL
+ MaxMarketBalanceAddFee types.FIL
}
// API contains configs for API endpoint
@@ -152,7 +154,7 @@ func DefaultStorageMiner() *StorageMiner {
MaxWaitDealsSectors: 2, // 64G with 32G sectors
MaxSealingSectors: 0,
MaxSealingSectorsForDeals: 0,
- WaitDealsDelay: Duration(time.Hour),
+ WaitDealsDelay: Duration(time.Hour * 6),
},
Storage: sectorstorage.SealerConfig{
@@ -174,13 +176,15 @@ func DefaultStorageMiner() *StorageMiner {
ConsiderOfflineRetrievalDeals: true,
PieceCidBlocklist: []cid.Cid{},
// TODO: It'd be nice to set this based on sector size
- ExpectedSealDuration: Duration(time.Hour * 12),
+ ExpectedSealDuration: Duration(time.Hour * 24),
},
Fees: MinerFeeConfig{
- MaxPreCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))), // 0.05
- MaxCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))),
- MaxWindowPoStGasFee: types.FIL(types.FromFil(50)),
+ MaxPreCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))), // 0.05
+ MaxCommitGasFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))),
+ MaxWindowPoStGasFee: types.FIL(types.FromFil(50)),
+ MaxPublishDealsFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(33))), // 0.03ish
+ MaxMarketBalanceAddFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(100))), // 0.01
},
}
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
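The new fee defaults are integer divisions of 1 FIL, which is where the "0.05", "0.03ish" and "0.01" comments come from: 1/20 FIL = 0.05 FIL, 1/33 FIL ≈ 0.0303 FIL, 1/100 FIL = 0.01 FIL. A small sketch that prints the resulting attoFIL values with the same helpers (types.FromFil, types.BigDiv, types.NewInt) used above:

package main

import (
    "fmt"

    "github.com/filecoin-project/lotus/chain/types"
)

func main() {
    // 1 FIL = 10^18 attoFIL; the defaults above divide 1 FIL by an integer.
    publish := types.BigDiv(types.FromFil(1), types.NewInt(33))   // ~0.0303 FIL
    addBal := types.BigDiv(types.FromFil(1), types.NewInt(100))   // 0.01 FIL
    precommit := types.BigDiv(types.FromFil(1), types.NewInt(20)) // 0.05 FIL

    // Printed as attoFIL big integers.
    fmt.Println("MaxPublishDealsFee:    ", publish)   // 30303030303030303
    fmt.Println("MaxMarketBalanceAddFee:", addBal)    // 10000000000000000
    fmt.Println("MaxPreCommitGasFee:    ", precommit) // 50000000000000000
}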
diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go
index 48d111c6b..3b85e3a74 100644
--- a/node/hello/cbor_gen.go
+++ b/node/hello/cbor_gen.go
@@ -6,7 +6,7 @@ import (
"fmt"
"io"
- abi "github.com/filecoin-project/specs-actors/actors/abi"
+ abi "github.com/filecoin-project/go-state-types/abi"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
diff --git a/node/hello/hello.go b/node/hello/hello.go
index 78da9c7e3..05d53de06 100644
--- a/node/hello/hello.go
+++ b/node/hello/hello.go
@@ -4,10 +4,10 @@ import (
"context"
"time"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
xerrors "golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/host"
@@ -104,6 +104,10 @@ func (hs *Service) HandleStream(s inet.Stream) {
build.Clock.Sleep(time.Millisecond * 300)
}
+ if hs.pmgr != nil {
+ hs.pmgr.AddFilecoinPeer(s.Conn().RemotePeer())
+ }
+
ts, err := hs.syncer.FetchTipSet(context.Background(), s.Conn().RemotePeer(), types.NewTipSetKey(hmsg.HeaviestTipSet...))
if err != nil {
log.Errorf("failed to fetch tipset from peer during hello: %+v", err)
@@ -117,9 +121,6 @@ func (hs *Service) HandleStream(s inet.Stream) {
log.Infof("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts)
}
- if hs.pmgr != nil {
- hs.pmgr.AddFilecoinPeer(s.Conn().RemotePeer())
- }
}
diff --git a/node/impl/backup.go b/node/impl/backup.go
new file mode 100644
index 000000000..10f673a4b
--- /dev/null
+++ b/node/impl/backup.go
@@ -0,0 +1,67 @@
+package impl
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/mitchellh/go-homedir"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/lib/backupds"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+func backup(mds dtypes.MetadataDS, fpath string) error {
+ bb, ok := os.LookupEnv("LOTUS_BACKUP_BASE_PATH")
+ if !ok {
+ return xerrors.Errorf("LOTUS_BACKUP_BASE_PATH env var not set")
+ }
+
+ bds, ok := mds.(*backupds.Datastore)
+ if !ok {
+ return xerrors.Errorf("expected a backup datastore")
+ }
+
+ bb, err := homedir.Expand(bb)
+ if err != nil {
+ return xerrors.Errorf("expanding base path: %w", err)
+ }
+
+ bb, err = filepath.Abs(bb)
+ if err != nil {
+ return xerrors.Errorf("getting absolute base path: %w", err)
+ }
+
+ fpath, err = homedir.Expand(fpath)
+ if err != nil {
+ return xerrors.Errorf("expanding file path: %w", err)
+ }
+
+ fpath, err = filepath.Abs(fpath)
+ if err != nil {
+ return xerrors.Errorf("getting absolute file path: %w", err)
+ }
+
+ if !strings.HasPrefix(fpath, bb) {
+ return xerrors.Errorf("backup file name (%s) must be inside base path (%s)", fpath, bb)
+ }
+
+ out, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return xerrors.Errorf("open %s: %w", fpath, err)
+ }
+
+ if err := bds.Backup(out); err != nil {
+ if cerr := out.Close(); cerr != nil {
+ log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err)
+ }
+ return xerrors.Errorf("backup error: %w", err)
+ }
+
+ if err := out.Close(); err != nil {
+ return xerrors.Errorf("closing backup file: %w", err)
+ }
+
+ return nil
+}
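backup refuses to write outside LOTUS_BACKUP_BASE_PATH: both paths are ~-expanded, made absolute, and the target must be prefix-matched under the base. A sketch of that path check in isolation; isUnderBase is an illustrative helper, not part of the patch.

package example

import (
    "path/filepath"
    "strings"

    "github.com/mitchellh/go-homedir"
)

// isUnderBase reproduces the validation above: expand ~, make both paths
// absolute, then require the target to live under the base directory.
func isUnderBase(base, target string) (bool, error) {
    base, err := homedir.Expand(base)
    if err != nil {
        return false, err
    }
    base, err = filepath.Abs(base)
    if err != nil {
        return false, err
    }
    target, err = homedir.Expand(target)
    if err != nil {
        return false, err
    }
    target, err = filepath.Abs(target)
    if err != nil {
        return false, err
    }
    return strings.HasPrefix(target, base), nil
}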
diff --git a/node/impl/client/client.go b/node/impl/client/client.go
index 3a157318e..f146dcea3 100644
--- a/node/impl/client/client.go
+++ b/node/impl/client/client.go
@@ -6,8 +6,10 @@ import (
"io"
"os"
+ "github.com/filecoin-project/go-state-types/dline"
+
datatransfer "github.com/filecoin-project/go-data-transfer"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"golang.org/x/xerrors"
"github.com/ipfs/go-blockservice"
@@ -31,6 +33,7 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-fil-markets/discovery"
"github.com/filecoin-project/go-fil-markets/pieceio"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
@@ -38,8 +41,7 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
"github.com/filecoin-project/go-padreader"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
@@ -57,7 +59,7 @@ import (
var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)
-const dealStartBufferHours uint64 = 24
+const dealStartBufferHours uint64 = 49
type API struct {
fx.In
@@ -68,7 +70,7 @@ type API struct {
paych.PaychAPI
SMDealClient storagemarket.StorageClient
- RetDiscovery rm.PeerResolver
+ RetDiscovery discovery.PeerResolver
Retrieval rm.RetrievalClient
Chain *store.ChainStore
@@ -80,12 +82,12 @@ type API struct {
Host host.Host
}
-func calcDealExpiration(minDuration uint64, md *miner.DeadlineInfo, startEpoch abi.ChainEpoch) abi.ChainEpoch {
+func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.ChainEpoch) abi.ChainEpoch {
// Make sure we give some time for the miner to seal
minExp := startEpoch + abi.ChainEpoch(minDuration)
// Align on miners ProvingPeriodBoundary
- return minExp + miner.WPoStProvingPeriod - (minExp % miner.WPoStProvingPeriod) + (md.PeriodStart % miner.WPoStProvingPeriod) - 1
+ return minExp + md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1
}
func (a *API) imgr() *importmgr.Mgr {
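calcDealExpiration now takes the proving period from the miner's dline.Info rather than a chain-wide constant, but the formula is unchanged: round minExp up to the next proving-period boundary offset by PeriodStart, then subtract one epoch. A worked example with plain int64 epochs and assumed numbers (proving period 2880, PeriodStart 100):

package main

import "fmt"

// Same shape as calcDealExpiration above, written with plain int64 epochs.
func calcDealExpiration(minDuration, wpostProvingPeriod, periodStart, startEpoch int64) int64 {
    minExp := startEpoch + minDuration
    return minExp + wpostProvingPeriod - (minExp % wpostProvingPeriod) + (periodStart % wpostProvingPeriod) - 1
}

func main() {
    // Assumed numbers: a 2880-epoch proving period starting at epoch 100,
    // a deal starting at epoch 5000 with a 518400-epoch minimum duration.
    exp := calcDealExpiration(518400, 2880, 100, 5000)
    fmt.Println(exp)                    // 524259
    fmt.Println((exp + 1 - 100) % 2880) // 0: expiry sits one epoch before a proving-period boundary
}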
@@ -114,7 +116,13 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
}
}
}
- exist, err := a.WalletHas(ctx, params.Wallet)
+
+ walletKey, err := a.StateAPI.StateManager.ResolveToKeyAddress(ctx, params.Wallet, nil)
+ if err != nil {
+ return nil, xerrors.Errorf("failed resolving params.Wallet addr: %w", params.Wallet)
+ }
+
+ exist, err := a.WalletHas(ctx, walletKey)
if err != nil {
return nil, xerrors.Errorf("failed getting addr from wallet: %w", params.Wallet)
}
@@ -151,7 +159,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
}
blocksPerHour := 60 * 60 / build.BlockDelaySecs
- dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour)
+ dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask
}
result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{
@@ -197,6 +205,7 @@ func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
Duration: uint64(v.Proposal.Duration()),
DealID: v.DealID,
CreationTime: v.CreationTime.Time(),
+ Verified: v.Proposal.VerifiedDeal,
}
}
@@ -220,6 +229,7 @@ func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo,
Duration: uint64(v.Proposal.Duration()),
DealID: v.DealID,
CreationTime: v.CreationTime.Time(),
+ Verified: v.Proposal.VerifiedDeal,
}, nil
}
@@ -612,18 +622,18 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
return
}
-func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
+func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("failed getting miner info: %w", err)
}
info := utils.NewStorageProviderInfo(miner, mi.Worker, mi.SectorSize, p, mi.Multiaddrs)
- signedAsk, err := a.SMDealClient.GetAsk(ctx, info)
+ ask, err := a.SMDealClient.GetAsk(ctx, info)
if err != nil {
return nil, err
}
- return signedAsk, nil
+ return ask, nil
}
func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
@@ -709,7 +719,7 @@ func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath stri
// TODO: does that defer mean to remove the whole blockstore?
defer bufferedDS.Remove(ctx, c) //nolint:errcheck
- ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any)
+ ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
// entire DAG selector
allSelector := ssb.ExploreRecursive(selector.RecursionLimitNone(),
@@ -845,5 +855,10 @@ func newDealInfo(v storagemarket.ClientDeal) api.DealInfo {
Duration: uint64(v.Proposal.Duration()),
DealID: v.DealID,
CreationTime: v.CreationTime.Time(),
+ Verified: v.Proposal.VerifiedDeal,
}
}
+
+func (a *API) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error {
+ return a.Retrieval.TryRestartInsufficientFunds(paymentChannel)
+}
diff --git a/node/impl/common/common.go b/node/impl/common/common.go
index 6a69b2a7a..da7cfff25 100644
--- a/node/impl/common/common.go
+++ b/node/impl/common/common.go
@@ -121,6 +121,12 @@ func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo,
func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).AutoNat
+ if autonat == nil {
+ return api.NatInfo{
+ Reachability: network.ReachabilityUnknown,
+ }, nil
+ }
+
var maddr string
if autonat.Status() == network.ReachabilityPublic {
pa, err := autonat.PublicAddr()
@@ -170,9 +176,14 @@ func (a *CommonAPI) ID(context.Context) (peer.ID, error) {
}
func (a *CommonAPI) Version(context.Context) (api.Version, error) {
+ v, err := build.VersionForType(build.RunningNodeType)
+ if err != nil {
+ return api.Version{}, err
+ }
+
return api.Version{
Version: build.UserVersion(),
- APIVersion: build.APIVersion,
+ APIVersion: v,
BlockDelay: build.BlockDelaySecs,
}, nil
diff --git a/node/impl/full.go b/node/impl/full.go
index feeb29745..add40917c 100644
--- a/node/impl/full.go
+++ b/node/impl/full.go
@@ -1,6 +1,8 @@
package impl
import (
+ "context"
+
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/api"
@@ -9,6 +11,7 @@ import (
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/impl/market"
"github.com/filecoin-project/lotus/node/impl/paych"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
)
var log = logging.Logger("node")
@@ -26,6 +29,12 @@ type FullNodeAPI struct {
full.WalletAPI
full.SyncAPI
full.BeaconAPI
+
+ DS dtypes.MetadataDS
+}
+
+func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error {
+ return backup(n.DS, fpath)
}
var _ api.FullNode = &FullNodeAPI{}
diff --git a/node/impl/full/beacon.go b/node/impl/full/beacon.go
index 07037f6e1..bc7232c27 100644
--- a/node/impl/full/beacon.go
+++ b/node/impl/full/beacon.go
@@ -4,21 +4,22 @@ import (
"context"
"fmt"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
"go.uber.org/fx"
)
type BeaconAPI struct {
fx.In
- Beacon beacon.RandomBeacon
+ Beacon beacon.Schedule
}
func (a *BeaconAPI) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
- rr := a.Beacon.MaxBeaconRoundForEpoch(epoch, types.BeaconEntry{})
- e := a.Beacon.Entry(ctx, rr)
+ b := a.Beacon.BeaconForEpoch(epoch)
+ rr := b.MaxBeaconRoundForEpoch(epoch)
+ e := b.Entry(ctx, rr)
select {
case be, ok := <-e:
diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go
index ce5e3822a..aa2ae4df1 100644
--- a/node/impl/full/chain.go
+++ b/node/impl/full/chain.go
@@ -26,8 +26,8 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
@@ -197,6 +197,10 @@ func (a *ChainAPI) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error
return blk.RawData(), nil
}
+func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error {
+ return a.Chain.Blockstore().DeleteBlock(obj)
+}
+
func (a *ChainAPI) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) {
return a.Chain.Blockstore().Has(obj)
}
@@ -250,11 +254,31 @@ func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid)
}
func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error {
- ts, err := a.Chain.GetTipSetFromKey(tsk)
+ newHeadTs, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- return a.Chain.SetHead(ts)
+
+ currentTs, err := a.ChainHead(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting head: %w", err)
+ }
+
+ for currentTs.Height() >= newHeadTs.Height() {
+ for _, blk := range currentTs.Key().Cids() {
+ err = a.Chain.UnmarkBlockAsValidated(ctx, blk)
+ if err != nil {
+ return xerrors.Errorf("unmarking block as validated %s: %w", blk, err)
+ }
+ }
+
+ currentTs, err = a.ChainGetTipSet(ctx, currentTs.Parents())
+ if err != nil {
+ return xerrors.Errorf("loading tipset: %w", err)
+ }
+ }
+
+ return a.Chain.SetHead(newHeadTs)
}
func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) {
@@ -281,6 +305,8 @@ func (s stringKey) Key() string {
return (string)(s)
}
+// TODO: ActorUpgrade: this entire function is a problem (in theory) as we don't know the HAMT version.
+// In practice, hamt v0 should work "just fine" for reading.
func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
return func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
@@ -300,7 +326,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
return nil, nil, xerrors.Errorf("parsing int64: %w", err)
}
- ik := adt.IntKey(i)
+ ik := abi.IntKey(i)
names[0] = "@H:" + ik.Key()
}
@@ -311,7 +337,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
return nil, nil, xerrors.Errorf("parsing uint64: %w", err)
}
- ik := adt.UIntKey(i)
+ ik := abi.UIntKey(i)
names[0] = "@H:" + ik.Key()
}
@@ -418,7 +444,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
return nil, nil, xerrors.Errorf("getting actor head for @state: %w", err)
}
- m, err := vm.DumpActorState(act.Code, head.RawData())
+ m, err := vm.DumpActorState(&act, head.RawData())
if err != nil {
return nil, nil, err
}
@@ -495,7 +521,7 @@ func (a *ChainAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Mess
return cm.VMMessage(), nil
}
-func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error) {
+func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipoldmsgs bool, tsk types.TipSetKey) (<-chan []byte, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
@@ -503,15 +529,11 @@ func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk t
r, w := io.Pipe()
out := make(chan []byte)
go func() {
- defer w.Close() //nolint:errcheck // it is a pipe
-
bw := bufio.NewWriterSize(w, 1<<20)
- defer bw.Flush() //nolint:errcheck // it is a write to a pipe
- if err := a.Chain.Export(ctx, ts, nroots, bw); err != nil {
- log.Errorf("chain export call failed: %s", err)
- return
- }
+ err := a.Chain.Export(ctx, ts, nroots, skipoldmsgs, bw)
+ bw.Flush() //nolint:errcheck // it is a write to a pipe
+ w.CloseWithError(err) //nolint:errcheck // it is a pipe
}()
go func() {
@@ -523,13 +545,23 @@ func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk t
log.Errorf("chain export pipe read failed: %s", err)
return
}
- select {
- case out <- buf[:n]:
- case <-ctx.Done():
- log.Warnf("export writer failed: %s", ctx.Err())
- return
+ if n > 0 {
+ select {
+ case out <- buf[:n]:
+ case <-ctx.Done():
+ log.Warnf("export writer failed: %s", ctx.Err())
+ return
+ }
}
if err == io.EOF {
+ // send an empty slice to signal a clean EOF to the consumer
+ select {
+ case out <- []byte{}:
+ case <-ctx.Done():
+ log.Warnf("export writer failed: %s", ctx.Err())
+ return
+ }
+
return
}
}
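ChainExport now streams the CAR as a channel of byte slices and finishes with an explicit empty slice, so consumers can tell a clean EOF from an interrupted export. A sketch of a consumer honouring that convention; writeExport is an illustrative helper.

package example

import (
    "context"
    "io"

    "golang.org/x/xerrors"
)

// writeExport drains an export stream that follows the convention above:
// non-empty chunks carry data, an empty slice marks a clean EOF, and a
// closed channel without the empty slice means the export was interrupted.
func writeExport(ctx context.Context, stream <-chan []byte, out io.Writer) error {
    for {
        select {
        case chunk, ok := <-stream:
            if !ok {
                return xerrors.New("export channel closed before EOF marker")
            }
            if len(chunk) == 0 {
                return nil // clean end of stream
            }
            if _, err := out.Write(chunk); err != nil {
                return xerrors.Errorf("writing export chunk: %w", err)
            }
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}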
diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go
index 778c2c4eb..3580ca26d 100644
--- a/node/impl/full/gas.go
+++ b/node/impl/full/gas.go
@@ -6,21 +6,24 @@ import (
"math/rand"
"sort"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ "go.uber.org/fx"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
-
- "go.uber.org/fx"
- "golang.org/x/xerrors"
)
type GasAPI struct {
@@ -50,6 +53,35 @@ func (a *GasAPI) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxq
return out, nil
}
+type gasMeta struct {
+ price big.Int
+ limit int64
+}
+
+func medianGasPremium(prices []gasMeta, blocks int) abi.TokenAmount {
+ sort.Slice(prices, func(i, j int) bool {
+ // sort desc by price
+ return prices[i].price.GreaterThan(prices[j].price)
+ })
+
+ at := build.BlockGasTarget * int64(blocks) / 2
+ prev1, prev2 := big.Zero(), big.Zero()
+ for _, price := range prices {
+ prev1, prev2 = price.price, prev1
+ at -= price.limit
+ if at < 0 {
+ break
+ }
+ }
+
+ premium := prev1
+ if prev2.Sign() != 0 {
+ premium = big.Div(types.BigAdd(prev1, prev2), types.NewInt(2))
+ }
+
+ return premium
+}
+
func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, _ types.TipSetKey) (types.BigInt, error) {
@@ -57,11 +89,6 @@ func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
nblocksincl = 1
}
- type gasMeta struct {
- price big.Int
- limit int64
- }
-
var prices []gasMeta
var blocks int
@@ -92,25 +119,7 @@ func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
ts = pts
}
- sort.Slice(prices, func(i, j int) bool {
- // sort desc by price
- return prices[i].price.GreaterThan(prices[j].price)
- })
-
- at := build.BlockGasTarget * int64(blocks) / 2
- prev1, prev2 := big.Zero(), big.Zero()
- for _, price := range prices {
- prev1, prev2 = price.price, prev1
- at -= price.limit
- if at > 0 {
- continue
- }
- }
-
- premium := prev1
- if prev2.Sign() != 0 {
- premium = big.Div(types.BigAdd(prev1, prev2), types.NewInt(2))
- }
+ premium := medianGasPremium(prices, blocks)
if types.BigCmp(premium, types.NewInt(MinGasPremium)) < 0 {
switch nblocksincl {
@@ -151,7 +160,18 @@ func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message,
priorMsgs = append(priorMsgs, m)
}
- res, err := a.Stmgr.CallWithGas(ctx, &msg, priorMsgs, ts)
+ // Try calling until we find a height with no migration.
+ var res *api.InvocResult
+ for {
+ res, err = a.Stmgr.CallWithGas(ctx, &msg, priorMsgs, ts)
+ if err != stmgr.ErrExpensiveFork {
+ break
+ }
+ ts, err = a.Chain.GetTipSetFromKey(ts.Parents())
+ if err != nil {
+ return -1, xerrors.Errorf("getting parent tipset: %w", err)
+ }
+ }
if err != nil {
return -1, xerrors.Errorf("CallWithGas failed: %w", err)
}
@@ -160,8 +180,14 @@ func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message,
}
// Special case for PaymentChannel collect, which is deleting actor
- var act types.Actor
- err = a.Stmgr.WithParentState(ts, a.Stmgr.WithActor(msg.To, stmgr.GetActor(&act)))
+ st, err := a.Stmgr.ParentState(ts)
+ if err != nil {
+ _ = err
+ // somewhat ignore it as it can happen and we just want to detect
+ // an existing PaymentChannel actor
+ return res.MsgRct.GasUsed, nil
+ }
+ act, err := st.GetActor(msg.To)
if err != nil {
_ = err
// somewhat ignore it as it can happen and we just want to detect
@@ -169,10 +195,10 @@ func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message,
return res.MsgRct.GasUsed, nil
}
- if !act.Code.Equals(builtin.PaymentChannelActorCodeID) {
+ if !builtin.IsPaymentChannelActor(act.Code) {
return res.MsgRct.GasUsed, nil
}
- if msgIn.Method != builtin.MethodsPaych.Collect {
+ if msgIn.Method != builtin0.MethodsPaych.Collect {
return res.MsgRct.GasUsed, nil
}
@@ -198,30 +224,14 @@ func (a *GasAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message,
}
if msg.GasFeeCap == types.EmptyInt || types.BigCmp(msg.GasFeeCap, types.NewInt(0)) == 0 {
- feeCap, err := a.GasEstimateFeeCap(ctx, msg, 10, types.EmptyTSK)
+ feeCap, err := a.GasEstimateFeeCap(ctx, msg, 20, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("estimating fee cap: %w", err)
}
msg.GasFeeCap = feeCap
}
- capGasFee(msg, spec.Get().MaxFee)
+ messagepool.CapGasFee(msg, spec.Get().MaxFee)
return msg, nil
}
-
-func capGasFee(msg *types.Message, maxFee abi.TokenAmount) {
- if maxFee.Equals(big.Zero()) {
- maxFee = types.NewInt(build.FilecoinPrecision / 10)
- }
-
- gl := types.NewInt(uint64(msg.GasLimit))
- totalFee := types.BigMul(msg.GasFeeCap, gl)
-
- if totalFee.LessThanEqual(maxFee) {
- return
- }
-
- msg.GasFeeCap = big.Div(maxFee, gl)
- msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
-}
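The local capGasFee helper is removed in favour of messagepool.CapGasFee. For reference, the rule in the deleted lines above is: if GasFeeCap × GasLimit exceeds the configured MaxFee (0.1 FIL when unset), lower GasFeeCap to MaxFee / GasLimit and then cap GasPremium at the new GasFeeCap. A compact restatement as a standalone sketch, using the same types and helpers as the deleted code:

package example

import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"

    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
)

// capGasFee restates the deleted helper above: never let GasFeeCap*GasLimit
// exceed maxFee (0.1 FIL by default), and keep GasPremium <= GasFeeCap after
// lowering the cap.
func capGasFee(msg *types.Message, maxFee abi.TokenAmount) {
    if maxFee.Equals(big.Zero()) {
        maxFee = types.NewInt(build.FilecoinPrecision / 10) // 0.1 FIL
    }

    gl := types.NewInt(uint64(msg.GasLimit))
    totalFee := types.BigMul(msg.GasFeeCap, gl)

    if totalFee.LessThanEqual(maxFee) {
        return // already within budget
    }

    msg.GasFeeCap = big.Div(maxFee, gl)
    msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
}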
diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go
new file mode 100644
index 000000000..2452ab807
--- /dev/null
+++ b/node/impl/full/gas_test.go
@@ -0,0 +1,40 @@
+package full
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func TestMedian(t *testing.T) {
+ require.Equal(t, types.NewInt(5), medianGasPremium([]gasMeta{
+ {big.NewInt(5), build.BlockGasTarget},
+ }, 1))
+
+ require.Equal(t, types.NewInt(10), medianGasPremium([]gasMeta{
+ {big.NewInt(5), build.BlockGasTarget},
+ {big.NewInt(10), build.BlockGasTarget},
+ }, 1))
+
+ require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{
+ {big.NewInt(10), build.BlockGasTarget / 2},
+ {big.NewInt(20), build.BlockGasTarget / 2},
+ }, 1))
+
+ require.Equal(t, types.NewInt(25), medianGasPremium([]gasMeta{
+ {big.NewInt(10), build.BlockGasTarget / 2},
+ {big.NewInt(20), build.BlockGasTarget / 2},
+ {big.NewInt(30), build.BlockGasTarget / 2},
+ }, 1))
+
+ require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{
+ {big.NewInt(10), build.BlockGasTarget / 2},
+ {big.NewInt(20), build.BlockGasTarget / 2},
+ {big.NewInt(30), build.BlockGasTarget / 2},
+ }, 2))
+}
diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go
index bfb7439bb..1f093606c 100644
--- a/node/impl/full/mpool.go
+++ b/node/impl/full/mpool.go
@@ -2,6 +2,7 @@ package full
import (
"context"
+ "encoding/json"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
@@ -9,8 +10,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/messagepool"
- "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/messagesigner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -21,9 +21,7 @@ type MpoolAPI struct {
WalletAPI
GasAPI
- Chain *store.ChainStore
-
- Mpool *messagepool.MessagePool
+ MessageSigner *messagesigner.MessageSigner
PushLocks *dtypes.MpoolLocker
}
@@ -112,12 +110,19 @@ func (a *MpoolAPI) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (ci
return a.Mpool.Push(smsg)
}
+func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
+ return a.Mpool.PushUntrusted(smsg)
+}
+
func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
+ cp := *msg
+ msg = &cp
+ inMsg := *msg
+ fromA, err := a.Stmgr.ResolveToKeyAddress(ctx, msg.From, nil)
+ if err != nil {
+ return nil, xerrors.Errorf("getting key address: %w", err)
+ }
{
- fromA, err := a.Stmgr.ResolveToKeyAddress(ctx, msg.From, nil)
- if err != nil {
- return nil, xerrors.Errorf("getting key address: %w", err)
- }
done, err := a.PushLocks.TakeLock(ctx, fromA)
if err != nil {
return nil, xerrors.Errorf("taking lock: %w", err)
@@ -129,38 +134,39 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
return nil, xerrors.Errorf("MpoolPushMessage expects message nonce to be 0, was %d", msg.Nonce)
}
- msg, err := a.GasAPI.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK)
+ msg, err = a.GasAPI.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("GasEstimateMessageGas error: %w", err)
}
- sign := func(from address.Address, nonce uint64) (*types.SignedMessage, error) {
- msg.Nonce = nonce
- if msg.From.Protocol() == address.ID {
- log.Warnf("Push from ID address (%s), adjusting to %s", msg.From, from)
- msg.From = from
- }
-
- b, err := a.WalletBalance(ctx, msg.From)
- if err != nil {
- return nil, xerrors.Errorf("mpool push: getting origin balance: %w", err)
- }
-
- if b.LessThan(msg.Value) {
- return nil, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, msg.Value)
- }
-
- return a.WalletSignMessage(ctx, from, msg)
+ if msg.GasPremium.GreaterThan(msg.GasFeeCap) {
+ inJson, _ := json.Marshal(inMsg)
+ outJson, _ := json.Marshal(msg)
+ return nil, xerrors.Errorf("After estimation, GasPremium is greater than GasFeeCap, inmsg: %s, outmsg: %s",
+ inJson, outJson)
}
- var m *types.SignedMessage
-again:
- m, err = a.Mpool.PushWithNonce(ctx, msg.From, sign)
- if err == messagepool.ErrTryAgain {
- log.Debugf("temporary failure while pushing message: %s; retrying", err)
- goto again
+ if msg.From.Protocol() == address.ID {
+ log.Warnf("Push from ID address (%s), adjusting to %s", msg.From, fromA)
+ msg.From = fromA
}
- return m, err
+
+ b, err := a.WalletBalance(ctx, msg.From)
+ if err != nil {
+ return nil, xerrors.Errorf("mpool push: getting origin balance: %w", err)
+ }
+
+ if b.LessThan(msg.Value) {
+ return nil, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, msg.Value)
+ }
+
+ // Sign and push the message
+ return a.MessageSigner.SignMessage(ctx, msg, func(smsg *types.SignedMessage) error {
+ if _, err := a.Mpool.Push(smsg); err != nil {
+ return xerrors.Errorf("mpool push: failed to push message: %w", err)
+ }
+ return nil
+ })
}
func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
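
For illustration, a minimal caller sketch against the reworked MpoolPushMessage above: the node copies the message, estimates gas, rejects the push when estimation leaves GasPremium above GasFeeCap, then signs via the MessageSigner and pushes atomically. The api.FullNode handle, addresses, and amount below are placeholders and not part of the patch.

    package example

    import (
        "context"
        "fmt"

        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/lotus/api"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // pushSimpleSend sketches a caller of the reworked MpoolPushMessage.
    // Gas fields are left zero so estimation fills them in; the nonce must stay 0
    // because the node assigns the real nonce when signing.
    func pushSimpleSend(ctx context.Context, node api.FullNode, from, to address.Address, amt types.BigInt) (*types.SignedMessage, error) {
        msg := &types.Message{
            From:  from,
            To:    to,
            Value: amt,
        }
        smsg, err := node.MpoolPushMessage(ctx, msg, nil)
        if err != nil {
            // Includes the new post-estimation rejection of GasPremium > GasFeeCap.
            return nil, fmt.Errorf("mpool push: %w", err)
        }
        fmt.Println("pushed message:", smsg.Cid())
        return smsg, nil
    }
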
diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go
index f1e3c61fd..715689edc 100644
--- a/node/impl/full/multisig.go
+++ b/node/impl/full/multisig.go
@@ -3,19 +3,19 @@ package full
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- samsig "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
"github.com/ipfs/go-cid"
- "github.com/minio/blake2b-simd"
"go.uber.org/fx"
"golang.org/x/xerrors"
)
@@ -28,57 +28,31 @@ type MsigAPI struct {
MpoolAPI MpoolAPI
}
+func (a *MsigAPI) messageBuilder(ctx context.Context, from address.Address) (multisig.MessageBuilder, error) {
+ nver, err := a.StateAPI.StateNetworkVersion(ctx, types.EmptyTSK)
+ if err != nil {
+ return nil, err
+ }
+
+ return multisig.Message(actors.VersionForNetwork(nver), from), nil
+}
+
// TODO: remove gp (gasPrice) from arguments
+// TODO: Add "vesting start" to arguments.
func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
- lenAddrs := uint64(len(addrs))
-
- if lenAddrs < req {
- return cid.Undef, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ mb, err := a.messageBuilder(ctx, src)
+ if err != nil {
+ return cid.Undef, err
}
- if req == 0 {
- req = lenAddrs
- }
-
- if src == address.Undef {
- return cid.Undef, xerrors.Errorf("must provide source address")
- }
-
- // Set up constructor parameters for multisig
- msigParams := &samsig.ConstructorParams{
- Signers: addrs,
- NumApprovalsThreshold: req,
- UnlockDuration: duration,
- }
-
- enc, actErr := actors.SerializeParams(msigParams)
- if actErr != nil {
- return cid.Undef, actErr
- }
-
- // new actors are created by invoking 'exec' on the init actor with the constructor params
- execParams := &init_.ExecParams{
- CodeCID: builtin.MultisigActorCodeID,
- ConstructorParams: enc,
- }
-
- enc, actErr = actors.SerializeParams(execParams)
- if actErr != nil {
- return cid.Undef, actErr
- }
-
- // now we create the message to send this with
- msg := types.Message{
- To: builtin.InitActorAddr,
- From: src,
- Method: builtin.MethodsInit.Exec,
- Params: enc,
- Value: val,
+ msg, err := mb.Create(addrs, req, 0, duration, val)
+ if err != nil {
+ return cid.Undef, err
}
// send the message out to the network
- smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, &msg, nil)
+ smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil)
if err != nil {
return cid.Undef, err
}
@@ -88,38 +62,14 @@ func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Ad
func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
- if msig == address.Undef {
- return cid.Undef, xerrors.Errorf("must provide a multisig address for proposal")
+ mb, err := a.messageBuilder(ctx, src)
+ if err != nil {
+ return cid.Undef, err
}
- if to == address.Undef {
- return cid.Undef, xerrors.Errorf("must provide a target address for proposal")
- }
-
- if amt.Sign() == -1 {
- return cid.Undef, xerrors.Errorf("must provide a positive amount for proposed send")
- }
-
- if src == address.Undef {
- return cid.Undef, xerrors.Errorf("must provide source address")
- }
-
- enc, actErr := actors.SerializeParams(&samsig.ProposeParams{
- To: to,
- Value: amt,
- Method: abi.MethodNum(method),
- Params: params,
- })
- if actErr != nil {
- return cid.Undef, xerrors.Errorf("failed to serialize parameters: %w", actErr)
- }
-
- msg := &types.Message{
- To: msig,
- From: src,
- Value: types.NewInt(0),
- Method: builtin.MethodsMultisig.Propose,
- Params: enc,
+ msg, err := mb.Propose(msig, to, amt, abi.MethodNum(method), params)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to create proposal: %w", err)
}
smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil)
@@ -130,13 +80,40 @@ func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to addr
return smsg.Cid(), nil
}
+func (a *MsigAPI) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+ enc, actErr := serializeAddParams(newAdd, inc)
+ if actErr != nil {
+ return cid.Undef, actErr
+ }
+
+ return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc)
+}
+
+func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+ enc, actErr := serializeAddParams(newAdd, inc)
+ if actErr != nil {
+ return cid.Undef, actErr
+ }
+
+ return a.MsigApprove(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc)
+}
+
+func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
+ enc, actErr := serializeAddParams(newAdd, inc)
+ if actErr != nil {
+ return cid.Undef, actErr
+ }
+
+ return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.AddSigner), enc)
+}
+
func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
enc, actErr := serializeSwapParams(oldAdd, newAdd)
if actErr != nil {
return cid.Undef, actErr
}
- return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
+ return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
@@ -145,7 +122,7 @@ func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src
return cid.Undef, actErr
}
- return a.MsigApprove(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
+ return a.MsigApprove(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
@@ -154,7 +131,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src
return cid.Undef, actErr
}
- return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
+ return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin0.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
@@ -170,14 +147,6 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro
return cid.Undef, xerrors.Errorf("must provide multisig address")
}
- if to == address.Undef {
- return cid.Undef, xerrors.Errorf("must provide proposed target address")
- }
-
- if amt.Sign() == -1 {
- return cid.Undef, xerrors.Errorf("must provide the positive amount that was proposed")
- }
-
if src == address.Undef {
return cid.Undef, xerrors.Errorf("must provide source address")
}
@@ -190,7 +159,7 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro
proposer = proposerID
}
- p := samsig.ProposalHashData{
+ p := multisig.ProposalHashData{
Requester: proposer,
To: to,
Value: amt,
@@ -198,42 +167,22 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro
Params: params,
}
- pser, err := p.Serialize()
- if err != nil {
- return cid.Undef, err
- }
- phash := blake2b.Sum256(pser)
-
- enc, err := actors.SerializeParams(&samsig.TxnIDParams{
- ID: samsig.TxnID(txID),
- ProposalHash: phash[:],
- })
-
+ mb, err := a.messageBuilder(ctx, src)
if err != nil {
return cid.Undef, err
}
- var msigResponseMethod abi.MethodNum
-
- /*
- We pass in a MsigProposeResponse instead of MethodNum to
- tighten the possible inputs to just Approve and Cancel.
- */
+ var msg *types.Message
switch operation {
case api.MsigApprove:
- msigResponseMethod = builtin.MethodsMultisig.Approve
+ msg, err = mb.Approve(msig, txID, &p)
case api.MsigCancel:
- msigResponseMethod = builtin.MethodsMultisig.Cancel
+ msg, err = mb.Cancel(msig, txID, &p)
default:
return cid.Undef, xerrors.Errorf("Invalid operation for msigApproveOrCancel")
}
-
- msg := &types.Message{
- To: msig,
- From: src,
- Value: types.NewInt(0),
- Method: msigResponseMethod,
- Params: enc,
+ if err != nil {
+ return cid.Undef, err
}
smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil)
@@ -244,8 +193,20 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro
return smsg.Cid(), nil
}
+func serializeAddParams(new address.Address, inc bool) ([]byte, error) {
+ enc, actErr := actors.SerializeParams(&multisig0.AddSignerParams{
+ Signer: new,
+ Increase: inc,
+ })
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return enc, nil
+}
+
func serializeSwapParams(old address.Address, new address.Address) ([]byte, error) {
- enc, actErr := actors.SerializeParams(&samsig.SwapSignerParams{
+ enc, actErr := actors.SerializeParams(&multisig0.SwapSignerParams{
From: old,
To: new,
})
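
The multisig API above now routes message construction through the version-aware multisig.MessageBuilder instead of hand-rolling ConstructorParams/ExecParams. A sketch of that pattern, mirroring the calls in MsigCreate; the node handle, signer set, and amounts are placeholders, and StateNetworkVersion is assumed to be exposed on the node API as added in this change set.

    package example

    import (
        "context"

        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/go-state-types/abi"

        "github.com/filecoin-project/lotus/api"
        "github.com/filecoin-project/lotus/chain/actors"
        "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // createMsig builds and pushes a multisig-create message with the
    // version-aware builder, following the same call pattern as MsigCreate above.
    func createMsig(ctx context.Context, node api.FullNode, creator address.Address, signers []address.Address, threshold uint64, unlock abi.ChainEpoch, val types.BigInt) (*types.SignedMessage, error) {
        nver, err := node.StateNetworkVersion(ctx, types.EmptyTSK)
        if err != nil {
            return nil, err
        }

        mb := multisig.Message(actors.VersionForNetwork(nver), creator)
        // Signers, approval threshold, vesting start (0), unlock duration, initial value.
        msg, err := mb.Create(signers, threshold, 0, unlock, val)
        if err != nil {
            return nil, err
        }

        return node.MpoolPushMessage(ctx, msg, nil)
    }
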
diff --git a/node/impl/full/state.go b/node/impl/full/state.go
index 0e1aad27c..33ee75697 100644
--- a/node/impl/full/state.go
+++ b/node/impl/full/state.go
@@ -3,32 +3,33 @@ package full
import (
"bytes"
"context"
- "errors"
- "fmt"
"strconv"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
+
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/go-state-types/network"
+
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
- cbg "github.com/whyrusleeping/cbor-gen"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- samsig "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
@@ -41,8 +42,6 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
-var errBreakForeach = errors.New("break")
-
type StateAPI struct {
fx.In
@@ -53,130 +52,184 @@ type StateAPI struct {
ProofVerifier ffiwrapper.Verifier
StateManager *stmgr.StateManager
Chain *store.ChainStore
- Beacon beacon.RandomBeacon
+ Beacon beacon.Schedule
}
func (a *StateAPI) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
return stmgr.GetNetworkName(ctx, a.StateManager, a.Chain.GetHeaviestTipSet().ParentState())
}
-func (a *StateAPI) StateMinerSectors(ctx context.Context, addr address.Address, filter *abi.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
+func (a *StateAPI) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, addr, filter, filterOut)
+ return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, addr, sectorNos)
}
-func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
- var out []*api.ChainSectorInfo
-
- err := a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(maddr,
- a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
- var allActive []abi.BitField
-
- err := a.StateManager.WithDeadlines(
- a.StateManager.WithEachDeadline(
- a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
- active, err := partition.ActiveSectors()
- if err != nil {
- return xerrors.Errorf("partition.ActiveSectors: %w", err)
- }
-
- allActive = append(allActive, active)
- return nil
- })))(store, mas)
- if err != nil {
- return xerrors.Errorf("with deadlines: %w", err)
- }
-
- active, err := bitfield.MultiMerge(allActive...)
- if err != nil {
- return xerrors.Errorf("merging active sector bitfields: %w", err)
- }
-
- out, err = stmgr.LoadSectorsFromSet(ctx, a.Chain.Blockstore(), mas.Sectors, &active, false)
- return err
- })))
+func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { // TODO: only used in cli
+ ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
- return nil, xerrors.Errorf("getting active sectors from partitions: %w", err)
+ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
+ act, err := a.StateManager.LoadActorTsk(ctx, maddr, tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("merge partition active sets: %w", err)
+ }
+
+ return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, maddr, &activeSectors)
+}
+
+func (a *StateAPI) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, actor, tsk)
+ if err != nil {
+ return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ return mas.Info()
+}
+
+func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, m, tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
+ }
+
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ deadlines, err := mas.NumDeadlines()
+ if err != nil {
+ return nil, xerrors.Errorf("getting deadline count: %w", err)
+ }
+
+ out := make([]api.Deadline, deadlines)
+ if err := mas.ForEachDeadline(func(i uint64, dl miner.Deadline) error {
+ ps, err := dl.PostSubmissions()
+ if err != nil {
+ return err
+ }
+
+ out[i] = api.Deadline{
+ PostSubmissions: ps,
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
return out, nil
}
-func (a *StateAPI) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) {
- ts, err := a.Chain.GetTipSetFromKey(tsk)
+func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, m, tsk)
if err != nil {
- return api.MinerInfo{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- mi, err := stmgr.StateMinerInfo(ctx, a.StateManager, ts, actor)
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
if err != nil {
- return api.MinerInfo{}, err
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
- return api.NewApiMinerInfo(mi), nil
+
+ dl, err := mas.LoadDeadline(dlIdx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load the deadline: %w", err)
+ }
+
+ var out []api.Partition
+ err = dl.ForEachPartition(func(_ uint64, part miner.Partition) error {
+ allSectors, err := part.AllSectors()
+ if err != nil {
+ return xerrors.Errorf("getting AllSectors: %w", err)
+ }
+
+ faultySectors, err := part.FaultySectors()
+ if err != nil {
+ return xerrors.Errorf("getting FaultySectors: %w", err)
+ }
+
+ recoveringSectors, err := part.RecoveringSectors()
+ if err != nil {
+ return xerrors.Errorf("getting RecoveringSectors: %w", err)
+ }
+
+ liveSectors, err := part.LiveSectors()
+ if err != nil {
+ return xerrors.Errorf("getting LiveSectors: %w", err)
+ }
+
+ activeSectors, err := part.ActiveSectors()
+ if err != nil {
+ return xerrors.Errorf("getting ActiveSectors: %w", err)
+ }
+
+ out = append(out, api.Partition{
+ AllSectors: allSectors,
+ FaultySectors: faultySectors,
+ RecoveringSectors: recoveringSectors,
+ LiveSectors: liveSectors,
+ ActiveSectors: activeSectors,
+ })
+ return nil
+ })
+
+ return out, err
}
-func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]*miner.Deadline, error) {
- var out []*miner.Deadline
- return out, a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(m,
- a.StateManager.WithActorState(ctx,
- a.StateManager.WithDeadlines(
- a.StateManager.WithEachDeadline(
- func(store adt.Store, idx uint64, deadline *miner.Deadline) error {
- out = append(out, deadline)
- return nil
- })))))
-}
-
-func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]*miner.Partition, error) {
- var out []*miner.Partition
- return out, a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(m,
- a.StateManager.WithActorState(ctx,
- a.StateManager.WithDeadlines(
- a.StateManager.WithDeadline(dlIdx,
- a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
- out = append(out, partition)
- return nil
- }))))))
-}
-
-func (a *StateAPI) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
- ts, err := a.Chain.GetTipSetFromKey(tsk)
+func (a *StateAPI) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) {
+ ts, err := a.StateManager.ChainStore().GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var mas miner.State
- _, err = a.StateManager.LoadActorState(ctx, addr, &mas, ts)
+ act, err := a.StateManager.LoadActor(ctx, addr, ts)
if err != nil {
- return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
+ return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- return mas.DeadlineInfo(ts.Height()).NextNotElapsed(), nil
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ di, err := mas.DeadlineInfo(ts.Height())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get deadline info: %w", err)
+ }
+
+ return di.NextNotElapsed(), nil
}
-func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, tsk types.TipSetKey) (abi.BitField, error) {
- out := bitfield.New()
-
- err := a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(addr,
- a.StateManager.WithActorState(ctx,
- a.StateManager.WithDeadlines(
- a.StateManager.WithEachDeadline(
- a.StateManager.WithEachPartition(func(store adt.Store, idx uint64, partition *miner.Partition) (err error) {
- out, err = bitfield.MergeBitFields(out, partition.Faults)
- return err
- }))))))
+func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, addr, tsk)
if err != nil {
- return bitfield.BitField{}, err
+ return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err)
}
- return out, err
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ return miner.AllPartSectors(mas, miner.Partition.FaultySectors)
}
func (a *StateAPI) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) {
@@ -222,23 +275,18 @@ func (a *StateAPI) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEp
return allFaults, nil*/
}
-func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Address, tsk types.TipSetKey) (abi.BitField, error) {
- out := bitfield.New()
-
- err := a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(addr,
- a.StateManager.WithActorState(ctx,
- a.StateManager.WithDeadlines(
- a.StateManager.WithEachDeadline(
- a.StateManager.WithEachPartition(func(store adt.Store, idx uint64, partition *miner.Partition) (err error) {
- out, err = bitfield.MergeBitFields(out, partition.Recoveries)
- return err
- }))))))
+func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, addr, tsk)
if err != nil {
- return bitfield.BitField{}, err
+ return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err)
}
- return out, err
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ return miner.AllPartSectors(mas, miner.Partition.RecoveringSectors)
}
func (a *StateAPI) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
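
StateMinerFaults and StateMinerRecoveries now return a single bitfield merged across every deadline and partition via miner.AllPartSectors. A small client-side sketch; the node handle and miner address are placeholders.

    package example

    import (
        "context"
        "fmt"

        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/lotus/api"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // countFaults sketches a client of the reworked StateMinerFaults: the API
    // returns one bitfield already merged across all deadlines and partitions.
    func countFaults(ctx context.Context, node api.FullNode, maddr address.Address) (uint64, error) {
        faults, err := node.StateMinerFaults(ctx, maddr, types.EmptyTSK)
        if err != nil {
            return 0, fmt.Errorf("fetching faults: %w", err)
        }
        n, err := faults.Count()
        if err != nil {
            return 0, fmt.Errorf("counting faults: %w", err)
        }
        fmt.Printf("miner %s has %d faulty sectors\n", maddr, n)
        return n, nil
    }
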
@@ -247,23 +295,34 @@ func (a *StateAPI) StateMinerPower(ctx context.Context, addr address.Address, ts
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- m, net, err := stmgr.GetPower(ctx, a.StateManager, ts, addr)
+ m, net, hmp, err := stmgr.GetPower(ctx, a.StateManager, ts, addr)
if err != nil {
return nil, err
}
return &api.MinerPower{
- MinerPower: m,
- TotalPower: net,
+ MinerPower: m,
+ TotalPower: net,
+ HasMinPower: hmp,
}, nil
}
-func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
+func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- return a.StateManager.Call(ctx, msg, ts)
+ for {
+ res, err = a.StateManager.Call(ctx, msg, ts)
+ if err != stmgr.ErrExpensiveFork {
+ break
+ }
+ ts, err = a.Chain.GetTipSetFromKey(ts.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("getting parent tipset: %w", err)
+ }
+ }
+ return res, err
}
func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) {
@@ -360,7 +419,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts
return nil, xerrors.Errorf("getting actor head: %w", err)
}
- oif, err := vm.DumpActorState(act.Code, blk.RawData())
+ oif, err := vm.DumpActorState(act, blk.RawData())
if err != nil {
return nil, xerrors.Errorf("dumping actor state (a:%s): %w", actor, err)
}
@@ -482,37 +541,31 @@ func (a *StateAPI) StateMarketBalance(ctx context.Context, addr address.Address,
func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) {
out := map[string]api.MarketBalance{}
- var state market.State
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- if _, err := a.StateManager.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil {
- return nil, err
- }
- store := a.StateManager.ChainStore().Store(ctx)
- escrow, err := adt.AsMap(store, state.EscrowTable)
+
+ state, err := a.StateManager.GetMarketState(ctx, ts)
if err != nil {
return nil, err
}
- locked, err := adt.AsMap(store, state.LockedTable)
+ escrow, err := state.EscrowTable()
+ if err != nil {
+ return nil, err
+ }
+ locked, err := state.LockedTable()
if err != nil {
return nil, err
}
- var es, lk abi.TokenAmount
- err = escrow.ForEach(&es, func(k string) error {
- a, err := address.NewFromBytes([]byte(k))
+ err = escrow.ForEach(func(a address.Address, es abi.TokenAmount) error {
+
+ lk, err := locked.Get(a)
if err != nil {
return err
}
- if found, err := locked.Get(adt.AddrKey(a), &lk); err != nil {
- return err
- } else if !found {
- return fmt.Errorf("locked funds not found")
- }
-
out[a.String()] = api.MarketBalance{
Escrow: es,
Locked: lk,
@@ -528,37 +581,36 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet
func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) {
out := map[string]api.MarketDeal{}
- var state market.State
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- if _, err := a.StateManager.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil {
- return nil, err
- }
- store := a.StateManager.ChainStore().Store(ctx)
- da, err := adt.AsArray(store, state.Proposals)
+ state, err := a.StateManager.GetMarketState(ctx, ts)
if err != nil {
return nil, err
}
- sa, err := adt.AsArray(store, state.States)
+ da, err := state.Proposals()
if err != nil {
return nil, err
}
- var d market.DealProposal
- if err := da.ForEach(&d, func(i int64) error {
- var s market.DealState
- if found, err := sa.Get(uint64(i), &s); err != nil {
+ sa, err := state.States()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := da.ForEach(func(dealID abi.DealID, d market.DealProposal) error {
+ s, found, err := sa.Get(dealID)
+ if err != nil {
return xerrors.Errorf("failed to get state for deal in proposals array: %w", err)
} else if !found {
- s.SectorStartEpoch = -1
+ s = market.EmptyDealState()
}
- out[strconv.FormatInt(i, 10)] = api.MarketDeal{
+ out[strconv.FormatInt(int64(dealID), 10)] = api.MarketDeal{
Proposal: d,
- State: s,
+ State: *s,
}
return nil
}); err != nil {
@@ -576,106 +628,60 @@ func (a *StateAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID
}
func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) {
- store := adt.WrapStore(ctx, cbor.NewCborStore(a.Chain.Blockstore()))
+ store := a.Chain.Store(ctx)
- nh, err := adt.AsMap(store, new)
+ oldTree, err := state.LoadStateTree(store, old)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load old state tree: %w", err)
}
- oh, err := adt.AsMap(store, old)
+ newTree, err := state.LoadStateTree(store, new)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load new state tree: %w", err)
}
- out := map[string]types.Actor{}
-
- var (
- ncval, ocval cbg.Deferred
- buf = bytes.NewReader(nil)
- )
- err = nh.ForEach(&ncval, func(k string) error {
- var act types.Actor
-
- addr, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return xerrors.Errorf("address in state tree was not valid: %w", err)
- }
-
- found, err := oh.Get(adt.AddrKey(addr), &ocval)
- if err != nil {
- return err
- }
-
- if found && bytes.Equal(ocval.Raw, ncval.Raw) {
- return nil // not changed
- }
-
- buf.Reset(ncval.Raw)
- err = act.UnmarshalCBOR(buf)
- buf.Reset(nil)
-
- if err != nil {
- return err
- }
-
- out[addr.String()] = act
-
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return out, nil
+ return state.Diff(oldTree, newTree)
}
func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) {
- var out api.MinerSectors
-
- err := a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(addr,
- a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
- var allActive []abi.BitField
-
- err := a.StateManager.WithDeadlines(
- a.StateManager.WithEachDeadline(
- a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
- active, err := partition.ActiveSectors()
- if err != nil {
- return xerrors.Errorf("partition.ActiveSectors: %w", err)
- }
-
- allActive = append(allActive, active)
- return nil
- })))(store, mas)
- if err != nil {
- return xerrors.Errorf("with deadlines: %w", err)
- }
-
- active, err := bitfield.MultiMerge(allActive...)
- if err != nil {
- return xerrors.Errorf("merging active sector bitfields: %w", err)
- }
-
- out.Active, err = active.Count()
- if err != nil {
- return xerrors.Errorf("counting active sectors: %w", err)
- }
-
- sarr, err := adt.AsArray(store, mas.Sectors)
- if err != nil {
- return err
- }
-
- out.Sectors = sarr.Length()
- return nil
- })))
+ act, err := a.StateManager.LoadActorTsk(ctx, addr, tsk)
if err != nil {
return api.MinerSectors{}, err
}
-
- return out, nil
+ mas, err := miner.Load(a.Chain.Store(ctx), act)
+ if err != nil {
+ return api.MinerSectors{}, err
+ }
+ var activeCount, liveCount, faultyCount uint64
+ if err := mas.ForEachDeadline(func(_ uint64, dl miner.Deadline) error {
+ return dl.ForEachPartition(func(_ uint64, part miner.Partition) error {
+ if active, err := part.ActiveSectors(); err != nil {
+ return err
+ } else if count, err := active.Count(); err != nil {
+ return err
+ } else {
+ activeCount += count
+ }
+ if live, err := part.LiveSectors(); err != nil {
+ return err
+ } else if count, err := live.Count(); err != nil {
+ return err
+ } else {
+ liveCount += count
+ }
+ if faulty, err := part.FaultySectors(); err != nil {
+ return err
+ } else if count, err := faulty.Count(); err != nil {
+ return err
+ } else {
+ faultyCount += count
+ }
+ return nil
+ })
+ }); err != nil {
+ return api.MinerSectors{}, err
+ }
+ return api.MinerSectors{Live: liveCount, Active: activeCount, Faulty: faultyCount}, nil
}
func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
@@ -683,7 +689,13 @@ func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.A
if err != nil {
return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- return stmgr.PreCommitInfo(ctx, a.StateManager, maddr, n, ts)
+
+ pci, err := stmgr.PreCommitInfo(ctx, a.StateManager, maddr, n, ts)
+ if err != nil {
+ return miner.SectorPreCommitOnChainInfo{}, err
+ }
+
+ return *pci, err
}
func (a *StateAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
@@ -694,97 +706,28 @@ func (a *StateAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address
return stmgr.MinerSectorInfo(ctx, a.StateManager, maddr, n, ts)
}
-type sectorPartitionCb func(store adt.Store, mas *miner.State, di uint64, pi uint64, part *miner.Partition) error
-
-func (a *StateAPI) sectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey, cb sectorPartitionCb) error {
- return a.StateManager.WithParentStateTsk(tsk,
- a.StateManager.WithActor(maddr,
- a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
- return a.StateManager.WithDeadlines(func(store adt.Store, deadlines *miner.Deadlines) error {
- err := a.StateManager.WithEachDeadline(func(store adt.Store, di uint64, deadline *miner.Deadline) error {
- return a.StateManager.WithEachPartition(func(store adt.Store, pi uint64, partition *miner.Partition) error {
- set, err := partition.Sectors.IsSet(uint64(sectorNumber))
- if err != nil {
- return xerrors.Errorf("is set: %w", err)
- }
- if set {
- if err := cb(store, mas, di, pi, partition); err != nil {
- return err
- }
-
- return errBreakForeach
- }
- return nil
- })(store, di, deadline)
- })(store, deadlines)
- if err == errBreakForeach {
- err = nil
- }
- return err
- })(store, mas)
- })))
-}
-
-func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*api.SectorExpiration, error) {
- var onTimeEpoch, earlyEpoch abi.ChainEpoch
-
- err := a.sectorPartition(ctx, maddr, sectorNumber, tsk, func(store adt.Store, mas *miner.State, di uint64, pi uint64, part *miner.Partition) error {
- quant := mas.QuantSpecForDeadline(di)
- expirations, err := miner.LoadExpirationQueue(store, part.ExpirationsEpochs, quant)
- if err != nil {
- return xerrors.Errorf("loading expiration queue: %w", err)
- }
-
- var eset miner.ExpirationSet
- return expirations.Array.ForEach(&eset, func(epoch int64) error {
- set, err := eset.OnTimeSectors.IsSet(uint64(sectorNumber))
- if err != nil {
- return xerrors.Errorf("checking if sector is in onTime set: %w", err)
- }
- if set {
- onTimeEpoch = abi.ChainEpoch(epoch)
- }
-
- set, err = eset.EarlySectors.IsSet(uint64(sectorNumber))
- if err != nil {
- return xerrors.Errorf("checking if sector is in early set: %w", err)
- }
- if set {
- earlyEpoch = abi.ChainEpoch(epoch)
- }
-
- return nil
- })
- })
+func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, maddr, tsk)
if err != nil {
return nil, err
}
-
- if onTimeEpoch == 0 {
- return nil, xerrors.Errorf("expiration for sector %d not found", sectorNumber)
- }
-
- return &api.SectorExpiration{
- OnTime: onTimeEpoch,
- Early: earlyEpoch,
- }, nil
-}
-
-func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*api.SectorLocation, error) {
- var found *api.SectorLocation
-
- err := a.sectorPartition(ctx, maddr, sectorNumber, tsk, func(store adt.Store, mas *miner.State, di, pi uint64, partition *miner.Partition) error {
- found = &api.SectorLocation{
- Deadline: di,
- Partition: pi,
- }
- return errBreakForeach
- })
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
if err != nil {
return nil, err
}
+ return mas.GetSectorExpiration(sectorNumber)
+}
- return found, nil
+func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorLocation, error) {
+ act, err := a.StateManager.LoadActorTsk(ctx, maddr, tsk)
+ if err != nil {
+ return nil, err
+ }
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return nil, err
+ }
+ return mas.FindSector(sectorNumber)
}
func (a *StateAPI) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) {
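
StateSectorExpiration and StateSectorPartition now delegate to the versioned miner state (GetSectorExpiration, FindSector) instead of walking deadlines and partitions by hand. A caller sketch for the partition lookup, assuming the miner.SectorLocation fields mirror the old api.SectorLocation ones; handle, address, and sector number are placeholders.

    package example

    import (
        "context"
        "fmt"

        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/lotus/api"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // locateSector sketches the updated StateSectorPartition call, which resolves
    // a sector's deadline/partition through the versioned miner state.
    func locateSector(ctx context.Context, node api.FullNode, maddr address.Address, sn abi.SectorNumber) error {
        loc, err := node.StateSectorPartition(ctx, maddr, sn, types.EmptyTSK)
        if err != nil {
            return fmt.Errorf("finding sector %d: %w", sn, err)
        }
        fmt.Printf("sector %d: deadline %d, partition %d\n", sn, loc.Deadline, loc.Partition)
        return nil
    }
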
@@ -862,28 +805,97 @@ func (a *StateAPI) MsigGetAvailableBalance(ctx context.Context, addr address.Add
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var st samsig.State
- act, err := a.StateManager.LoadActorState(ctx, addr, &st, ts)
+ act, err := a.StateManager.LoadActor(ctx, addr, ts)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to load multisig actor: %w", err)
+ }
+ msas, err := multisig.Load(a.Chain.Store(ctx), act)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err)
+ }
+ locked, err := msas.LockedBalance(ts.Height())
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to compute locked multisig balance: %w", err)
+ }
+ return types.BigSub(act.Balance, locked), nil
+}
+
+func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MsigVesting, error) {
+ ts, err := a.Chain.GetTipSetFromKey(tsk)
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+
+ act, err := a.StateManager.LoadActor(ctx, addr, ts)
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor: %w", err)
+ }
+
+ msas, err := multisig.Load(a.Chain.Store(ctx), act)
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor state: %w", err)
+ }
+
+ ib, err := msas.InitialBalance()
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("failed to load multisig initial balance: %w", err)
+ }
+
+ se, err := msas.StartEpoch()
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("failed to load multisig start epoch: %w", err)
+ }
+
+ ud, err := msas.UnlockDuration()
+ if err != nil {
+ return api.EmptyVesting, xerrors.Errorf("failed to load multisig unlock duration: %w", err)
+ }
+
+ return api.MsigVesting{
+ InitialBalance: ib,
+ StartEpoch: se,
+ UnlockDuration: ud,
+ }, nil
+}
+
+func (a *StateAPI) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) {
+ startTs, err := a.Chain.GetTipSetFromKey(start)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading start tipset %s: %w", start, err)
+ }
+
+ endTs, err := a.Chain.GetTipSetFromKey(end)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading end tipset %s: %w", end, err)
+ }
+
+ if startTs.Height() > endTs.Height() {
+ return types.EmptyInt, xerrors.Errorf("start tipset %d is after end tipset %d", startTs.Height(), endTs.Height())
+ } else if startTs.Height() == endTs.Height() {
+ return big.Zero(), nil
+ }
+
+ act, err := a.StateManager.LoadActor(ctx, addr, endTs)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to load multisig actor at end epoch: %w", err)
+ }
+
+ msas, err := multisig.Load(a.Chain.Store(ctx), act)
if err != nil {
return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err)
}
- if act.Code != builtin.MultisigActorCodeID {
- return types.EmptyInt, fmt.Errorf("given actor was not a multisig")
+ startLk, err := msas.LockedBalance(startTs.Height())
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to compute locked balance at start height: %w", err)
}
- if st.UnlockDuration == 0 {
- return act.Balance, nil
+ endLk, err := msas.LockedBalance(endTs.Height())
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to compute locked balance at end height: %w", err)
}
- offset := ts.Height() - st.StartEpoch
- if offset > st.UnlockDuration {
- return act.Balance, nil
- }
-
- minBalance := types.BigDiv(st.InitialBalance, types.NewInt(uint64(st.UnlockDuration)))
- minBalance = types.BigMul(minBalance, types.NewInt(uint64(offset)))
- return types.BigSub(act.Balance, minBalance), nil
+ return types.BigSub(startLk, endLk), nil
}
var initialPledgeNum = types.NewInt(110)
@@ -895,160 +907,133 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var minerState miner.State
- var powerState power.State
- var rewardState reward.State
-
- err = a.StateManager.WithParentStateTsk(tsk, func(state *state.StateTree) error {
- if err := a.StateManager.WithActor(maddr, a.StateManager.WithActorState(ctx, &minerState))(state); err != nil {
- return xerrors.Errorf("getting miner state: %w", err)
- }
-
- if err := a.StateManager.WithActor(builtin.StoragePowerActorAddr, a.StateManager.WithActorState(ctx, &powerState))(state); err != nil {
- return xerrors.Errorf("getting power state: %w", err)
- }
-
- if err := a.StateManager.WithActor(builtin.RewardActorAddr, a.StateManager.WithActorState(ctx, &rewardState))(state); err != nil {
- return xerrors.Errorf("getting reward state: %w", err)
- }
-
- return nil
- })
+ state, err := a.StateManager.ParentState(ts)
if err != nil {
- return types.EmptyInt, err
+ return types.EmptyInt, xerrors.Errorf("loading state %s: %w", tsk, err)
}
- dealWeights := market.VerifyDealsForActivationReturn{
- DealWeight: big.Zero(),
- VerifiedDealWeight: big.Zero(),
- }
-
- if len(pci.DealIDs) != 0 {
- var err error
- params, err := actors.SerializeParams(&market.VerifyDealsForActivationParams{
- DealIDs: pci.DealIDs,
- SectorExpiry: pci.Expiration,
- })
- if err != nil {
- return types.EmptyInt, err
- }
-
- ret, err := a.StateManager.Call(ctx, &types.Message{
- From: maddr,
- To: builtin.StorageMarketActorAddr,
- Method: builtin.MethodsMarket.VerifyDealsForActivation,
- Params: params,
- }, ts)
- if err != nil {
- return types.EmptyInt, err
- }
-
- if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret.MsgRct.Return)); err != nil {
- return types.BigInt{}, err
- }
- }
-
- mi, err := a.StateMinerInfo(ctx, maddr, tsk)
+ ssize, err := pci.SealProof.SectorSize()
if err != nil {
- return types.EmptyInt, err
+ return types.EmptyInt, xerrors.Errorf("failed to get resolve size: %w", err)
}
- ssize := mi.SectorSize
+ store := a.Chain.Store(ctx)
- duration := pci.Expiration - ts.Height() // NB: not exactly accurate, but should always lead us to *over* estimate, not under
+ var sectorWeight abi.StoragePower
+ if act, err := state.GetActor(market.Address); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading market actor %s: %w", maddr, err)
+ } else if s, err := market.Load(store, act); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading market actor state %s: %w", maddr, err)
+ } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil {
+ return types.EmptyInt, xerrors.Errorf("verifying deals for activation: %w", err)
+ } else {
+ // NB: not exactly accurate, but should always lead us to *over* estimate, not under
+ duration := pci.Expiration - ts.Height()
+ sectorWeight = builtin.QAPowerForWeight(ssize, duration, w, vw)
+ }
- sectorWeight := miner.QAPowerForWeight(ssize, duration, dealWeights.DealWeight, dealWeights.VerifiedDealWeight)
- deposit := miner.PreCommitDepositForPower(
- rewardState.ThisEpochRewardSmoothed,
- powerState.ThisEpochQAPowerSmoothed,
- sectorWeight,
- )
+ var powerSmoothed builtin.FilterEstimate
+ if act, err := state.GetActor(power.Address); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err)
+ } else if s, err := power.Load(store, act); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err)
+ } else if p, err := s.TotalPowerSmoothed(); err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to determine total power: %w", err)
+ } else {
+ powerSmoothed = p
+ }
+
+ rewardActor, err := state.GetActor(reward.Address)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err)
+ }
+
+ rewardState, err := reward.Load(store, rewardActor)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err)
+ }
+
+ deposit, err := rewardState.PreCommitDepositForPower(powerSmoothed, sectorWeight)
+ if err != nil {
+ return big.Zero(), xerrors.Errorf("calculating precommit deposit: %w", err)
+ }
return types.BigDiv(types.BigMul(deposit, initialPledgeNum), initialPledgeDen), nil
}
func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) {
+ // TODO: this repeats a lot of the previous function. Fix that.
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var minerState miner.State
- var powerState power.State
- var rewardState reward.State
-
- err = a.StateManager.WithParentStateTsk(tsk, func(state *state.StateTree) error {
- if err := a.StateManager.WithActor(maddr, a.StateManager.WithActorState(ctx, &minerState))(state); err != nil {
- return xerrors.Errorf("getting miner state: %w", err)
- }
-
- if err := a.StateManager.WithActor(builtin.StoragePowerActorAddr, a.StateManager.WithActorState(ctx, &powerState))(state); err != nil {
- return xerrors.Errorf("getting power state: %w", err)
- }
-
- if err := a.StateManager.WithActor(builtin.RewardActorAddr, a.StateManager.WithActorState(ctx, &rewardState))(state); err != nil {
- return xerrors.Errorf("getting reward state: %w", err)
- }
-
- return nil
- })
+ state, err := a.StateManager.ParentState(ts)
if err != nil {
- return types.EmptyInt, err
+ return types.EmptyInt, xerrors.Errorf("loading state %s: %w", tsk, err)
}
- dealWeights := market.VerifyDealsForActivationReturn{
- DealWeight: big.Zero(),
- VerifiedDealWeight: big.Zero(),
- }
-
- if len(pci.DealIDs) != 0 {
- var err error
- params, err := actors.SerializeParams(&market.VerifyDealsForActivationParams{
- DealIDs: pci.DealIDs,
- SectorExpiry: pci.Expiration,
- })
- if err != nil {
- return types.EmptyInt, err
- }
-
- ret, err := a.StateManager.Call(ctx, &types.Message{
- From: maddr,
- To: builtin.StorageMarketActorAddr,
- Method: builtin.MethodsMarket.VerifyDealsForActivation,
- Params: params,
- }, ts)
- if err != nil {
- return types.EmptyInt, err
- }
-
- if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret.MsgRct.Return)); err != nil {
- return types.BigInt{}, err
- }
- }
-
- mi, err := a.StateMinerInfo(ctx, maddr, tsk)
+ ssize, err := pci.SealProof.SectorSize()
if err != nil {
- return types.EmptyInt, err
+ return types.EmptyInt, xerrors.Errorf("failed to get resolve size: %w", err)
}
- ssize := mi.SectorSize
+ store := a.Chain.Store(ctx)
- duration := pci.Expiration - ts.Height() // NB: not exactly accurate, but should always lead us to *over* estimate, not under
+ var sectorWeight abi.StoragePower
+ if act, err := state.GetActor(market.Address); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading miner actor %s: %w", maddr, err)
+ } else if s, err := market.Load(store, act); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading market actor state %s: %w", maddr, err)
+ } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil {
+ return types.EmptyInt, xerrors.Errorf("verifying deals for activation: %w", err)
+ } else {
+ // NB: not exactly accurate, but should always lead us to *over* estimate, not under
+ duration := pci.Expiration - ts.Height()
+ sectorWeight = builtin.QAPowerForWeight(ssize, duration, w, vw)
+ }
+
+ var (
+ powerSmoothed builtin.FilterEstimate
+ pledgeCollateral abi.TokenAmount
+ )
+ if act, err := state.GetActor(power.Address); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err)
+ } else if s, err := power.Load(store, act); err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err)
+ } else if p, err := s.TotalPowerSmoothed(); err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to determine total power: %w", err)
+ } else if c, err := s.TotalLocked(); err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to determine pledge collateral: %w", err)
+ } else {
+ powerSmoothed = p
+ pledgeCollateral = c
+ }
+
+ rewardActor, err := state.GetActor(reward.Address)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err)
+ }
+
+ rewardState, err := reward.Load(store, rewardActor)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err)
+ }
circSupply, err := a.StateCirculatingSupply(ctx, ts.Key())
if err != nil {
return big.Zero(), xerrors.Errorf("getting circulating supply: %w", err)
}
- sectorWeight := miner.QAPowerForWeight(ssize, duration, dealWeights.DealWeight, dealWeights.VerifiedDealWeight)
- initialPledge := miner.InitialPledgeForPower(
+ initialPledge, err := rewardState.InitialPledgeForPower(
sectorWeight,
- rewardState.ThisEpochBaselinePower,
- powerState.ThisEpochPledgeCollateral,
- rewardState.ThisEpochRewardSmoothed,
- powerState.ThisEpochQAPowerSmoothed,
+ pledgeCollateral,
+ &powerSmoothed,
circSupply.FilCirculating,
)
+ if err != nil {
+ return big.Zero(), xerrors.Errorf("calculating initial pledge: %w", err)
+ }
return types.BigDiv(types.BigMul(initialPledge, initialPledgeNum), initialPledgeDen), nil
}
@@ -1059,30 +1044,34 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var act *types.Actor
- var mas miner.State
-
- if err := a.StateManager.WithParentState(ts, a.StateManager.WithActor(maddr, func(actor *types.Actor) error {
- act = actor
- return a.StateManager.WithActorState(ctx, &mas)(actor)
- })); err != nil {
- return types.BigInt{}, xerrors.Errorf("getting miner state: %w", err)
+ act, err := a.StateManager.LoadActor(ctx, maddr, ts)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to load miner actor: %w", err)
}
- as := store.ActorStore(ctx, a.Chain.Blockstore())
- vested, err := mas.CheckVestedFunds(as, ts.Height())
+ mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return types.EmptyInt, xerrors.Errorf("failed to load miner actor state: %w", err)
+ }
+
+ vested, err := mas.VestedFunds(ts.Height())
if err != nil {
return types.EmptyInt, err
}
- return types.BigAdd(mas.GetAvailableBalance(act.Balance), vested), nil
+ abal, err := mas.AvailableBalance(act.Balance)
+ if err != nil {
+ return types.EmptyInt, err
+ }
+
+ return types.BigAdd(abal, vested), nil
}
// StateVerifiedClientStatus returns the data cap for the given address.
-// Returns nil if there is no entry in the data cap table for the
+// Returns zero if there is no entry in the data cap table for the
// address.
-func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*verifreg.DataCap, error) {
- act, err := a.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, tsk)
+func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
+ act, err := a.StateGetActor(ctx, verifreg.Address, tsk)
if err != nil {
return nil, err
}
@@ -1093,28 +1082,67 @@ func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.A
return nil, err
}
- store := a.StateManager.ChainStore().Store(ctx)
-
- var st verifreg.State
- if err := store.Get(ctx, act.Head, &st); err != nil {
- return nil, err
- }
-
- vh, err := adt.AsMap(store, st.VerifiedClients)
+ vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("failed to load verified registry state: %w", err)
}
- var dcap verifreg.DataCap
- if found, err := vh.Get(adt.AddrKey(aid), &dcap); err != nil {
- return nil, err
- } else if !found {
+ verified, dcap, err := vrs.VerifierDataCap(aid)
+ if err != nil {
+ return nil, xerrors.Errorf("looking up verifier: %w", err)
+ }
+ if !verified {
return nil, nil
}
return &dcap, nil
}
+// StateVerifiedClientStatus returns the data cap for the given address.
+// Returns nil if there is no entry in the data cap table for the
+// address.
+func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
+ act, err := a.StateGetActor(ctx, verifreg.Address, tsk)
+ if err != nil {
+ return nil, err
+ }
+
+ aid, err := a.StateLookupID(ctx, addr, tsk)
+ if err != nil {
+ log.Warnf("lookup failure %v", err)
+ return nil, err
+ }
+
+ vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load verified registry state: %w", err)
+ }
+
+ verified, dcap, err := vrs.VerifiedClientDataCap(aid)
+ if err != nil {
+ return nil, xerrors.Errorf("looking up verified client: %w", err)
+ }
+ if !verified {
+ return nil, nil
+ }
+
+ return &dcap, nil
+}
+
+func (a *StateAPI) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) {
+ vact, err := a.StateGetActor(ctx, verifreg.Address, tsk)
+ if err != nil {
+ return address.Undef, err
+ }
+
+ vst, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), vact)
+ if err != nil {
+ return address.Undef, err
+ }
+
+ return vst.RootKey()
+}
+
var dealProviderCollateralNum = types.NewInt(110)
var dealProviderCollateralDen = types.NewInt(100)
@@ -1126,23 +1154,24 @@ func (a *StateAPI) StateDealProviderCollateralBounds(ctx context.Context, size a
return api.DealCollateralBounds{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- var powerState power.State
- var rewardState reward.State
-
- err = a.StateManager.WithParentStateTsk(ts.Key(), func(state *state.StateTree) error {
- if err := a.StateManager.WithActor(builtin.StoragePowerActorAddr, a.StateManager.WithActorState(ctx, &powerState))(state); err != nil {
- return xerrors.Errorf("getting power state: %w", err)
- }
-
- if err := a.StateManager.WithActor(builtin.RewardActorAddr, a.StateManager.WithActorState(ctx, &rewardState))(state); err != nil {
- return xerrors.Errorf("getting reward state: %w", err)
- }
-
- return nil
- })
-
+ pact, err := a.StateGetActor(ctx, power.Address, tsk)
if err != nil {
- return api.DealCollateralBounds{}, xerrors.Errorf("getting power and reward actor states: %w", err)
+ return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor: %w", err)
+ }
+
+ ract, err := a.StateGetActor(ctx, reward.Address, tsk)
+ if err != nil {
+ return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor: %w", err)
+ }
+
+ pst, err := power.Load(a.StateManager.ChainStore().Store(ctx), pact)
+ if err != nil {
+ return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor state: %w", err)
+ }
+
+ rst, err := reward.Load(a.StateManager.ChainStore().Store(ctx), ract)
+ if err != nil {
+ return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor state: %w", err)
}
circ, err := a.StateCirculatingSupply(ctx, ts.Key())
@@ -1150,7 +1179,23 @@ func (a *StateAPI) StateDealProviderCollateralBounds(ctx context.Context, size a
return api.DealCollateralBounds{}, xerrors.Errorf("getting total circulating supply: %w", err)
}
- min, max := market.DealProviderCollateralBounds(size, verified, powerState.ThisEpochQualityAdjPower, rewardState.ThisEpochBaselinePower, circ.FilCirculating)
+ powClaim, err := pst.TotalPower()
+ if err != nil {
+ return api.DealCollateralBounds{}, xerrors.Errorf("getting total power: %w", err)
+ }
+
+ rewPow, err := rst.ThisEpochBaselinePower()
+ if err != nil {
+ return api.DealCollateralBounds{}, xerrors.Errorf("getting reward baseline power: %w", err)
+ }
+
+ min, max := policy.DealProviderCollateralBounds(size,
+ verified,
+ powClaim.RawBytePower,
+ powClaim.QualityAdjPower,
+ rewPow,
+ circ.FilCirculating,
+ a.StateManager.GetNtwkVersion(ctx, ts.Height()))
return api.DealCollateralBounds{
Min: types.BigDiv(types.BigMul(min, dealProviderCollateralNum), dealProviderCollateralDen),
Max: max,
@@ -1163,16 +1208,67 @@ func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetK
return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
- st, _, err := a.StateManager.TipSetState(ctx, ts)
+ sTree, err := a.stateForTs(ctx, ts)
if err != nil {
return api.CirculatingSupply{}, err
}
-
- cst := cbor.NewCborStore(a.Chain.Blockstore())
- sTree, err := state.LoadStateTree(cst, st)
- if err != nil {
- return api.CirculatingSupply{}, err
- }
-
return a.StateManager.GetCirculatingSupplyDetailed(ctx, ts.Height(), sTree)
}
+
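+// StateNetworkVersion returns the network version in effect at the given tipset's epoch.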
+func (a *StateAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) {
+ ts, err := a.Chain.GetTipSetFromKey(tsk)
+ if err != nil {
+ return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+
+ return a.StateManager.GetNtwkVersion(ctx, ts.Height()), nil
+}
+
+func (a *StateAPI) StateMsgGasCost(ctx context.Context, inputMsg cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) {
+ var msg cid.Cid
+ var ts *types.TipSet
+ var err error
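+ // If a tipset key is supplied, replay the message on that tipset directly;
+ // otherwise search the chain for the message and replay it on the parent of
+ // its execution tipset.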
+ if tsk != types.EmptyTSK {
+ msg = inputMsg
+ ts, err = a.Chain.LoadTipSet(tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+ } else {
+ mlkp, err := a.StateSearchMsg(ctx, inputMsg)
+ if err != nil {
+ return nil, xerrors.Errorf("searching for msg %s: %w", inputMsg, err)
+ }
+ if mlkp == nil {
+ return nil, xerrors.Errorf("didn't find msg %s", inputMsg)
+ }
+
+ executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet)
+ if err != nil {
+ return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err)
+ }
+
+ ts, err = a.Chain.LoadTipSet(executionTs.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err)
+ }
+
+ msg = mlkp.Message
+ }
+
+ m, r, err := a.StateManager.Replay(ctx, ts, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return &api.MsgGasCost{
+ Message: msg,
+ GasUsed: big.NewInt(r.GasUsed),
+ BaseFeeBurn: r.GasCosts.BaseFeeBurn,
+ OverEstimationBurn: r.GasCosts.OverEstimationBurn,
+ MinerPenalty: r.GasCosts.MinerPenalty,
+ MinerTip: r.GasCosts.MinerTip,
+ Refund: r.GasCosts.Refund,
+ TotalCost: big.Sub(m.RequiredFunds(), r.GasCosts.Refund),
+ }, nil
+}
diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go
index 9066df56f..1bd3af415 100644
--- a/node/impl/full/sync.go
+++ b/node/impl/full/sync.go
@@ -2,6 +2,7 @@ package full
import (
"context"
+ "sync/atomic"
cid "github.com/ipfs/go-cid"
pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -13,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -28,7 +30,9 @@ type SyncAPI struct {
func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) {
states := a.Syncer.State()
- out := &api.SyncState{}
+ out := &api.SyncState{
+ VMApplied: atomic.LoadUint64(&vm.StatApplied),
+ }
for i := range states {
ss := &states[i]
@@ -97,12 +101,23 @@ func (a *SyncAPI) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHe
return a.Syncer.IncomingBlocks(ctx)
}
+func (a *SyncAPI) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error {
+ log.Warnf("Marking tipset %s as checkpoint", tsk)
+ return a.Syncer.SetCheckpoint(tsk)
+}
+
func (a *SyncAPI) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
log.Warnf("Marking block %s as bad", bcid)
a.Syncer.MarkBad(bcid)
return nil
}
+func (a *SyncAPI) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error {
+ log.Warnf("Unmarking block %s as bad", bcid)
+ a.Syncer.UnmarkBad(bcid)
+ return nil
+}
+
func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
reason, ok := a.Syncer.CheckBadBlockCache(bcid)
if !ok {
@@ -111,3 +126,22 @@ func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error
return reason, nil
}
+
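+// SyncValidateTipset loads the given tipset, fills in its block messages, and runs full consensus validation against it.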
+func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) {
+ ts, err := a.Syncer.ChainStore().LoadTipSet(tsk)
+ if err != nil {
+ return false, err
+ }
+
+ fts, err := a.Syncer.ChainStore().TryFillTipSet(ts)
+ if err != nil {
+ return false, err
+ }
+
+ err = a.Syncer.ValidateTipSet(ctx, fts, false)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
diff --git a/node/impl/full/wallet.go b/node/impl/full/wallet.go
index 263452cf3..6252e12a4 100644
--- a/node/impl/full/wallet.go
+++ b/node/impl/full/wallet.go
@@ -7,8 +7,8 @@ import (
"go.uber.org/fx"
"golang.org/x/xerrors"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -26,16 +26,13 @@ type WalletAPI struct {
}
func (a *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (types.BigInt, error) {
- var bal types.BigInt
- err := a.StateManager.WithParentStateTsk(types.EmptyTSK, a.StateManager.WithActor(addr, func(act *types.Actor) error {
- bal = act.Balance
- return nil
- }))
-
+ act, err := a.StateManager.LoadActorTsk(ctx, addr, types.EmptyTSK)
if xerrors.Is(err, types.ErrActorNotFound) {
return big.Zero(), nil
+ } else if err != nil {
+ return big.Zero(), err
}
- return bal, err
+ return act.Balance, nil
}
func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {
@@ -54,8 +51,8 @@ func (a *WalletAPI) WalletSignMessage(ctx context.Context, k address.Address, ms
return a.WalletAPI.WalletSignMessage(ctx, keyAddr, msg)
}
-func (a *WalletAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) bool {
- return sigs.Verify(sig, k, msg) == nil
+func (a *WalletAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) {
+ return sigs.Verify(sig, k, msg) == nil, nil
}
func (a *WalletAPI) WalletDefaultAddress(ctx context.Context) (address.Address, error) {
@@ -65,3 +62,7 @@ func (a *WalletAPI) WalletDefaultAddress(ctx context.Context) (address.Address,
func (a *WalletAPI) WalletSetDefault(ctx context.Context, addr address.Address) error {
return a.Default.SetDefault(addr)
}
+
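+// WalletValidateAddress parses the supplied string as a Filecoin address, returning an error if it is malformed.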
+func (a *WalletAPI) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) {
+ return address.NewFromString(str)
+}
diff --git a/node/impl/paych/paych.go b/node/impl/paych/paych.go
index e998f969f..af0a1db15 100644
--- a/node/impl/paych/paych.go
+++ b/node/impl/paych/paych.go
@@ -9,9 +9,9 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
full "github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/paychmgr"
@@ -39,8 +39,12 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t
}, nil
}
-func (a *PaychAPI) PaychAvailableFunds(from, to address.Address) (*api.ChannelAvailableFunds, error) {
- return a.PaychMgr.AvailableFunds(from, to)
+func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
+ return a.PaychMgr.AvailableFunds(ch)
+}
+
+func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
+ return a.PaychMgr.AvailableFundsByFromTo(from, to)
}
func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) {
diff --git a/node/impl/remoteworker.go b/node/impl/remoteworker.go
index 8111413ba..b6ef43c7c 100644
--- a/node/impl/remoteworker.go
+++ b/node/impl/remoteworker.go
@@ -8,7 +8,7 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
diff --git a/node/impl/storminer.go b/node/impl/storminer.go
index c688ff677..6090e8a58 100644
--- a/node/impl/storminer.go
+++ b/node/impl/storminer.go
@@ -8,18 +8,18 @@ import (
"strconv"
"time"
- datatransfer "github.com/filecoin-project/go-data-transfer"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/host"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/piecestore"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@@ -56,6 +56,8 @@ type StorageMinerAPI struct {
DataTransfer dtypes.ProviderDataTransfer
Host host.Host
+ DS dtypes.MetadataDS
+
ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc
SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc
ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc
@@ -305,8 +307,30 @@ func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid
return sm.StorageProvider.ImportDataForDeal(ctx, propCid, fi)
}
-func (sm *StorageMinerAPI) MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) {
- return sm.StorageProvider.ListDeals(ctx)
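+// listDeals returns the on-chain market deals for which this miner is the provider, resolved at the current chain head.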
+func (sm *StorageMinerAPI) listDeals(ctx context.Context) ([]api.MarketDeal, error) {
+ ts, err := sm.Full.ChainHead(ctx)
+ if err != nil {
+ return nil, err
+ }
+ tsk := ts.Key()
+ allDeals, err := sm.Full.StateMarketDeals(ctx, tsk)
+ if err != nil {
+ return nil, err
+ }
+
+ var out []api.MarketDeal
+
+ for _, deal := range allDeals {
+ if deal.Proposal.Provider == sm.Miner.Address() {
+ out = append(out, deal)
+ }
+ }
+
+ return out, nil
+}
+
+func (sm *StorageMinerAPI) MarketListDeals(ctx context.Context) ([]api.MarketDeal, error) {
+ return sm.listDeals(ctx)
}
func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) {
@@ -395,8 +419,8 @@ func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-cha
return channels, nil
}
-func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) {
- return sm.StorageProvider.ListDeals(ctx)
+func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]api.MarketDeal, error) {
+ return sm.listDeals(ctx)
}
func (sm *StorageMinerAPI) RetrievalDealsList(ctx context.Context) (map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState, error) {
@@ -494,4 +518,8 @@ func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.
return &ci, nil
}
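+// CreateBackup writes a backup of the miner's metadata datastore to the given file path.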
+func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error {
+ return backup(sm.DS, fpath)
+}
+
var _ api.StorageMiner = &StorageMinerAPI{}
diff --git a/node/modules/chain.go b/node/modules/chain.go
index ea04945ef..66f54a76a 100644
--- a/node/modules/chain.go
+++ b/node/modules/chain.go
@@ -18,9 +18,10 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/beacon"
- "github.com/filecoin-project/lotus/chain/blocksync"
+ "github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -33,7 +34,7 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)
-func ChainExchange(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainGCBlockstore) dtypes.ChainExchange {
+func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainGCBlockstore) dtypes.ChainBitswap {
// prefix protocol for chain bitswap
// (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff)
bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain"))
@@ -83,7 +84,7 @@ func ChainGCBlockstore(bs dtypes.ChainBlockstore, gcl dtypes.ChainGCLocker) dtyp
return blockstore.NewGCBlockstore(bs, gcl)
}
-func ChainBlockservice(bs dtypes.ChainBlockstore, rem dtypes.ChainExchange) dtypes.ChainBlockService {
+func ChainBlockService(bs dtypes.ChainBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService {
return blockservice.New(bs, rem)
}
@@ -157,14 +158,41 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error)
}
func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) {
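+ // Non-devnet builds return a fixed network name instead of deriving it from chain state.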
+ if !build.Devnet {
+ return "testnetnet", nil
+ }
+
ctx := helpers.LifecycleCtx(mctx, lc)
netName, err := stmgr.GetNetworkName(ctx, stmgr.NewStateManager(cs), cs.GetHeaviestTipSet().ParentState())
return netName, err
}
-func NewSyncer(lc fx.Lifecycle, sm *stmgr.StateManager, bsync *blocksync.BlockSync, h host.Host, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*chain.Syncer, error) {
- syncer, err := chain.NewSyncer(sm, bsync, h.ConnManager(), h.ID(), beacon, verifier)
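+// SyncerParams bundles the dependencies needed to construct a chain Syncer; the fields are populated by fx dependency injection.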
+type SyncerParams struct {
+ fx.In
+
+ Lifecycle fx.Lifecycle
+ MetadataDS dtypes.MetadataDS
+ StateManager *stmgr.StateManager
+ ChainXchg exchange.Client
+ SyncMgrCtor chain.SyncManagerCtor
+ Host host.Host
+ Beacon beacon.Schedule
+ Verifier ffiwrapper.Verifier
+}
+
+func NewSyncer(params SyncerParams) (*chain.Syncer, error) {
+ var (
+ lc = params.Lifecycle
+ ds = params.MetadataDS
+ sm = params.StateManager
+ ex = params.ChainXchg
+ smCtor = params.SyncMgrCtor
+ h = params.Host
+ b = params.Beacon
+ v = params.Verifier
+ )
+ syncer, err := chain.NewSyncer(ds, sm, ex, smCtor, h.ConnManager(), h.ID(), b, v)
if err != nil {
return nil, err
}
diff --git a/node/modules/client.go b/node/modules/client.go
index bf534350e..6972ca36e 100644
--- a/node/modules/client.go
+++ b/node/modules/client.go
@@ -12,8 +12,9 @@ import (
dtimpl "github.com/filecoin-project/go-data-transfer/impl"
dtnet "github.com/filecoin-project/go-data-transfer/network"
dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync"
+ "github.com/filecoin-project/go-fil-markets/discovery"
+ discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
- "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery"
retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@@ -26,7 +27,9 @@ import (
"github.com/ipfs/go-datastore/namespace"
"github.com/libp2p/go-libp2p-core/host"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/markets"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
"github.com/filecoin-project/lotus/node/impl/full"
@@ -35,7 +38,6 @@ import (
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/node/repo/importmgr"
"github.com/filecoin-project/lotus/node/repo/retrievalstoremgr"
- "github.com/filecoin-project/lotus/paychmgr"
)
func ClientMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) {
@@ -111,15 +113,20 @@ func NewClientDealFunds(ds dtypes.MetadataDS) (ClientDealFunds, error) {
return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/client"))
}
-func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discovery.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, dealFunds ClientDealFunds) (storagemarket.StorageClient, error) {
+func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, dealFunds ClientDealFunds) (storagemarket.StorageClient, error) {
net := smnet.NewFromLibp2pHost(h)
c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, dealFunds, storageimpl.DealPollingInterval(time.Second))
if err != nil {
return nil, err
}
+ c.OnReady(marketevents.ReadyLogger("storage client"))
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
c.SubscribeToEvents(marketevents.StorageClientLogger)
+
+ evtType := journal.J.RegisterEventType("markets/storage/client", "state_change")
+ c.SubscribeToEvents(markets.StorageClientJournaler(evtType))
+
return c.Start(ctx)
},
OnStop: func(context.Context) error {
@@ -130,18 +137,23 @@ func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, md
}
// RetrievalClient creates a new retrieval client attached to the client blockstore
-func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore, dt dtypes.ClientDataTransfer, pmgr *paychmgr.Manager, payAPI payapi.PaychAPI, resolver retrievalmarket.PeerResolver, ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI) (retrievalmarket.RetrievalClient, error) {
- adapter := retrievaladapter.NewRetrievalClientNode(pmgr, payAPI, chainAPI, stateAPI)
+func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI) (retrievalmarket.RetrievalClient, error) {
+ adapter := retrievaladapter.NewRetrievalClientNode(payAPI, chainAPI, stateAPI)
network := rmnet.NewFromLibp2pHost(h)
sc := storedcounter.New(ds, datastore.NewKey("/retr"))
client, err := retrievalimpl.NewClient(network, mds, dt, adapter, resolver, namespace.Wrap(ds, datastore.NewKey("/retrievals/client")), sc)
if err != nil {
return nil, err
}
+ client.OnReady(marketevents.ReadyLogger("retrieval client"))
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
client.SubscribeToEvents(marketevents.RetrievalClientLogger)
- return nil
+
+ evtType := journal.J.RegisterEventType("markets/retrieval/client", "state_change")
+ client.SubscribeToEvents(markets.RetrievalClientJournaler(evtType))
+
+ return client.Start(ctx)
},
})
return client, nil
diff --git a/node/modules/core.go b/node/modules/core.go
index d73e4e25d..a695d8651 100644
--- a/node/modules/core.go
+++ b/node/modules/core.go
@@ -6,10 +6,10 @@ import (
"errors"
"io"
"io/ioutil"
- "path/filepath"
"github.com/gbrlsnchs/jwt/v3"
logging "github.com/ipfs/go-log/v2"
+ "github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
record "github.com/libp2p/go-libp2p-record"
"golang.org/x/xerrors"
@@ -19,7 +19,6 @@ import (
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/addrutil"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
@@ -93,16 +92,16 @@ func BuiltinBootstrap() (dtypes.BootstrapPeers, error) {
return build.BuiltinBootstrap()
}
-func DrandBootstrap(d dtypes.DrandConfig) (dtypes.DrandBootstrap, error) {
+func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, error) {
// TODO: retry resolving, don't fail if at least one resolve succeeds
- addrs, err := addrutil.ParseAddresses(context.TODO(), d.Relays)
- if err != nil {
- log.Errorf("reoslving drand relays addresses: %+v", err)
- return nil, nil
+ res := []peer.AddrInfo{}
+ for _, d := range ds {
+ addrs, err := addrutil.ParseAddresses(context.TODO(), d.Config.Relays)
+ if err != nil {
+ log.Errorf("reoslving drand relays addresses: %+v", err)
+ return res, nil
+ }
+ res = append(res, addrs...)
}
- return addrs, nil
-}
-
-func SetupJournal(lr repo.LockedRepo) error {
- return journal.InitializeSystemJournal(filepath.Join(lr.Path(), "journal"))
+ return res, nil
}
diff --git a/node/modules/dtypes/beacon.go b/node/modules/dtypes/beacon.go
index 2231f0e08..28bbdf281 100644
--- a/node/modules/dtypes/beacon.go
+++ b/node/modules/dtypes/beacon.go
@@ -1,5 +1,14 @@
package dtypes
+import "github.com/filecoin-project/go-state-types/abi"
+
+type DrandSchedule []DrandPoint
+
+type DrandPoint struct {
+ Start abi.ChainEpoch
+ Config DrandConfig
+}
+
type DrandConfig struct {
Servers []string
Relays []string
diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go
index d559a2de1..5bb439b4d 100644
--- a/node/modules/dtypes/miner.go
+++ b/node/modules/dtypes/miner.go
@@ -8,7 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)
diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go
index b8c1c3081..13defda8d 100644
--- a/node/modules/dtypes/storage.go
+++ b/node/modules/dtypes/storage.go
@@ -27,7 +27,7 @@ type ChainBlockstore blockstore.Blockstore
type ChainGCLocker blockstore.GCLocker
type ChainGCBlockstore blockstore.GCBlockstore
-type ChainExchange exchange.Interface
+type ChainBitswap exchange.Interface
type ChainBlockService bserv.BlockService
type ClientMultiDstore *multistore.MultiStore
diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go
index d4464e4de..9724eb3b4 100644
--- a/node/modules/lp2p/pubsub.go
+++ b/node/modules/lp2p/pubsub.go
@@ -33,6 +33,7 @@ func init() {
pubsub.GossipSubDirectConnectInitialDelay = 30 * time.Second
pubsub.GossipSubIWantFollowupTime = 5 * time.Second
pubsub.GossipSubHistoryLength = 10
+ pubsub.GossipSubGossipFactor = 0.1
}
func ScoreKeeper() *dtypes.ScoreKeeper {
return new(dtypes.ScoreKeeper)
@@ -48,7 +49,7 @@ type GossipIn struct {
Db dtypes.DrandBootstrap
Cfg *config.Pubsub
Sk *dtypes.ScoreKeeper
- Dr dtypes.DrandConfig
+ Dr dtypes.DrandSchedule
}
func getDrandTopic(chainInfoJSON string) (string, error) {
@@ -73,9 +74,128 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
}
isBootstrapNode := in.Cfg.Bootstrapper
- drandTopic, err := getDrandTopic(in.Dr.ChainInfoJSON)
- if err != nil {
- return nil, err
+
+ drandTopicParams := &pubsub.TopicScoreParams{
+ // expected 2 beacons/min
+ TopicWeight: 0.5, // 5x block topic; max cap is 62.5
+
+ // 1 tick per second, maxes at 1 after 1 hour
+ TimeInMeshWeight: 0.00027, // ~1/3600
+ TimeInMeshQuantum: time.Second,
+ TimeInMeshCap: 1,
+
+ // deliveries decay after 1 hour, cap at 25 beacons
+ FirstMessageDeliveriesWeight: 5, // max value is 125
+ FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
+ FirstMessageDeliveriesCap: 25, // the maximum expected in an hour is ~26, including the decay
+
+ // Mesh Delivery Failure is currently turned off for beacons
+ // This is on purpose as
+ // - the traffic is very low for meaningful distribution of incoming edges.
+ // - the reaction time needs to be very slow -- in the order of 10 min at least
+ // so we might as well let opportunistic grafting repair the mesh on its own
+ // pace.
+ // - the network is too small, so large asymmetries can be expected between mesh
+ // edges.
+ // We should revisit this once the network grows.
+
+ // invalid messages decay after 1 hour
+ InvalidMessageDeliveriesWeight: -1000,
+ InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
+ }
+
+ topicParams := map[string]*pubsub.TopicScoreParams{
+ build.BlocksTopic(in.Nn): {
+ // expected 10 blocks/min
+ TopicWeight: 0.1, // max cap is 50, max mesh penalty is -10, single invalid message is -100
+
+ // 1 tick per second, maxes at 1 after 1 hour
+ TimeInMeshWeight: 0.00027, // ~1/3600
+ TimeInMeshQuantum: time.Second,
+ TimeInMeshCap: 1,
+
+ // deliveries decay after 1 hour, cap at 100 blocks
+ FirstMessageDeliveriesWeight: 5, // max value is 500
+ FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
+ FirstMessageDeliveriesCap: 100, // 100 blocks in an hour
+
+ // Mesh Delivery Failure is currently turned off for blocks
+ // This is on purpose as
+ // - the traffic is very low for meaningful distribution of incoming edges.
+ // - the reaction time needs to be very slow -- in the order of 10 min at least
+ // so we might as well let opportunistic grafting repair the mesh on its own
+ // pace.
+ // - the network is too small, so large asymmetries can be expected between mesh
+ // edges.
+ // We should revisit this once the network grows.
+ //
+ // // tracks deliveries in the last minute
+ // // penalty activates at 1 minute and expects ~0.4 blocks
+ // MeshMessageDeliveriesWeight: -576, // max penalty is -100
+ // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute),
+ // MeshMessageDeliveriesCap: 10, // 10 blocks in a minute
+ // MeshMessageDeliveriesThreshold: 0.41666, // 10/12/2 blocks/min
+ // MeshMessageDeliveriesWindow: 10 * time.Millisecond,
+ // MeshMessageDeliveriesActivation: time.Minute,
+ //
+ // // decays after 15 min
+ // MeshFailurePenaltyWeight: -576,
+ // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(15 * time.Minute),
+
+ // invalid messages decay after 1 hour
+ InvalidMessageDeliveriesWeight: -1000,
+ InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
+ },
+ build.MessagesTopic(in.Nn): {
+ // expected > 1 tx/second
+ TopicWeight: 0.1, // max cap is 5, single invalid message is -100
+
+ // 1 tick per second, maxes at 1 after 1 hour
+ TimeInMeshWeight: 0.0002778, // ~1/3600
+ TimeInMeshQuantum: time.Second,
+ TimeInMeshCap: 1,
+
+ // deliveries decay after 10min, cap at 100 tx
+ FirstMessageDeliveriesWeight: 0.5, // max value is 50
+ FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(10 * time.Minute),
+ FirstMessageDeliveriesCap: 100, // 100 messages in 10 minutes
+
+ // Mesh Delivery Failure is currently turned off for messages
+ // This is on purpose as the network is still too small, which results in
+ // asymmetries and potential unmeshing from negative scores.
+ // // tracks deliveries in the last minute
+ // // penalty activates at 1 min and expects 2.5 txs
+ // MeshMessageDeliveriesWeight: -16, // max penalty is -100
+ // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute),
+ // MeshMessageDeliveriesCap: 100, // 100 txs in a minute
+ // MeshMessageDeliveriesThreshold: 2.5, // 60/12/2 txs/minute
+ // MeshMessageDeliveriesWindow: 10 * time.Millisecond,
+ // MeshMessageDeliveriesActivation: time.Minute,
+
+ // // decays after 5min
+ // MeshFailurePenaltyWeight: -16,
+ // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(5 * time.Minute),
+
+ // invalid messages decay after 1 hour
+ InvalidMessageDeliveriesWeight: -1000,
+ InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
+ },
+ }
+
+ pgTopicWeights := map[string]float64{
+ build.BlocksTopic(in.Nn): 10,
+ build.MessagesTopic(in.Nn): 1,
+ }
+
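+ // Register score params and peer gater weights for every drand beacon topic
+ // in the schedule, and collect the topic names for the subscription allowlist.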
+ var drandTopics []string
+ for _, d := range in.Dr {
+ topic, err := getDrandTopic(d.Config.ChainInfoJSON)
+ if err != nil {
+ return nil, err
+ }
+ topicParams[topic] = drandTopicParams
+ pgTopicWeights[topic] = 5
+ drandTopics = append(drandTopics, topic)
}
options := []pubsub.Option{
@@ -105,8 +225,8 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
},
AppSpecificWeight: 1,
- // This sets the IP colocation threshold to 1 peer per
- IPColocationFactorThreshold: 1,
+ // This sets the IP colocation threshold to 5 peers before we apply penalties
+ IPColocationFactorThreshold: 5,
IPColocationFactorWeight: -100,
// TODO we want to whitelist IPv6 /64s that belong to datacenters etc
// IPColocationFactorWhitelist: map[string]struct{}{},
@@ -123,111 +243,7 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
RetainScore: 6 * time.Hour,
// topic parameters
- Topics: map[string]*pubsub.TopicScoreParams{
- drandTopic: {
- // expected 2 beaconsn/min
- TopicWeight: 0.5, // 5x block topic; max cap is 62.5
-
- // 1 tick per second, maxes at 1 after 1 hour
- TimeInMeshWeight: 0.00027, // ~1/3600
- TimeInMeshQuantum: time.Second,
- TimeInMeshCap: 1,
-
- // deliveries decay after 1 hour, cap at 25 beacons
- FirstMessageDeliveriesWeight: 5, // max value is 125
- FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
- FirstMessageDeliveriesCap: 25, // the maximum expected in an hour is ~26, including the decay
-
- // Mesh Delivery Failure is currently turned off for beacons
- // This is on purpose as
- // - the traffic is very low for meaningful distribution of incoming edges.
- // - the reaction time needs to be very slow -- in the order of 10 min at least
- // so we might as well let opportunistic grafting repair the mesh on its own
- // pace.
- // - the network is too small, so large asymmetries can be expected between mesh
- // edges.
- // We should revisit this once the network grows.
-
- // invalid messages decay after 1 hour
- InvalidMessageDeliveriesWeight: -1000,
- InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
- },
- build.BlocksTopic(in.Nn): {
- // expected 10 blocks/min
- TopicWeight: 0.1, // max cap is 50, max mesh penalty is -10, single invalid message is -100
-
- // 1 tick per second, maxes at 1 after 1 hour
- TimeInMeshWeight: 0.00027, // ~1/3600
- TimeInMeshQuantum: time.Second,
- TimeInMeshCap: 1,
-
- // deliveries decay after 1 hour, cap at 100 blocks
- FirstMessageDeliveriesWeight: 5, // max value is 500
- FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
- FirstMessageDeliveriesCap: 100, // 100 blocks in an hour
-
- // Mesh Delivery Failure is currently turned off for blocks
- // This is on purpose as
- // - the traffic is very low for meaningful distribution of incoming edges.
- // - the reaction time needs to be very slow -- in the order of 10 min at least
- // so we might as well let opportunistic grafting repair the mesh on its own
- // pace.
- // - the network is too small, so large asymmetries can be expected between mesh
- // edges.
- // We should revisit this once the network grows.
- //
- // // tracks deliveries in the last minute
- // // penalty activates at 1 minute and expects ~0.4 blocks
- // MeshMessageDeliveriesWeight: -576, // max penalty is -100
- // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute),
- // MeshMessageDeliveriesCap: 10, // 10 blocks in a minute
- // MeshMessageDeliveriesThreshold: 0.41666, // 10/12/2 blocks/min
- // MeshMessageDeliveriesWindow: 10 * time.Millisecond,
- // MeshMessageDeliveriesActivation: time.Minute,
- //
- // // decays after 15 min
- // MeshFailurePenaltyWeight: -576,
- // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(15 * time.Minute),
-
- // invalid messages decay after 1 hour
- InvalidMessageDeliveriesWeight: -1000,
- InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
- },
- build.MessagesTopic(in.Nn): {
- // expected > 1 tx/second
- TopicWeight: 0.1, // max cap is 5, single invalid message is -100
-
- // 1 tick per second, maxes at 1 hour
- TimeInMeshWeight: 0.0002778, // ~1/3600
- TimeInMeshQuantum: time.Second,
- TimeInMeshCap: 1,
-
- // deliveries decay after 10min, cap at 100 tx
- FirstMessageDeliveriesWeight: 0.5, // max value is 50
- FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(10 * time.Minute),
- FirstMessageDeliveriesCap: 100, // 100 messages in 10 minutes
-
- // Mesh Delivery Failure is currently turned off for messages
- // This is on purpose as the network is still too small, which results in
- // asymmetries and potential unmeshing from negative scores.
- // // tracks deliveries in the last minute
- // // penalty activates at 1 min and expects 2.5 txs
- // MeshMessageDeliveriesWeight: -16, // max penalty is -100
- // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute),
- // MeshMessageDeliveriesCap: 100, // 100 txs in a minute
- // MeshMessageDeliveriesThreshold: 2.5, // 60/12/2 txs/minute
- // MeshMessageDeliveriesWindow: 10 * time.Millisecond,
- // MeshMessageDeliveriesActivation: time.Minute,
-
- // // decays after 5min
- // MeshFailurePenaltyWeight: -16,
- // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(5 * time.Minute),
-
- // invalid messages decay after 1 hour
- InvalidMessageDeliveriesWeight: -1000,
- InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour),
- },
- },
+ Topics: topicParams,
},
&pubsub.PeerScoreThresholds{
GossipThreshold: -500,
@@ -248,8 +264,8 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
pubsub.GossipSubDlo = 0
pubsub.GossipSubDhi = 0
pubsub.GossipSubDout = 0
- pubsub.GossipSubDlazy = 1024
- pubsub.GossipSubGossipFactor = 0.5
+ pubsub.GossipSubDlazy = 64
+ pubsub.GossipSubGossipFactor = 0.25
pubsub.GossipSubPruneBackoff = 5 * time.Minute
// turn on PX
options = append(options, pubsub.WithPeerExchange(true))
@@ -276,6 +292,36 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) {
options = append(options, pubsub.WithDirectPeers(directPeerInfo))
}
+ // validation queue RED
+ var pgParams *pubsub.PeerGaterParams
+
+ if isBootstrapNode {
+ pgParams = pubsub.NewPeerGaterParams(
+ 0.33,
+ pubsub.ScoreParameterDecay(2*time.Minute),
+ pubsub.ScoreParameterDecay(10*time.Minute),
+ ).WithTopicDeliveryWeights(pgTopicWeights)
+ } else {
+ pgParams = pubsub.NewPeerGaterParams(
+ 0.33,
+ pubsub.ScoreParameterDecay(2*time.Minute),
+ pubsub.ScoreParameterDecay(time.Hour),
+ ).WithTopicDeliveryWeights(pgTopicWeights)
+ }
+
+ options = append(options, pubsub.WithPeerGater(pgParams))
+
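+ // Only allow subscriptions to the known block, message and drand topics, capped at 100 subscriptions per peer.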
+ allowTopics := []string{
+ build.BlocksTopic(in.Nn),
+ build.MessagesTopic(in.Nn),
+ }
+ allowTopics = append(allowTopics, drandTopics...)
+ options = append(options,
+ pubsub.WithSubscriptionFilter(
+ pubsub.WrapLimitSubscriptionFilter(
+ pubsub.NewAllowlistSubscriptionFilter(allowTopics...),
+ 100)))
+
// tracer
if in.Cfg.RemoteTracer != "" {
a, err := ma.NewMultiaddr(in.Cfg.RemoteTracer)
@@ -326,14 +372,9 @@ type tracerWrapper struct {
topics map[string]struct{}
}
-func (trw *tracerWrapper) traceMessage(topics []string) bool {
- for _, topic := range topics {
- _, ok := trw.topics[topic]
- if ok {
- return true
- }
- }
- return false
+func (trw *tracerWrapper) traceMessage(topic string) bool {
+ _, ok := trw.topics[topic]
+ return ok
}
func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
@@ -346,12 +387,12 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
switch evt.GetType() {
case pubsub_pb.TraceEvent_PUBLISH_MESSAGE:
stats.Record(context.TODO(), metrics.PubsubPublishMessage.M(1))
- if trw.tr != nil && trw.traceMessage(evt.GetPublishMessage().Topics) {
+ if trw.tr != nil && trw.traceMessage(evt.GetPublishMessage().GetTopic()) {
trw.tr.Trace(evt)
}
case pubsub_pb.TraceEvent_DELIVER_MESSAGE:
stats.Record(context.TODO(), metrics.PubsubDeliverMessage.M(1))
- if trw.tr != nil && trw.traceMessage(evt.GetDeliverMessage().Topics) {
+ if trw.tr != nil && trw.traceMessage(evt.GetDeliverMessage().GetTopic()) {
trw.tr.Trace(evt)
}
case pubsub_pb.TraceEvent_REJECT_MESSAGE:
diff --git a/node/modules/services.go b/node/modules/services.go
index fc7486abe..4ee0abacc 100644
--- a/node/modules/services.go
+++ b/node/modules/services.go
@@ -1,6 +1,8 @@
package modules
import (
+ "context"
+
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
eventbus "github.com/libp2p/go-eventbus"
@@ -11,21 +13,25 @@ import (
"go.uber.org/fx"
"golang.org/x/xerrors"
- "github.com/filecoin-project/go-fil-markets/retrievalmarket"
- "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery"
+ "github.com/filecoin-project/go-fil-markets/discovery"
+ discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
+
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/beacon/drand"
- "github.com/filecoin-project/lotus/chain/blocksync"
+ "github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/sub"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/peermgr"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/hello"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
+ "github.com/filecoin-project/lotus/node/repo"
)
func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.Service) error {
@@ -69,8 +75,9 @@ func RunPeerMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, pmgr *peermgr.PeerMgr)
go pmgr.Run(helpers.LifecycleCtx(mctx, lc))
}
-func RunBlockSync(h host.Host, svc *blocksync.BlockSyncService) {
- h.SetStreamHandler(blocksync.BlockSyncProtocolID, svc.HandleStream)
+func RunChainExchange(h host.Host, svc exchange.Server) {
+ h.SetStreamHandler(exchange.BlockSyncProtocolID, svc.HandleStream) // old
+ h.SetStreamHandler(exchange.ChainExchangeProtocolID, svc.HandleStream) // new
}
func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) {
@@ -112,12 +119,22 @@ func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub
go sub.HandleIncomingMessages(ctx, mpool, msgsub)
}
-func NewLocalDiscovery(ds dtypes.MetadataDS) *discovery.Local {
- return discovery.NewLocal(namespace.Wrap(ds, datastore.NewKey("/deals/local")))
+func NewLocalDiscovery(lc fx.Lifecycle, ds dtypes.MetadataDS) (*discoveryimpl.Local, error) {
+ local, err := discoveryimpl.NewLocal(namespace.Wrap(ds, datastore.NewKey("/deals/local")))
+ if err != nil {
+ return nil, err
+ }
+ local.OnReady(marketevents.ReadyLogger("discovery"))
+ lc.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ return local.Start(ctx)
+ },
+ })
+ return local, nil
}
-func RetrievalResolver(l *discovery.Local) retrievalmarket.PeerResolver {
- return discovery.Multi(l)
+func RetrievalResolver(l *discoveryimpl.Local) discovery.PeerResolver {
+ return discoveryimpl.Multi(l)
}
type RandomBeaconParams struct {
@@ -125,19 +142,40 @@ type RandomBeaconParams struct {
PubSub *pubsub.PubSub `optional:"true"`
Cs *store.ChainStore
- DrandConfig dtypes.DrandConfig
+ DrandConfig dtypes.DrandSchedule
}
-func BuiltinDrandConfig() dtypes.DrandConfig {
- return build.DrandConfig()
+func BuiltinDrandConfig() dtypes.DrandSchedule {
+ return build.DrandConfigSchedule()
}
-func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.RandomBeacon, error) {
+func RandomSchedule(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) {
gen, err := p.Cs.GetGenesis()
if err != nil {
return nil, err
}
- //return beacon.NewMockBeacon(build.BlockDelaySecs * time.Second)
- return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, p.DrandConfig)
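+ // Construct one drand beacon per schedule entry, each activating at its configured start epoch.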
+ shd := beacon.Schedule{}
+ for _, dc := range p.DrandConfig {
+ bc, err := drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, dc.Config)
+ if err != nil {
+ return nil, xerrors.Errorf("creating drand beacon: %w", err)
+ }
+ shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ }
+
+ return shd, nil
+}
+
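+// OpenFilesystemJournal opens the filesystem-backed journal in the locked repo and registers a lifecycle hook to close it on shutdown.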
+func OpenFilesystemJournal(lr repo.LockedRepo, lc fx.Lifecycle, disabled journal.DisabledEvents) (journal.Journal, error) {
+ jrnl, err := journal.OpenFSJournal(lr, disabled)
+ if err != nil {
+ return nil, err
+ }
+
+ lc.Append(fx.Hook{
+ OnStop: func(_ context.Context) error { return jrnl.Close() },
+ })
+
+ return jrnl, err
}
diff --git a/node/modules/storage.go b/node/modules/storage.go
index 1bdce1d2f..9c1a18368 100644
--- a/node/modules/storage.go
+++ b/node/modules/storage.go
@@ -6,6 +6,7 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/lib/backupds"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -27,5 +28,10 @@ func KeyStore(lr repo.LockedRepo) (types.KeyStore, error) {
}
func Datastore(r repo.LockedRepo) (dtypes.MetadataDS, error) {
- return r.Datastore("/metadata")
+ mds, err := r.Datastore("/metadata")
+ if err != nil {
+ return nil, err
+ }
+
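+ // Wrap the metadata datastore with backup support.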
+ return backupds.Wrap(mds), nil
}
diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go
index f79c8a370..95bfb6c11 100644
--- a/node/modules/storageminer.go
+++ b/node/modules/storageminer.go
@@ -29,10 +29,11 @@ import (
dtnet "github.com/filecoin-project/go-data-transfer/network"
dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync"
piecefilestore "github.com/filecoin-project/go-fil-markets/filestore"
- "github.com/filecoin-project/go-fil-markets/piecestore"
+ piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
+ "github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-fil-markets/storagemarket"
storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/funds"
@@ -41,14 +42,17 @@ import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
paramfetch "github.com/filecoin-project/go-paramfetch"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-storedcounter"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+ "github.com/filecoin-project/lotus/journal"
+ "github.com/filecoin-project/lotus/markets"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -107,6 +111,9 @@ func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) {
}
func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.NetworkName, error) {
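+ // Non-devnet builds use the fixed network name rather than querying the full node.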
+ if !build.Devnet {
+ return "testnetnet", nil
+ }
return a.StateNetworkName(ctx)
}
@@ -142,8 +149,34 @@ func SectorIDCounter(ds dtypes.MetadataDS) sealing.SectorIDCounter {
return &sidsc{sc}
}
-func StorageMiner(fc config.MinerFeeConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc) (*storage.Miner, error) {
- return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc) (*storage.Miner, error) {
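+// StorageMinerParams groups the dependencies of the storage.Miner constructor so they can be injected via fx.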
+type StorageMinerParams struct {
+ fx.In
+
+ Lifecycle fx.Lifecycle
+ MetricsCtx helpers.MetricsCtx
+ API lapi.FullNode
+ Host host.Host
+ MetadataDS dtypes.MetadataDS
+ Sealer sectorstorage.SectorManager
+ SectorIDCounter sealing.SectorIDCounter
+ Verifier ffiwrapper.Verifier
+ GetSealingConfigFn dtypes.GetSealingConfigFunc
+}
+
+func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) {
+ return func(params StorageMinerParams) (*storage.Miner, error) {
+ var (
+ ds = params.MetadataDS
+ mctx = params.MetricsCtx
+ lc = params.Lifecycle
+ api = params.API
+ sealer = params.Sealer
+ h = params.Host
+ sc = params.SectorIDCounter
+ verif = params.Verifier
+ gsd = params.GetSealingConfigFn
+ )
+
maddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
@@ -184,10 +217,16 @@ func StorageMiner(fc config.MinerFeeConfig) func(mctx helpers.MetricsCtx, lc fx.
}
func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider) {
+ m.OnReady(marketevents.ReadyLogger("retrieval provider"))
lc.Append(fx.Hook{
- OnStart: func(context.Context) error {
+
+ OnStart: func(ctx context.Context) error {
m.SubscribeToEvents(marketevents.RetrievalProviderLogger)
- return m.Start()
+
+ evtType := journal.J.RegisterEventType("markets/retrieval/provider", "state_change")
+ m.SubscribeToEvents(markets.RetrievalProviderJournaler(evtType))
+
+ return m.Start(ctx)
},
OnStop: func(context.Context) error {
return m.Stop()
@@ -197,10 +236,14 @@ func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.Retrieva
func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider) {
ctx := helpers.LifecycleCtx(mctx, lc)
-
+ h.OnReady(marketevents.ReadyLogger("storage provider"))
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
h.SubscribeToEvents(marketevents.StorageProviderLogger)
+
+ evtType := journal.J.RegisterEventType("markets/storage/provider", "state_change")
+ h.SubscribeToEvents(markets.StorageProviderJournaler(evtType))
+
return h.Start(ctx)
},
OnStop: func(context.Context) error {
@@ -235,8 +278,18 @@ func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.S
// NewProviderPieceStore creates a statestore for storing metadata about pieces
// shared by the storage and retrieval providers
-func NewProviderPieceStore(ds dtypes.MetadataDS) dtypes.ProviderPieceStore {
- return piecestore.NewPieceStore(namespace.Wrap(ds, datastore.NewKey("/storagemarket")))
+func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.ProviderPieceStore, error) {
+ ps, err := piecestoreimpl.NewPieceStore(namespace.Wrap(ds, datastore.NewKey("/storagemarket")))
+ if err != nil {
+ return nil, err
+ }
+ ps.OnReady(marketevents.ReadyLogger("piecestore"))
+ lc.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ return ps.Start(ctx)
+ },
+ })
+ return ps, nil
}
func StagingMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.StagingMultiDstore, error) {
@@ -331,7 +384,13 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat
return nil, err
}
- storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(ds, datastore.NewKey("/deals/provider")), datastore.NewKey("latest-ask"), spn, address.Address(minerAddress))
+ providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider"))
+ // legacy: this key was originally stored in the wrong place -- move the old key if it is still present
+ err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest")
+ if err != nil {
+ return nil, err
+ }
+ storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress))
if err != nil {
return nil, err
}
@@ -410,6 +469,13 @@ func BasicDealFilter(user dtypes.DealFilter) func(onlineOk dtypes.ConsiderOnline
return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil
}
+ // Reject if it's more than 7 days in the future
+ // TODO: read from cfg
+ maxStartEpoch := ht + abi.ChainEpoch(7*builtin.EpochsInDay)
+ if deal.Proposal.StartEpoch > maxStartEpoch {
+ return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil
+ }
+
if user != nil {
return user(ctx, deal)
}
diff --git a/node/modules/testing/beacon.go b/node/modules/testing/beacon.go
index a4ef822fc..7876e1d05 100644
--- a/node/modules/testing/beacon.go
+++ b/node/modules/testing/beacon.go
@@ -7,6 +7,9 @@ import (
"github.com/filecoin-project/lotus/chain/beacon"
)
-func RandomBeacon() (beacon.RandomBeacon, error) {
- return beacon.NewMockBeacon(time.Duration(build.BlockDelaySecs) * time.Second), nil
+func RandomBeacon() (beacon.Schedule, error) {
+ return beacon.Schedule{
+ {Start: 0,
+ Beacon: beacon.NewMockBeacon(time.Duration(build.BlockDelaySecs) * time.Second),
+ }}, nil
}
diff --git a/node/node_test.go b/node/node_test.go
index 31a14bc20..001b99c04 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -7,25 +7,20 @@ import (
builder "github.com/filecoin-project/lotus/node/test"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/lib/lotuslog"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/api/test"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
)
func init() {
_ = logging.SetLogLevel("*", "INFO")
- power.ConsensusMinerMinPower = big.NewInt(2048)
- saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
- abi.RegisteredSealProof_StackedDrg2KiBV1: {},
- }
- verifreg.MinVerifiedDealSize = big.NewInt(256)
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
func TestAPI(t *testing.T) {
@@ -68,7 +63,12 @@ func TestAPIDealFlowReal(t *testing.T) {
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
- saminer.PreCommitChallengeDelay = 5
+ // TODO: just set this globally?
+ oldDelay := policy.GetPreCommitChallengeDelay()
+ policy.SetPreCommitChallengeDelay(5)
+ t.Cleanup(func() {
+ policy.SetPreCommitChallengeDelay(oldDelay)
+ })
t.Run("basic", func(t *testing.T) {
test.TestDealFlow(t, builder.Builder, time.Second, false, false)
diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go
index 2b7fa9ed3..c1b6b5233 100644
--- a/node/repo/fsrepo.go
+++ b/node/repo/fsrepo.go
@@ -68,7 +68,8 @@ var ErrRepoExists = xerrors.New("repo exists")
// FsRepo is struct for repo, use NewFS to create
type FsRepo struct {
- path string
+ path string
+ configPath string
}
var _ Repo = &FsRepo{}
@@ -81,10 +82,15 @@ func NewFS(path string) (*FsRepo, error) {
}
return &FsRepo{
- path: path,
+ path: path,
+ configPath: filepath.Join(path, fsConfig),
}, nil
}
+func (fsr *FsRepo) SetConfigPath(cfgPath string) {
+ fsr.configPath = cfgPath
+}
+
func (fsr *FsRepo) Exists() (bool, error) {
_, err := os.Stat(filepath.Join(fsr.path, fsDatastore))
notexist := os.IsNotExist(err)
@@ -110,7 +116,7 @@ func (fsr *FsRepo) Init(t RepoType) error {
}
log.Infof("Initializing repo at '%s'", fsr.path)
- err = os.Mkdir(fsr.path, 0755) //nolint: gosec
+ err = os.MkdirAll(fsr.path, 0755) //nolint: gosec
if err != nil && !os.IsExist(err) {
return err
}
@@ -124,9 +130,7 @@ func (fsr *FsRepo) Init(t RepoType) error {
}
func (fsr *FsRepo) initConfig(t RepoType) error {
- cfgP := filepath.Join(fsr.path, fsConfig)
-
- _, err := os.Stat(cfgP)
+ _, err := os.Stat(fsr.configPath)
if err == nil {
// exists
return nil
@@ -134,7 +138,7 @@ func (fsr *FsRepo) initConfig(t RepoType) error {
return err
}
- c, err := os.Create(cfgP)
+ c, err := os.Create(fsr.configPath)
if err != nil {
return err
}
@@ -224,16 +228,30 @@ func (fsr *FsRepo) Lock(repoType RepoType) (LockedRepo, error) {
return nil, xerrors.Errorf("could not lock the repo: %w", err)
}
return &fsLockedRepo{
- path: fsr.path,
- repoType: repoType,
- closer: closer,
+ path: fsr.path,
+ configPath: fsr.configPath,
+ repoType: repoType,
+ closer: closer,
}, nil
}
+// LockRO is like Lock, except datastores will be opened in read-only mode
+func (fsr *FsRepo) LockRO(repoType RepoType) (LockedRepo, error) {
+ lr, err := fsr.Lock(repoType)
+ if err != nil {
+ return nil, err
+ }
+
+ lr.(*fsLockedRepo).readonly = true
+ return lr, nil
+}
+
type fsLockedRepo struct {
- path string
- repoType RepoType
- closer io.Closer
+ path string
+ configPath string
+ repoType RepoType
+ closer io.Closer
+ readonly bool
ds map[string]datastore.Batching
dsErr error
@@ -286,7 +304,7 @@ func (fsr *fsLockedRepo) Config() (interface{}, error) {
}
func (fsr *fsLockedRepo) loadConfigFromDisk() (interface{}, error) {
- return config.FromFile(fsr.join(fsConfig), defConfForType(fsr.repoType))
+ return config.FromFile(fsr.configPath, defConfForType(fsr.repoType))
}
func (fsr *fsLockedRepo) SetConfig(c func(interface{})) error {
@@ -315,7 +333,7 @@ func (fsr *fsLockedRepo) SetConfig(c func(interface{})) error {
}
// write buffer of TOML bytes to config file
- err = ioutil.WriteFile(fsr.join(fsConfig), buf.Bytes(), 0644)
+ err = ioutil.WriteFile(fsr.configPath, buf.Bytes(), 0644)
if err != nil {
return err
}
diff --git a/node/repo/fsrepo_ds.go b/node/repo/fsrepo_ds.go
index f4afe0dee..aa91d2514 100644
--- a/node/repo/fsrepo_ds.go
+++ b/node/repo/fsrepo_ds.go
@@ -14,7 +14,7 @@ import (
ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
)
-type dsCtor func(path string) (datastore.Batching, error)
+type dsCtor func(path string, readonly bool) (datastore.Batching, error)
var fsDatastores = map[string]dsCtor{
"chain": chainBadgerDs,
@@ -26,9 +26,10 @@ var fsDatastores = map[string]dsCtor{
"client": badgerDs, // client specific
}
-func chainBadgerDs(path string) (datastore.Batching, error) {
+func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) {
opts := badger.DefaultOptions
opts.GcInterval = 0 // disable GC for chain datastore
+ opts.ReadOnly = readonly
opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
WithValueThreshold(1 << 10)
@@ -36,23 +37,26 @@ func chainBadgerDs(path string) (datastore.Batching, error) {
return badger.NewDatastore(path, &opts)
}
-func badgerDs(path string) (datastore.Batching, error) {
+func badgerDs(path string, readonly bool) (datastore.Batching, error) {
opts := badger.DefaultOptions
+ opts.ReadOnly = readonly
+
opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
WithValueThreshold(1 << 10)
return badger.NewDatastore(path, &opts)
}
-func levelDs(path string) (datastore.Batching, error) {
+func levelDs(path string, readonly bool) (datastore.Batching, error) {
return levelds.NewDatastore(path, &levelds.Options{
Compression: ldbopts.NoCompression,
NoSync: false,
Strict: ldbopts.StrictAll,
+ ReadOnly: readonly,
})
}
-func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error) {
+func (fsr *fsLockedRepo) openDatastores(readonly bool) (map[string]datastore.Batching, error) {
if err := os.MkdirAll(fsr.join(fsDatastore), 0755); err != nil {
return nil, xerrors.Errorf("mkdir %s: %w", fsr.join(fsDatastore), err)
}
@@ -63,7 +67,7 @@ func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error)
prefix := datastore.NewKey(p)
// TODO: optimization: don't init datastores we don't need
- ds, err := ctor(fsr.join(filepath.Join(fsDatastore, p)))
+ ds, err := ctor(fsr.join(filepath.Join(fsDatastore, p)), readonly)
if err != nil {
return nil, xerrors.Errorf("opening datastore %s: %w", prefix, err)
}
@@ -78,7 +82,7 @@ func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error)
func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) {
fsr.dsOnce.Do(func() {
- fsr.ds, fsr.dsErr = fsr.openDatastores()
+ fsr.ds, fsr.dsErr = fsr.openDatastores(fsr.readonly)
})
if fsr.dsErr != nil {
diff --git a/node/test/builder.go b/node/test/builder.go
index 64e20e4f2..a3455f376 100644
--- a/node/test/builder.go
+++ b/node/test/builder.go
@@ -13,6 +13,8 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
@@ -34,10 +36,8 @@ import (
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/mockstorage"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
@@ -83,7 +83,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
peerid, err := peer.IDFromPrivateKey(pk)
require.NoError(t, err)
- enc, err := actors.SerializeParams(&miner.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
+ enc, err := actors.SerializeParams(&miner0.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
require.NoError(t, err)
msg := &types.Message{
@@ -137,7 +137,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne}
}
-func Builder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
+func Builder(t *testing.T, nFull int, storage []test.StorageMiner, opts ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
ctx := context.Background()
mn := mocknet.New(ctx)
@@ -198,6 +198,7 @@ func Builder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestN
templ := &genesis.Template{
Accounts: genaccs,
Miners: genms,
+ NetworkName: "test",
Timestamp: uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past
VerifregRootKey: gen.DefaultVerifregRootkeyActor,
RemainderAccount: gen.DefaultRemainderAccountActor,
@@ -223,6 +224,7 @@ func Builder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestN
node.Test(),
genesis,
+ node.Options(opts...),
)
if err != nil {
t.Fatal(err)
@@ -284,7 +286,7 @@ func Builder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestN
return fulls, storers
}
-func MockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
+func MockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner, options ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
ctx := context.Background()
mn := mocknet.New(ctx)
@@ -344,6 +346,7 @@ func MockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test
templ := &genesis.Template{
Accounts: genaccs,
Miners: genms,
+ NetworkName: "test",
Timestamp: uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000),
VerifregRootKey: gen.DefaultVerifregRootkeyActor,
RemainderAccount: gen.DefaultRemainderAccountActor,
@@ -371,6 +374,7 @@ func MockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
genesis,
+ node.Options(options...),
)
if err != nil {
t.Fatalf("%+v", err)
@@ -433,16 +437,16 @@ func MockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test
return fulls, storers
}
-func RPCBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
- return rpcWithBuilder(t, Builder, nFull, storage)
+func RPCBuilder(t *testing.T, nFull int, storage []test.StorageMiner, opts ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
+ return rpcWithBuilder(t, Builder, nFull, storage, opts...)
}
-func RPCMockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
- return rpcWithBuilder(t, MockSbBuilder, nFull, storage)
+func RPCMockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner, opts ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
+ return rpcWithBuilder(t, MockSbBuilder, nFull, storage, opts...)
}
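+
+// Hedged usage sketch: tests can now thread extra dependency-injection options
+// through any of the builders; miners stands in for a caller-prepared
+// []test.StorageMiner and the override shown is purely illustrative:
+//
+//   fulls, storers := RPCMockSbBuilder(t, 1, miners,
+//       node.Override(new(ffiwrapper.Verifier), mock.MockVerifier))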
-func rpcWithBuilder(t *testing.T, b test.APIBuilder, nFull int, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
- fullApis, storaApis := b(t, nFull, storage)
+func rpcWithBuilder(t *testing.T, b test.APIBuilder, nFull int, storage []test.StorageMiner, opts ...node.Option) ([]test.TestNode, []test.TestStorageNode) {
+ fullApis, storaApis := b(t, nFull, storage, opts...)
fulls := make([]test.TestNode, nFull)
storers := make([]test.TestStorageNode, len(storage))
diff --git a/paychmgr/manager.go b/paychmgr/manager.go
index d1fd715ef..f2fc190c7 100644
--- a/paychmgr/manager.go
+++ b/paychmgr/manager.go
@@ -2,34 +2,32 @@ package paychmgr
import (
"context"
+ "errors"
"sync"
- "github.com/filecoin-project/specs-actors/actors/crypto"
-
- "github.com/filecoin-project/lotus/node/modules/helpers"
-
- "github.com/ipfs/go-datastore"
-
- xerrors "golang.org/x/xerrors"
-
- "github.com/filecoin-project/lotus/api"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
"github.com/ipfs/go-cid"
+ "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/fx"
+ xerrors "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl/full"
+ "github.com/filecoin-project/lotus/node/modules/helpers"
)
var log = logging.Logger("paych")
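+
+// errProofNotSupported is returned when a caller passes a non-empty proof to
+// any of the voucher methods below; proofs are no longer forwarded to the
+// payment channel actor.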
+var errProofNotSupported = errors.New("payment channel proof parameter is not supported")
+
// PaychAPI is used by dependency injection to pass the constituent APIs to NewManager()
type PaychAPI struct {
fx.In
@@ -40,9 +38,9 @@ type PaychAPI struct {
// stateManagerAPI defines the methods needed from StateManager
type stateManagerAPI interface {
- LoadActorState(ctx context.Context, a address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error)
+ ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
+ GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error)
Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error)
- AdtStore(ctx context.Context) adt.Store
}
// paychAPI defines the API methods needed by the payment channel manager
@@ -52,6 +50,7 @@ type paychAPI interface {
MpoolPushMessage(ctx context.Context, msg *types.Message, maxFee *api.MessageSendSpec) (*types.SignedMessage, error)
WalletHas(ctx context.Context, addr address.Address) (bool, error)
WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error)
+ StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
}
// managerAPI defines all methods needed by the manager
@@ -141,13 +140,48 @@ func (pm *Manager) GetPaych(ctx context.Context, from, to address.Address, amt t
return chanAccessor.getPaych(ctx, amt)
}
-func (pm *Manager) AvailableFunds(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) {
- chanAccessor, err := pm.accessorByFromTo(from, to)
+func (pm *Manager) AvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) {
+ ca, err := pm.accessorByAddress(ch)
if err != nil {
return nil, err
}
- return chanAccessor.availableFunds()
+ ci, err := ca.getChannelInfo(ch)
+ if err != nil {
+ return nil, err
+ }
+
+ return ca.availableFunds(ci.ChannelID)
+}
+
+func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) {
+ ca, err := pm.accessorByFromTo(from, to)
+ if err != nil {
+ return nil, err
+ }
+
+ ci, err := ca.outboundActiveByFromTo(from, to)
+ if err == ErrChannelNotTracked {
+ // If there is no active channel between from / to we still want to
+ // return an empty ChannelAvailableFunds, so that clients can check
+ // for the existence of a channel between from / to without getting
+ // an error.
+ return &api.ChannelAvailableFunds{
+ Channel: nil,
+ From: from,
+ To: to,
+ ConfirmedAmt: types.NewInt(0),
+ PendingAmt: types.NewInt(0),
+ PendingWaitSentinel: nil,
+ QueuedAmt: types.NewInt(0),
+ VoucherReedeemedAmt: types.NewInt(0),
+ }, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return ca.availableFunds(ci.ChannelID)
}
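+
+// Hedged usage sketch: AvailableFunds takes the address of a known channel,
+// while AvailableFundsByFromTo can be called before any channel exists between
+// the two parties (pm, from and to below are illustrative):
+//
+//   af, err := pm.AvailableFundsByFromTo(from, to)
+//   if err == nil && af.Channel == nil {
+//       // nothing is tracked between from and to yet
+//   }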
// GetPaychWaitReady waits until the create channel / add funds message with the
@@ -216,34 +250,43 @@ func (pm *Manager) CheckVoucherValid(ctx context.Context, ch address.Address, sv
// CheckVoucherSpendable checks if the given voucher is currently spendable
func (pm *Manager) CheckVoucherSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) {
+ if len(proof) > 0 {
+ return false, errProofNotSupported
+ }
ca, err := pm.accessorByAddress(ch)
if err != nil {
return false, err
}
- return ca.checkVoucherSpendable(ctx, ch, sv, secret, proof)
+ return ca.checkVoucherSpendable(ctx, ch, sv, secret)
}
// AddVoucherOutbound adds a voucher for an outbound channel.
// Returns an error if the channel is not already in the store.
func (pm *Manager) AddVoucherOutbound(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
+ if len(proof) > 0 {
+ return types.NewInt(0), errProofNotSupported
+ }
ca, err := pm.accessorByAddress(ch)
if err != nil {
return types.NewInt(0), err
}
- return ca.addVoucher(ctx, ch, sv, proof, minDelta)
+ return ca.addVoucher(ctx, ch, sv, minDelta)
}
// AddVoucherInbound adds a voucher for an inbound channel.
// If the channel is not in the store, fetches the channel from state (and checks that
// the channel To address is owned by the wallet).
func (pm *Manager) AddVoucherInbound(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
+ if len(proof) > 0 {
+ return types.NewInt(0), errProofNotSupported
+ }
// Get an accessor for the channel, creating it from state if necessary
ca, err := pm.inboundChannelAccessor(ctx, ch)
if err != nil {
return types.BigInt{}, err
}
- return ca.addVoucher(ctx, ch, sv, proof, minDelta)
+ return ca.addVoucher(ctx, ch, sv, minDelta)
}
// inboundChannelAccessor gets an accessor for the given channel. The channel
@@ -307,11 +350,14 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address)
}
func (pm *Manager) SubmitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) {
+ if len(proof) > 0 {
+ return cid.Undef, errProofNotSupported
+ }
ca, err := pm.accessorByAddress(ch)
if err != nil {
return cid.Undef, err
}
- return ca.submitVoucher(ctx, ch, sv, secret, proof)
+ return ca.submitVoucher(ctx, ch, sv, secret)
}
func (pm *Manager) AllocateLane(ch address.Address) (uint64, error) {
diff --git a/paychmgr/mock_test.go b/paychmgr/mock_test.go
index d2aa047ee..3393a3072 100644
--- a/paychmgr/mock_test.go
+++ b/paychmgr/mock_test.go
@@ -2,23 +2,20 @@ package paychmgr
import (
"context"
- "fmt"
+ "errors"
"sync"
- "github.com/filecoin-project/lotus/lib/sigs"
-
- "github.com/filecoin-project/specs-actors/actors/crypto"
-
- cbornode "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- "github.com/ipfs/go-cid"
+ "github.com/filecoin-project/lotus/lib/sigs"
)
type mockManagerAPI struct {
@@ -40,29 +37,23 @@ type mockPchState struct {
type mockStateManager struct {
lk sync.Mutex
- accountState map[address.Address]account.State
+ accountState map[address.Address]address.Address
paychState map[address.Address]mockPchState
- store adt.Store
response *api.InvocResult
lastCall *types.Message
}
func newMockStateManager() *mockStateManager {
return &mockStateManager{
- accountState: make(map[address.Address]account.State),
+ accountState: make(map[address.Address]address.Address),
paychState: make(map[address.Address]mockPchState),
- store: adt.WrapStore(context.Background(), cbornode.NewMemCborStore()),
}
}
-func (sm *mockStateManager) AdtStore(ctx context.Context) adt.Store {
- return sm.store
-}
-
-func (sm *mockStateManager) setAccountState(a address.Address, state account.State) {
+func (sm *mockStateManager) setAccountAddress(a address.Address, lookup address.Address) {
sm.lk.Lock()
defer sm.lk.Unlock()
- sm.accountState[a] = state
+ sm.accountState[a] = lookup
}
func (sm *mockStateManager) setPaychState(a address.Address, actor *types.Actor, state paych.State) {
@@ -71,31 +62,24 @@ func (sm *mockStateManager) setPaychState(a address.Address, actor *types.Actor,
sm.paychState[a] = mockPchState{actor, state}
}
-func (sm *mockStateManager) storeLaneStates(laneStates map[uint64]paych.LaneState) (cid.Cid, error) {
- arr := adt.MakeEmptyArray(sm.store)
- for i, ls := range laneStates {
- ls := ls
- if err := arr.Set(i, &ls); err != nil {
- return cid.Undef, err
- }
- }
- return arr.Root()
-}
-
-func (sm *mockStateManager) LoadActorState(ctx context.Context, a address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) {
+func (sm *mockStateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
sm.lk.Lock()
defer sm.lk.Unlock()
+ keyAddr, ok := sm.accountState[addr]
+ if !ok {
+ return address.Undef, errors.New("not found")
+ }
+ return keyAddr, nil
+}
- if outState, ok := out.(*account.State); ok {
- *outState = sm.accountState[a]
- return nil, nil
+func (sm *mockStateManager) GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) {
+ sm.lk.Lock()
+ defer sm.lk.Unlock()
+ info, ok := sm.paychState[addr]
+ if !ok {
+ return nil, nil, errors.New("not found")
}
- if outState, ok := out.(*paych.State); ok {
- info := sm.paychState[a]
- *outState = info.state
- return info.actor, nil
- }
- panic(fmt.Sprintf("unexpected state type %v", out))
+ return info.actor, info.state, nil
}
func (sm *mockStateManager) setCallResponse(response *api.InvocResult) {
@@ -259,3 +243,7 @@ func (pchapi *mockPaychAPI) addSigningKey(key []byte) {
pchapi.signingKey = key
}
+
+func (pchapi *mockPaychAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) {
+ return build.NewestNetworkVersion, nil
+}
diff --git a/paychmgr/msglistener_test.go b/paychmgr/msglistener_test.go
index 2c3ae16e4..4b8ae6f30 100644
--- a/paychmgr/msglistener_test.go
+++ b/paychmgr/msglistener_test.go
@@ -4,9 +4,7 @@ import (
"testing"
"github.com/ipfs/go-cid"
-
"github.com/stretchr/testify/require"
-
"golang.org/x/xerrors"
)
diff --git a/paychmgr/paych.go b/paychmgr/paych.go
index be43aaf9b..c4ef3deb0 100644
--- a/paychmgr/paych.go
+++ b/paychmgr/paych.go
@@ -1,26 +1,21 @@
package paychmgr
import (
- "bytes"
"context"
"fmt"
- "github.com/filecoin-project/lotus/api"
-
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
"github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- xerrors "golang.org/x/xerrors"
)
// insufficientFundsErr indicates that there are not enough funds in the
@@ -45,6 +40,19 @@ func (e *ErrInsufficientFunds) Shortfall() types.BigInt {
return e.shortfall
}
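+// laneState is a minimal in-memory implementation of paych.LaneState, used to
+// overlay vouchers held in the local store on top of the lane state read from
+// the chain (see channelAccessor.laneState).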
+type laneState struct {
+ redeemed big.Int
+ nonce uint64
+}
+
+func (ls laneState) Redeemed() (big.Int, error) {
+ return ls.redeemed, nil
+}
+
+func (ls laneState) Nonce() (uint64, error) {
+ return ls.nonce, nil
+}
+
// channelAccessor is used to simplify locking when accessing a channel
type channelAccessor struct {
from address.Address
@@ -74,6 +82,15 @@ func newChannelAccessor(pm *Manager, from address.Address, to address.Address) *
}
}
+func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Address) (paych.MessageBuilder, error) {
+ nwVersion, err := ca.api.StateNetworkVersion(ctx, types.EmptyTSK)
+ if err != nil {
+ return nil, err
+ }
+
+ return paych.Message(actors.VersionForNetwork(nwVersion), from), nil
+}
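+
+// Hedged sketch of how the builder is used later in this file: the current
+// network version selects the actors version, and the returned
+// paych.MessageBuilder assembles the actual messages, e.g.
+//
+//   mb, err := ca.messageBuilder(ctx, ci.Control)
+//   if err != nil { ... }
+//   msg, err := mb.Settle(ch)
+//   if err != nil { ... }
+//   smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)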
+
func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
@@ -81,6 +98,13 @@ func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, e
return ca.store.ByAddress(addr)
}
+func (ca *channelAccessor) outboundActiveByFromTo(from, to address.Address) (*ChannelInfo, error) {
+ ca.lk.Lock()
+ defer ca.lk.Unlock()
+
+ return ca.store.OutboundActiveByFromTo(from, to)
+}
+
// createVoucher creates a voucher with the given specification, setting its
// nonce, signing the voucher and storing it in the local datastore.
// If there are not enough funds in the channel to create the voucher, returns
@@ -115,7 +139,7 @@ func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address
sv.Signature = sig
// Store the voucher
- if _, err := ca.addVoucherUnlocked(ctx, ch, sv, nil, types.NewInt(0)); err != nil {
+ if _, err := ca.addVoucherUnlocked(ctx, ch, sv, types.NewInt(0)); err != nil {
// If there are not enough funds in the channel to cover the voucher,
// return a voucher create result with the shortfall
var ife insufficientFundsErr
@@ -144,14 +168,14 @@ func (ca *channelAccessor) nextNonceForLane(ci *ChannelInfo, lane uint64) uint64
return maxnonce + 1
}
-func (ca *channelAccessor) checkVoucherValid(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]*paych.LaneState, error) {
+func (ca *channelAccessor) checkVoucherValid(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]paych.LaneState, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
return ca.checkVoucherValidUnlocked(ctx, ch, sv)
}
-func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]*paych.LaneState, error) {
+func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]paych.LaneState, error) {
if sv.ChannelAddr != ch {
return nil, xerrors.Errorf("voucher ChannelAddr doesn't match channel address, got %s, expected %s", sv.ChannelAddr, ch)
}
@@ -163,12 +187,15 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
}
// Load channel "From" account actor state
- var actState account.State
- _, err = ca.api.LoadActorState(ctx, pchState.From, &actState, nil)
+ f, err := pchState.From()
+ if err != nil {
+ return nil, err
+ }
+
+ from, err := ca.api.ResolveToKeyAddress(ctx, f, nil)
if err != nil {
return nil, err
}
- from := actState.Address
// verify voucher signature
vb, err := sv.SigningBytes()
@@ -184,7 +211,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
}
// Check the voucher against the highest known voucher nonce / value
- laneStates, err := ca.laneState(ctx, pchState, ch)
+ laneStates, err := ca.laneState(pchState, ch)
if err != nil {
return nil, err
}
@@ -192,13 +219,24 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
// If the new voucher nonce value is less than the highest known
// nonce for the lane
ls, lsExists := laneStates[sv.Lane]
- if lsExists && sv.Nonce <= ls.Nonce {
- return nil, fmt.Errorf("nonce too low")
- }
+ if lsExists {
+ n, err := ls.Nonce()
+ if err != nil {
+ return nil, err
+ }
- // If the voucher amount is less than the highest known voucher amount
- if lsExists && sv.Amount.LessThanEqual(ls.Redeemed) {
- return nil, fmt.Errorf("voucher amount is lower than amount for voucher with lower nonce")
+ if sv.Nonce <= n {
+ return nil, fmt.Errorf("nonce too low")
+ }
+
+ // If the voucher amount is less than the highest known voucher amount
+ r, err := ls.Redeemed()
+ if err != nil {
+ return nil, err
+ }
+ if sv.Amount.LessThanEqual(r) {
+ return nil, fmt.Errorf("voucher amount is lower than amount for voucher with lower nonce")
+ }
}
// Total redeemed is the total redeemed amount for all lanes, including
@@ -221,11 +259,9 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
return nil, err
}
- // Total required balance = total redeemed + toSend
- // Must not exceed actor balance
- newTotal := types.BigAdd(totalRedeemed, pchState.ToSend)
- if act.Balance.LessThan(newTotal) {
- return nil, newErrInsufficientFunds(types.BigSub(newTotal, act.Balance))
+ // Total required balance must not exceed actor balance
+ if act.Balance.LessThan(totalRedeemed) {
+ return nil, newErrInsufficientFunds(types.BigSub(totalRedeemed, act.Balance))
}
if len(sv.Merges) != 0 {
@@ -235,7 +271,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
return laneStates, nil
}
-func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) {
+func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (bool, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
@@ -258,37 +294,17 @@ func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address
return false, nil
}
- // If proof is needed and wasn't supplied as a parameter, get it from the
- // datastore
- if sv.Extra != nil && proof == nil {
- vi, err := ci.infoForVoucher(sv)
- if err != nil {
- return false, err
- }
-
- if vi.Proof != nil {
- log.Info("CheckVoucherSpendable: using stored proof")
- proof = vi.Proof
- } else {
- log.Warn("CheckVoucherSpendable: nil proof for voucher with validation")
- }
- }
-
- enc, err := actors.SerializeParams(&paych.UpdateChannelStateParams{
- Sv: *sv,
- Secret: secret,
- Proof: proof,
- })
+ mb, err := ca.messageBuilder(ctx, recipient)
if err != nil {
return false, err
}
- ret, err := ca.api.Call(ctx, &types.Message{
- From: recipient,
- To: ch,
- Method: builtin.MethodsPaych.UpdateChannelState,
- Params: enc,
- }, nil)
+ mes, err := mb.Update(ch, sv, secret)
+ if err != nil {
+ return false, err
+ }
+
+ ret, err := ca.api.Call(ctx, mes, nil)
if err != nil {
return false, err
}
@@ -301,52 +317,39 @@ func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address
}
func (ca *channelAccessor) getPaychRecipient(ctx context.Context, ch address.Address) (address.Address, error) {
- var state paych.State
- if _, err := ca.api.LoadActorState(ctx, ch, &state, nil); err != nil {
+ _, state, err := ca.api.GetPaychState(ctx, ch, nil)
+ if err != nil {
return address.Address{}, err
}
- return state.To, nil
+ return state.To()
}
-func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
+func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta types.BigInt) (types.BigInt, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
- return ca.addVoucherUnlocked(ctx, ch, sv, proof, minDelta)
+ return ca.addVoucherUnlocked(ctx, ch, sv, minDelta)
}
-func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
+func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta types.BigInt) (types.BigInt, error) {
ci, err := ca.store.ByAddress(ch)
if err != nil {
return types.BigInt{}, err
}
// Check if the voucher has already been added
- for i, v := range ci.Vouchers {
+ for _, v := range ci.Vouchers {
eq, err := cborutil.Equals(sv, v.Voucher)
if err != nil {
return types.BigInt{}, err
}
- if !eq {
- continue
+ if eq {
+ // Ignore the duplicate voucher.
+ log.Warnf("AddVoucher: voucher re-added")
+ return types.NewInt(0), nil
}
- // This is a duplicate voucher.
- // Update the proof on the existing voucher
- if len(proof) > 0 && !bytes.Equal(v.Proof, proof) {
- log.Warnf("AddVoucher: adding proof to stored voucher")
- ci.Vouchers[i] = &VoucherInfo{
- Voucher: v.Voucher,
- Proof: proof,
- }
-
- return types.NewInt(0), ca.store.putChannelInfo(ci)
- }
-
- // Otherwise just ignore the duplicate voucher
- log.Warnf("AddVoucher: voucher re-added with matching proof")
- return types.NewInt(0), nil
}
// Check voucher validity
@@ -360,7 +363,10 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad
laneState, exists := laneStates[sv.Lane]
redeemed := big.NewInt(0)
if exists {
- redeemed = laneState.Redeemed
+ redeemed, err = laneState.Redeemed()
+ if err != nil {
+ return types.NewInt(0), err
+ }
}
delta := types.BigSub(sv.Amount, redeemed)
@@ -370,7 +376,6 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad
ci.Vouchers = append(ci.Vouchers, &VoucherInfo{
Voucher: sv,
- Proof: proof,
})
if ci.NextLane <= sv.Lane {
@@ -380,7 +385,7 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad
return delta, ca.store.putChannelInfo(ci)
}
-func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) {
+func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (cid.Cid, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
@@ -389,21 +394,6 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address
return cid.Undef, err
}
- // If voucher needs proof, and none was supplied, check datastore for proof
- if sv.Extra != nil && proof == nil {
- vi, err := ci.infoForVoucher(sv)
- if err != nil {
- return cid.Undef, err
- }
-
- if vi.Proof != nil {
- log.Info("SubmitVoucher: using stored proof")
- proof = vi.Proof
- } else {
- log.Warn("SubmitVoucher: nil proof for voucher with validation")
- }
- }
-
has, err := ci.hasVoucher(sv)
if err != nil {
return cid.Undef, err
@@ -421,21 +411,14 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address
}
}
- enc, err := actors.SerializeParams(&paych.UpdateChannelStateParams{
- Sv: *sv,
- Secret: secret,
- Proof: proof,
- })
+ mb, err := ca.messageBuilder(ctx, ci.Control)
if err != nil {
return cid.Undef, err
}
- msg := &types.Message{
- From: ci.Control,
- To: ch,
- Value: types.NewInt(0),
- Method: builtin.MethodsPaych.UpdateChannelState,
- Params: enc,
+ msg, err := mb.Update(ch, sv, secret)
+ if err != nil {
+ return cid.Undef, err
}
smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)
@@ -448,7 +431,6 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address
// Add the voucher to the channel
ci.Vouchers = append(ci.Vouchers, &VoucherInfo{
Voucher: sv,
- Proof: proof,
})
}
@@ -465,7 +447,6 @@ func (ca *channelAccessor) allocateLane(ch address.Address) (uint64, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
- // TODO: should this take into account lane state?
return ca.store.AllocateLane(ch)
}
@@ -480,13 +461,11 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address)
// laneState gets the LaneStates from chain, then applies all vouchers in
// the data store over the chain state
-func (ca *channelAccessor) laneState(ctx context.Context, state *paych.State, ch address.Address) (map[uint64]*paych.LaneState, error) {
+func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) {
// TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct
// (but technically don't need to)
- // Get the lane state from the chain
- store := ca.api.AdtStore(ctx)
- lsamt, err := adt.AsArray(store, state.LaneStates)
+ laneCount, err := state.LaneCount()
if err != nil {
return nil, err
}
@@ -494,11 +473,9 @@ func (ca *channelAccessor) laneState(ctx context.Context, state *paych.State, ch
// Note: we use a map instead of an array to store laneStates because the
// client sets the lane ID (the index) and potentially they could use a
// very large index.
- var ls paych.LaneState
- laneStates := make(map[uint64]*paych.LaneState, lsamt.Length())
- err = lsamt.ForEach(&ls, func(i int64) error {
- current := ls
- laneStates[uint64(i)] = ¤t
+ laneStates := make(map[uint64]paych.LaneState, laneCount)
+ err = state.ForEachLaneState(func(idx uint64, ls paych.LaneState) error {
+ laneStates[idx] = ls
return nil
})
if err != nil {
@@ -516,30 +493,31 @@ func (ca *channelAccessor) laneState(ctx context.Context, state *paych.State, ch
return nil, xerrors.Errorf("paych merges not handled yet")
}
- // If there's a voucher for a lane that isn't in chain state just
- // create it
+ // Check if there is an existing laneState in the payment channel
+ // for this voucher's lane
ls, ok := laneStates[v.Voucher.Lane]
- if !ok {
- ls = &paych.LaneState{
- Redeemed: types.NewInt(0),
- Nonce: 0,
+
+ // If the voucher's nonce is lower than the existing laneState's
+ // nonce for this lane, ignore it
+ if ok {
+ n, err := ls.Nonce()
+ if err != nil {
+ return nil, err
+ }
+ if v.Voucher.Nonce < n {
+ continue
}
- laneStates[v.Voucher.Lane] = ls
}
- if v.Voucher.Nonce < ls.Nonce {
- continue
- }
-
- ls.Nonce = v.Voucher.Nonce
- ls.Redeemed = v.Voucher.Amount
+ // Voucher takes precedence (new lane, or equal or higher nonce), so replace laneState with this voucher
+ laneStates[v.Voucher.Lane] = laneState{v.Voucher.Amount, v.Voucher.Nonce}
}
return laneStates, nil
}
// Get the total redeemed amount across all lanes, after applying the voucher
-func (ca *channelAccessor) totalRedeemedWithVoucher(laneStates map[uint64]*paych.LaneState, sv *paych.SignedVoucher) (big.Int, error) {
+func (ca *channelAccessor) totalRedeemedWithVoucher(laneStates map[uint64]paych.LaneState, sv *paych.SignedVoucher) (big.Int, error) {
// TODO: merges
if len(sv.Merges) != 0 {
return big.Int{}, xerrors.Errorf("dont currently support paych lane merges")
@@ -547,17 +525,31 @@ func (ca *channelAccessor) totalRedeemedWithVoucher(laneStates map[uint64]*paych
total := big.NewInt(0)
for _, ls := range laneStates {
- total = big.Add(total, ls.Redeemed)
+ r, err := ls.Redeemed()
+ if err != nil {
+ return big.Int{}, err
+ }
+ total = big.Add(total, r)
}
lane, ok := laneStates[sv.Lane]
if ok {
// If the voucher is for an existing lane, and the voucher nonce
// is higher than the lane nonce
- if sv.Nonce > lane.Nonce {
+ n, err := lane.Nonce()
+ if err != nil {
+ return big.Int{}, err
+ }
+
+ if sv.Nonce > n {
// Add the delta between the redeemed amount and the voucher
// amount to the total
- delta := big.Sub(sv.Amount, lane.Redeemed)
+ r, err := lane.Redeemed()
+ if err != nil {
+ return big.Int{}, err
+ }
+
+ delta := big.Sub(sv.Amount, r)
total = big.Add(total, delta)
}
} else {
@@ -578,11 +570,13 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid.
return cid.Undef, err
}
- msg := &types.Message{
- To: ch,
- From: ci.Control,
- Value: types.NewInt(0),
- Method: builtin.MethodsPaych.Settle,
+ mb, err := ca.messageBuilder(ctx, ci.Control)
+ if err != nil {
+ return cid.Undef, err
+ }
+ msg, err := mb.Settle(ch)
+ if err != nil {
+ return cid.Undef, err
}
smgs, err := ca.api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
@@ -607,11 +601,14 @@ func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid
return cid.Undef, err
}
- msg := &types.Message{
- To: ch,
- From: ci.Control,
- Value: types.NewInt(0),
- Method: builtin.MethodsPaych.Collect,
+ mb, err := ca.messageBuilder(ctx, ci.Control)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ msg, err := mb.Collect(ch)
+ if err != nil {
+ return cid.Undef, err
}
smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)
diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go
index e1ae487e1..fcd3d50a8 100644
--- a/paychmgr/paych_test.go
+++ b/paychmgr/paych_test.go
@@ -5,31 +5,24 @@ import (
"context"
"testing"
- "github.com/filecoin-project/lotus/api"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
-
- "github.com/filecoin-project/lotus/lib/sigs"
- "github.com/filecoin-project/specs-actors/actors/crypto"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/specs-actors/actors/abi/big"
-
- "github.com/filecoin-project/specs-actors/actors/abi"
- tutils "github.com/filecoin-project/specs-actors/support/testing"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/chain/types"
-
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ tutils "github.com/filecoin-project/specs-actors/support/testing"
+ paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/lib/sigs"
)
func TestCheckVoucherValid(t *testing.T) {
@@ -46,15 +39,14 @@ func TestCheckVoucherValid(t *testing.T) {
toAcct := tutils.NewActorAddr(t, "toAct")
mock := newMockManagerAPI()
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
tcases := []struct {
name string
expectError bool
key []byte
actorBalance big.Int
- toSend big.Int
voucherAmount big.Int
voucherLane uint64
voucherNonce uint64
@@ -63,143 +55,147 @@ func TestCheckVoucherValid(t *testing.T) {
name: "passes when voucher amount < balance",
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
}, {
name: "fails when funds too low",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(5),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(10),
}, {
name: "fails when invalid signature",
expectError: true,
key: randKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
}, {
name: "fails when signed by channel To account (instead of From account)",
expectError: true,
key: toKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
}, {
name: "fails when nonce too low",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
voucherLane: 1,
voucherNonce: 2,
laneStates: map[uint64]paych.LaneState{
- 1: {
- Redeemed: big.NewInt(2),
- Nonce: 3,
- },
+ 1: paychmock.NewMockLaneState(big.NewInt(2), 3),
},
}, {
name: "passes when nonce higher",
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
voucherLane: 1,
voucherNonce: 3,
laneStates: map[uint64]paych.LaneState{
- 1: {
- Redeemed: big.NewInt(2),
- Nonce: 2,
- },
+ 1: paychmock.NewMockLaneState(big.NewInt(2), 2),
},
}, {
name: "passes when nonce for different lane",
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
voucherLane: 2,
voucherNonce: 2,
laneStates: map[uint64]paych.LaneState{
- 1: {
- Redeemed: big.NewInt(2),
- Nonce: 3,
- },
+ 1: paychmock.NewMockLaneState(big.NewInt(2), 3),
},
}, {
name: "fails when voucher has higher nonce but lower value than lane state",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(0),
voucherAmount: big.NewInt(5),
voucherLane: 1,
voucherNonce: 3,
laneStates: map[uint64]paych.LaneState{
- 1: {
- Redeemed: big.NewInt(6),
- Nonce: 2,
- },
+ 1: paychmock.NewMockLaneState(big.NewInt(6), 2),
},
- }, {
- name: "fails when voucher + ToSend > balance",
- expectError: true,
- key: fromKeyPrivate,
- actorBalance: big.NewInt(10),
- toSend: big.NewInt(9),
- voucherAmount: big.NewInt(2),
}, {
// voucher supersedes lane 1 redeemed so
// lane 1 effective redeemed = voucher amount
//
- // required balance = toSend + total redeemed
- // = 1 + 6 (lane1)
- // = 7
- // So required balance: 7 < actor balance: 10
+ // required balance = voucher amt
+ // = 6
+ // So required balance: 6 < actor balance: 10
- name: "passes when voucher + total redeemed <= balance",
+ name: "passes when voucher total redeemed <= balance",
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(1),
voucherAmount: big.NewInt(6),
voucherLane: 1,
voucherNonce: 2,
laneStates: map[uint64]paych.LaneState{
// Lane 1 (same as voucher lane 1)
- 1: {
- Redeemed: big.NewInt(4),
- Nonce: 1,
- },
+ 1: paychmock.NewMockLaneState(big.NewInt(4), 1),
},
}, {
- // required balance = toSend + total redeemed
- // = 1 + 4 (lane 2) + 6 (voucher lane 1)
+ // required balance = total redeemed
+ // = 6 (voucher lane 1) + 5 (lane 2)
// = 11
// So required balance: 11 > actor balance: 10
- name: "fails when voucher + total redeemed > balance",
+ name: "fails when voucher total redeemed > balance",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
- toSend: big.NewInt(1),
voucherAmount: big.NewInt(6),
voucherLane: 1,
voucherNonce: 1,
laneStates: map[uint64]paych.LaneState{
// Lane 2 (different from voucher lane 1)
- 2: {
- Redeemed: big.NewInt(4),
- Nonce: 1,
- },
+ 2: paychmock.NewMockLaneState(big.NewInt(5), 1),
+ },
+ }, {
+ // voucher supersedes lane 1 redeemed so
+ // lane 1 effective redeemed = voucher amount
+ //
+ // required balance = total redeemed
+ // = 6 (new voucher lane 1) + 5 (lane 2)
+ // = 11
+ // So required balance: 11 > actor balance: 10
+ name: "fails when voucher total redeemed > balance",
+ expectError: true,
+ key: fromKeyPrivate,
+ actorBalance: big.NewInt(10),
+ voucherAmount: big.NewInt(6),
+ voucherLane: 1,
+ voucherNonce: 2,
+ laneStates: map[uint64]paych.LaneState{
+ // Lane 1 (superseded by new voucher in voucher lane 1)
+ 1: paychmock.NewMockLaneState(big.NewInt(5), 1),
+ // Lane 2 (different from voucher lane 1)
+ 2: paychmock.NewMockLaneState(big.NewInt(5), 1),
+ },
+ }, {
+ // voucher supersedes lane 1 redeemed so
+ // lane 1 effective redeemed = voucher amount
+ //
+ // required balance = total redeemed
+ // = 5 (new voucher lane 1) + 5 (lane 2)
+ // = 10
+ // So required balance: 10 <= actor balance: 10
+ name: "passes when voucher total redeemed <= balance",
+ expectError: false,
+ key: fromKeyPrivate,
+ actorBalance: big.NewInt(10),
+ voucherAmount: big.NewInt(5),
+ voucherLane: 1,
+ voucherNonce: 2,
+ laneStates: map[uint64]paych.LaneState{
+ // Lane 1 (superseded by new voucher in voucher lane 1)
+ 1: paychmock.NewMockLaneState(big.NewInt(4), 1),
+ // Lane 2 (different from voucher lane 1)
+ 2: paychmock.NewMockLaneState(big.NewInt(5), 1),
},
}}
for _, tcase := range tcases {
tcase := tcase
t.Run(tcase.name, func(t *testing.T) {
- store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
-
// Create an actor for the channel with the test case balance
act := &types.Actor{
Code: builtin.AccountActorCodeID,
@@ -208,27 +204,18 @@ func TestCheckVoucherValid(t *testing.T) {
Balance: tcase.actorBalance,
}
- // Set the state of the channel's lanes
- laneStates, err := mock.storeLaneStates(tcase.laneStates)
- require.NoError(t, err)
-
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: tcase.toSend,
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: laneStates,
- })
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(
+ fromAcct, toAcct, abi.ChainEpoch(0), tcase.laneStates))
// Create a manager
+ store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
mgr, err := newManager(store, mock)
require.NoError(t, err)
// Add channel To address to wallet
mock.addWalletAddress(to)
- // Create a signed voucher
+ // Create the test case signed voucher
sv := createTestVoucher(t, ch, tcase.voucherLane, tcase.voucherNonce, tcase.voucherAmount, tcase.key)
// Check the voucher's validity
@@ -242,154 +229,15 @@ func TestCheckVoucherValid(t *testing.T) {
}
}
-func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
- ctx := context.Background()
-
- fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
-
- ch := tutils.NewIDAddr(t, 100)
- from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic))
- to := tutils.NewSECP256K1Addr(t, "secpTo")
- fromAcct := tutils.NewActorAddr(t, "fromAct")
- toAcct := tutils.NewActorAddr(t, "toAct")
- minDelta := big.NewInt(0)
-
- mock := newMockManagerAPI()
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
-
- store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
-
- actorBalance := big.NewInt(10)
- toSend := big.NewInt(1)
- laneStates := map[uint64]paych.LaneState{
- 1: {
- Nonce: 1,
- Redeemed: big.NewInt(3),
- },
- 2: {
- Nonce: 1,
- Redeemed: big.NewInt(4),
- },
- }
-
- act := &types.Actor{
- Code: builtin.AccountActorCodeID,
- Head: cid.Cid{},
- Nonce: 0,
- Balance: actorBalance,
- }
-
- lsCid, err := mock.storeLaneStates(laneStates)
- require.NoError(t, err)
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: toSend,
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: lsCid,
- })
-
- mgr, err := newManager(store, mock)
- require.NoError(t, err)
-
- // Add channel To address to wallet
- mock.addWalletAddress(to)
-
- //
- // Should not be possible to add a voucher with a value such that
- // + toSend >
- //
- // lane 1 redeemed: 3
- // voucher amount (lane 1): 6
- // lane 1 redeemed (with voucher): 6
- //
- // Lane 1: 6
- // Lane 2: 4
- // toSend: 1
- // --
- // total: 11
- //
- // actor balance is 10 so total is too high.
- //
- voucherLane := uint64(1)
- voucherNonce := uint64(2)
- voucherAmount := big.NewInt(6)
- sv := createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
- err = mgr.CheckVoucherValid(ctx, ch, sv)
- require.Error(t, err)
-
- //
- // lane 1 redeemed: 3
- // voucher amount (lane 1): 4
- // lane 1 redeemed (with voucher): 4
- //
- // Lane 1: 4
- // Lane 2: 4
- // toSend: 1
- // --
- // total: 9
- //
- // actor balance is 10 so total is ok.
- //
- voucherAmount = big.NewInt(4)
- sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
- err = mgr.CheckVoucherValid(ctx, ch, sv)
- require.NoError(t, err)
-
- // Add voucher to lane 1, so Lane 1 effective redeemed
- // (with first voucher) is now 4
- _, err = mgr.AddVoucherOutbound(ctx, ch, sv, nil, minDelta)
- require.NoError(t, err)
-
- //
- // lane 1 redeemed: 4
- // voucher amount (lane 1): 6
- // lane 1 redeemed (with voucher): 6
- //
- // Lane 1: 6
- // Lane 2: 4
- // toSend: 1
- // --
- // total: 11
- //
- // actor balance is 10 so total is too high.
- //
- voucherNonce++
- voucherAmount = big.NewInt(6)
- sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
- err = mgr.CheckVoucherValid(ctx, ch, sv)
- require.Error(t, err)
-
- //
- // lane 1 redeemed: 4
- // voucher amount (lane 1): 5
- // lane 1 redeemed (with voucher): 5
- //
- // Lane 1: 5
- // Lane 2: 4
- // toSend: 1
- // --
- // total: 10
- //
- // actor balance is 10 so total is ok.
- //
- voucherAmount = big.NewInt(5)
- sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
- err = mgr.CheckVoucherValid(ctx, ch, sv)
- require.NoError(t, err)
-}
-
func TestCreateVoucher(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
// Create a voucher in lane 1
voucherLane1Amt := big.NewInt(5)
- voucher := paych.SignedVoucher{
+ voucher := paych2.SignedVoucher{
Lane: 1,
Amount: voucherLane1Amt,
}
@@ -404,7 +252,7 @@ func TestCreateVoucher(t *testing.T) {
// Create a voucher in lane 1 again, with a higher amount
voucherLane1Amt = big.NewInt(8)
- voucher = paych.SignedVoucher{
+ voucher = paych2.SignedVoucher{
Lane: 1,
Amount: voucherLane1Amt,
}
@@ -419,7 +267,7 @@ func TestCreateVoucher(t *testing.T) {
// Create a voucher in lane 2 that covers all the remaining funds
// in the channel
voucherLane2Amt := big.Sub(s.amt, voucherLane1Amt)
- voucher = paych.SignedVoucher{
+ voucher = paych2.SignedVoucher{
Lane: 2,
Amount: voucherLane2Amt,
}
@@ -433,7 +281,7 @@ func TestCreateVoucher(t *testing.T) {
// Create a voucher in lane 2 that exceeds the remaining funds in the
// channel
voucherLane2Amt = big.Add(voucherLane2Amt, big.NewInt(1))
- voucher = paych.SignedVoucher{
+ voucher = paych2.SignedVoucher{
Lane: 2,
Amount: voucherLane2Amt,
}
@@ -450,7 +298,7 @@ func TestAddVoucherDelta(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
voucherLane := uint64(1)
@@ -492,7 +340,7 @@ func TestAddVoucherNextLane(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
minDelta := big.NewInt(0)
voucherAmount := big.NewInt(2)
@@ -539,10 +387,8 @@ func TestAddVoucherNextLane(t *testing.T) {
}
func TestAllocateLane(t *testing.T) {
- ctx := context.Background()
-
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
// First lane should be 0
lane, err := s.mgr.AllocateLane(s.ch)
@@ -567,15 +413,14 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
toAcct := tutils.NewActorAddr(t, "toAct")
mock := newMockManagerAPI()
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
mock.addWalletAddress(to)
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
// Create a channel that will be retrieved from state
actorBalance := big.NewInt(10)
- toSend := big.NewInt(1)
act := &types.Actor{
Code: builtin.AccountActorCodeID,
@@ -584,16 +429,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
Balance: actorBalance,
}
- arr, err := adt.MakeEmptyArray(mock.store).Root()
- require.NoError(t, err)
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: toSend,
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: arr,
- })
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
mgr, err := newManager(store, mock)
require.NoError(t, err)
@@ -614,53 +450,6 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
require.EqualValues(t, 3, lane)
}
-func TestAddVoucherProof(t *testing.T) {
- ctx := context.Background()
-
- // Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
-
- nonce := uint64(1)
- voucherAmount := big.NewInt(1)
- minDelta := big.NewInt(0)
- voucherAmount = big.NewInt(2)
- voucherLane := uint64(1)
-
- // Add a voucher with no proof
- var proof []byte
- sv := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate)
- _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta)
- require.NoError(t, err)
-
- // Expect one voucher with no proof
- ci, err := s.mgr.GetChannelInfo(s.ch)
- require.NoError(t, err)
- require.Len(t, ci.Vouchers, 1)
- require.Len(t, ci.Vouchers[0].Proof, 0)
-
- // Add same voucher with no proof
- voucherLane = uint64(1)
- _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, proof, minDelta)
- require.NoError(t, err)
-
- // Expect one voucher with no proof
- ci, err = s.mgr.GetChannelInfo(s.ch)
- require.NoError(t, err)
- require.Len(t, ci.Vouchers, 1)
- require.Len(t, ci.Vouchers[0].Proof, 0)
-
- // Add same voucher with proof
- proof = []byte{1}
- _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, proof, minDelta)
- require.NoError(t, err)
-
- // Should add proof to existing voucher
- ci, err = s.mgr.GetChannelInfo(s.ch)
- require.NoError(t, err)
- require.Len(t, ci.Vouchers, 1)
- require.Len(t, ci.Vouchers[0].Proof, 1)
-}
-
func TestAddVoucherInboundWalletKey(t *testing.T) {
ctx := context.Background()
@@ -681,19 +470,11 @@ func TestAddVoucherInboundWalletKey(t *testing.T) {
}
mock := newMockManagerAPI()
- arr, err := adt.MakeEmptyArray(mock.store).Root()
- require.NoError(t, err)
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: types.NewInt(0),
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: arr,
- })
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
+
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
// Create a manager
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
@@ -728,7 +509,7 @@ func TestBestSpendable(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
// Add vouchers to lane 1 with amounts: [1, 2, 3]
voucherLane := uint64(1)
@@ -808,7 +589,7 @@ func TestCheckSpendable(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
// Create voucher with Extra
voucherLane := uint64(1)
@@ -816,10 +597,9 @@ func TestCheckSpendable(t *testing.T) {
voucherAmount := big.NewInt(1)
voucher := createTestVoucherWithExtra(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate)
- // Add voucher with proof
+ // Add voucher
minDelta := big.NewInt(0)
- proof := []byte("proof")
- _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, proof, minDelta)
+ _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, nil, minDelta)
require.NoError(t, err)
// Return success exit code from VM call, which indicates that voucher is
@@ -833,33 +613,17 @@ func TestCheckSpendable(t *testing.T) {
// Check that spendable is true
secret := []byte("secret")
- otherProof := []byte("other proof")
- spendable, err := s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, otherProof)
+ spendable, err := s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil)
require.NoError(t, err)
require.True(t, spendable)
- // Check that the secret and proof were passed through correctly
+ // Check that the secret was passed through correctly
lastCall := s.mock.getLastCall()
- var p paych.UpdateChannelStateParams
+ var p paych2.UpdateChannelStateParams
err = p.UnmarshalCBOR(bytes.NewReader(lastCall.Params))
require.NoError(t, err)
- require.Equal(t, otherProof, p.Proof)
require.Equal(t, secret, p.Secret)
- // Check that if no proof is supplied, the proof supplied to add voucher
- // above is used
- secret2 := []byte("secret2")
- spendable, err = s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret2, nil)
- require.NoError(t, err)
- require.True(t, spendable)
-
- lastCall = s.mock.getLastCall()
- var p2 paych.UpdateChannelStateParams
- err = p2.UnmarshalCBOR(bytes.NewReader(lastCall.Params))
- require.NoError(t, err)
- require.Equal(t, proof, p2.Proof)
- require.Equal(t, secret2, p2.Secret)
-
// Check that if VM call returns non-success exit code, spendable is false
s.mock.setCallResponse(&api.InvocResult{
MsgRct: &types.MessageReceipt{
@@ -889,7 +653,7 @@ func TestSubmitVoucher(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
- s := testSetupMgrWithChannel(ctx, t)
+ s := testSetupMgrWithChannel(t)
// Create voucher with Extra
voucherLane := uint64(1)
@@ -897,73 +661,48 @@ func TestSubmitVoucher(t *testing.T) {
voucherAmount := big.NewInt(1)
voucher := createTestVoucherWithExtra(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate)
- // Add voucher with proof
+ // Add voucher
minDelta := big.NewInt(0)
- addVoucherProof := []byte("proof")
- _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, addVoucherProof, minDelta)
+ _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, nil, minDelta)
require.NoError(t, err)
// Submit voucher
secret := []byte("secret")
- submitProof := []byte("submit proof")
- submitCid, err := s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret, submitProof)
+ submitCid, err := s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret, nil)
require.NoError(t, err)
- // Check that the secret and proof were passed through correctly
+ // Check that the secret was passed through correctly
msg := s.mock.pushedMessages(submitCid)
- var p paych.UpdateChannelStateParams
+ var p paych2.UpdateChannelStateParams
err = p.UnmarshalCBOR(bytes.NewReader(msg.Message.Params))
require.NoError(t, err)
- require.Equal(t, submitProof, p.Proof)
require.Equal(t, secret, p.Secret)
- // Check that if no proof is supplied to submit voucher, the proof supplied
- // to add voucher is used
- nonce++
- voucherAmount = big.NewInt(2)
- addVoucherProof2 := []byte("proof2")
- secret2 := []byte("secret2")
- voucher = createTestVoucherWithExtra(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate)
- _, err = s.mgr.AddVoucherInbound(ctx, s.ch, voucher, addVoucherProof2, minDelta)
- require.NoError(t, err)
-
- submitCid, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret2, nil)
- require.NoError(t, err)
-
- msg = s.mock.pushedMessages(submitCid)
- var p2 paych.UpdateChannelStateParams
- err = p2.UnmarshalCBOR(bytes.NewReader(msg.Message.Params))
- require.NoError(t, err)
- require.Equal(t, addVoucherProof2, p2.Proof)
- require.Equal(t, secret2, p2.Secret)
-
// Submit a voucher without first adding it
nonce++
voucherAmount = big.NewInt(3)
secret3 := []byte("secret2")
- proof3 := []byte("proof3")
voucher = createTestVoucherWithExtra(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate)
- submitCid, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret3, proof3)
+ submitCid, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret3, nil)
require.NoError(t, err)
msg = s.mock.pushedMessages(submitCid)
- var p3 paych.UpdateChannelStateParams
+ var p3 paych2.UpdateChannelStateParams
err = p3.UnmarshalCBOR(bytes.NewReader(msg.Message.Params))
require.NoError(t, err)
- require.Equal(t, proof3, p3.Proof)
require.Equal(t, secret3, p3.Secret)
// Verify that vouchers are marked as submitted
vis, err := s.mgr.ListVouchers(ctx, s.ch)
require.NoError(t, err)
- require.Len(t, vis, 3)
+ require.Len(t, vis, 2)
for _, vi := range vis {
require.True(t, vi.Submitted)
}
// Attempting to submit the same voucher again should fail
- _, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret2, nil)
+ _, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret3, nil)
require.Error(t, err)
}
@@ -976,7 +715,7 @@ type testScaffold struct {
fromKeyPrivate []byte
}
-func testSetupMgrWithChannel(ctx context.Context, t *testing.T) *testScaffold {
+func testSetupMgrWithChannel(t *testing.T) *testScaffold {
fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
ch := tutils.NewIDAddr(t, 100)
@@ -986,10 +725,8 @@ func testSetupMgrWithChannel(ctx context.Context, t *testing.T) *testScaffold {
toAcct := tutils.NewActorAddr(t, "toAct")
mock := newMockManagerAPI()
- arr, err := adt.MakeEmptyArray(mock.store).Root()
- require.NoError(t, err)
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
// Create channel in state
balance := big.NewInt(20)
@@ -999,14 +736,7 @@ func testSetupMgrWithChannel(ctx context.Context, t *testing.T) *testScaffold {
Nonce: 0,
Balance: balance,
}
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: big.NewInt(0),
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: arr,
- })
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
mgr, err := newManager(store, mock)
@@ -1043,8 +773,8 @@ func testGenerateKeyPair(t *testing.T) ([]byte, []byte) {
return priv, pub
}
-func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych.SignedVoucher {
- sv := &paych.SignedVoucher{
+func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych2.SignedVoucher {
+ sv := &paych2.SignedVoucher{
ChannelAddr: ch,
Lane: voucherLane,
Nonce: nonce,
@@ -1059,13 +789,13 @@ func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, non
return sv
}
-func createTestVoucherWithExtra(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych.SignedVoucher {
- sv := &paych.SignedVoucher{
+func createTestVoucherWithExtra(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych2.SignedVoucher {
+ sv := &paych2.SignedVoucher{
ChannelAddr: ch,
Lane: voucherLane,
Nonce: nonce,
Amount: voucherAmount,
- Extra: &paych.ModVerifyParams{
+ Extra: &paych2.ModVerifyParams{
Actor: tutils.NewActorAddr(t, "act"),
},
}
@@ -1083,13 +813,13 @@ type mockBestSpendableAPI struct {
mgr *Manager
}
-func (m *mockBestSpendableAPI) PaychVoucherList(ctx context.Context, ch address.Address) ([]*paych.SignedVoucher, error) {
+func (m *mockBestSpendableAPI) PaychVoucherList(ctx context.Context, ch address.Address) ([]*paych2.SignedVoucher, error) {
vi, err := m.mgr.ListVouchers(ctx, ch)
if err != nil {
return nil, err
}
- out := make([]*paych.SignedVoucher, len(vi))
+ out := make([]*paych2.SignedVoucher, len(vi))
for k, v := range vi {
out[k] = v.Voucher
}
@@ -1097,7 +827,7 @@ func (m *mockBestSpendableAPI) PaychVoucherList(ctx context.Context, ch address.
return out, nil
}
-func (m *mockBestSpendableAPI) PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, voucher *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) {
+func (m *mockBestSpendableAPI) PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, voucher *paych2.SignedVoucher, secret []byte, proof []byte) (bool, error) {
return m.mgr.CheckVoucherSpendable(ctx, ch, voucher, secret, proof)
}
diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go
index 8eff08bdd..9f19dd13d 100644
--- a/paychmgr/paychget_test.go
+++ b/paychmgr/paychget_test.go
@@ -6,33 +6,27 @@ import (
"testing"
"time"
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
-
cborrpc "github.com/filecoin-project/go-cbor-util"
-
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
-
- "github.com/filecoin-project/lotus/chain/types"
-
- "github.com/filecoin-project/go-address"
-
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- tutils "github.com/filecoin-project/specs-actors/support/testing"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
-
"github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+ tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
+
+ lotusinit "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock"
+ "github.com/filecoin-project/lotus/chain/types"
)
func testChannelResponse(t *testing.T, ch address.Address) types.MessageReceipt {
- createChannelRet := init_.ExecReturn{
+ createChannelRet := init2.ExecReturn{
IDAddress: ch,
RobustAddress: ch,
}
@@ -67,7 +61,7 @@ func TestPaychGetCreateChannelMsg(t *testing.T) {
pushedMsg := mock.pushedMessages(mcid)
require.Equal(t, from, pushedMsg.Message.From)
- require.Equal(t, builtin.InitActorAddr, pushedMsg.Message.To)
+ require.Equal(t, lotusinit.Address, pushedMsg.Message.To)
require.Equal(t, amt, pushedMsg.Message.Value)
}
@@ -719,7 +713,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) {
// Check create message amount is correct
createMsg := mock.pushedMessages(createMsgCid)
require.Equal(t, from, createMsg.Message.From)
- require.Equal(t, builtin.InitActorAddr, createMsg.Message.To)
+ require.Equal(t, lotusinit.Address, createMsg.Message.To)
require.Equal(t, createAmt, createMsg.Message.Value)
// Check merged add funds amount is the sum of the individual
@@ -815,7 +809,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) {
// Check create message amount is correct
createMsg := mock.pushedMessages(createMsgCid)
require.Equal(t, from, createMsg.Message.From)
- require.Equal(t, builtin.InitActorAddr, createMsg.Message.To)
+ require.Equal(t, lotusinit.Address, createMsg.Message.To)
require.Equal(t, createAmt, createMsg.Message.Value)
// Check merged add funds amount only includes the second add funds amount
@@ -897,7 +891,7 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) {
// Check create message amount is correct
createMsg := mock.pushedMessages(createMsgCid)
require.Equal(t, from, createMsg.Message.From)
- require.Equal(t, builtin.InitActorAddr, createMsg.Message.To)
+ require.Equal(t, lotusinit.Address, createMsg.Message.To)
require.Equal(t, createAmt, createMsg.Message.Value)
}
@@ -921,7 +915,7 @@ func TestPaychAvailableFunds(t *testing.T) {
require.NoError(t, err)
// No channel created yet so available funds should be all zeroes
- av, err := mgr.AvailableFunds(from, to)
+ av, err := mgr.AvailableFundsByFromTo(from, to)
require.NoError(t, err)
require.Nil(t, av.Channel)
require.Nil(t, av.PendingWaitSentinel)
@@ -936,7 +930,7 @@ func TestPaychAvailableFunds(t *testing.T) {
require.NoError(t, err)
// Available funds should reflect create channel message sent
- av, err = mgr.AvailableFunds(from, to)
+ av, err = mgr.AvailableFundsByFromTo(from, to)
require.NoError(t, err)
require.Nil(t, av.Channel)
require.EqualValues(t, 0, av.ConfirmedAmt.Int64())
@@ -964,7 +958,7 @@ func TestPaychAvailableFunds(t *testing.T) {
waitForQueueSize(t, mgr, from, to, 1)
// Available funds should now include queued funds
- av, err = mgr.AvailableFunds(from, to)
+ av, err = mgr.AvailableFundsByFromTo(from, to)
require.NoError(t, err)
require.Nil(t, av.Channel)
require.NotNil(t, av.PendingWaitSentinel)
@@ -976,25 +970,15 @@ func TestPaychAvailableFunds(t *testing.T) {
require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64())
// Create channel in state
- arr, err := adt.MakeEmptyArray(mock.store).Root()
- require.NoError(t, err)
- mock.setAccountState(fromAcct, account.State{Address: from})
- mock.setAccountState(toAcct, account.State{Address: to})
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
act := &types.Actor{
Code: builtin.AccountActorCodeID,
Head: cid.Cid{},
Nonce: 0,
Balance: createAmt,
}
- mock.setPaychState(ch, act, paych.State{
- From: fromAcct,
- To: toAcct,
- ToSend: big.NewInt(0),
- SettlingAt: abi.ChainEpoch(0),
- MinSettleHeight: abi.ChainEpoch(0),
- LaneStates: arr,
- })
-
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
// Send create channel response
response := testChannelResponse(t, ch)
mock.receiveMsgResponse(createMsgCid, response)
@@ -1009,7 +993,7 @@ func TestPaychAvailableFunds(t *testing.T) {
// Available funds should now include the channel and also a wait sentinel
// for the add funds message
- av, err = mgr.AvailableFunds(from, to)
+ av, err = mgr.AvailableFunds(ch)
require.NoError(t, err)
require.NotNil(t, av.Channel)
require.NotNil(t, av.PendingWaitSentinel)
@@ -1031,7 +1015,7 @@ func TestPaychAvailableFunds(t *testing.T) {
require.NoError(t, err)
// Available funds should no longer have a wait sentinel
- av, err = mgr.AvailableFunds(from, to)
+ av, err = mgr.AvailableFunds(ch)
require.NoError(t, err)
require.NotNil(t, av.Channel)
require.Nil(t, av.PendingWaitSentinel)
@@ -1052,7 +1036,7 @@ func TestPaychAvailableFunds(t *testing.T) {
_, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, types.NewInt(0))
require.NoError(t, err)
- av, err = mgr.AvailableFunds(from, to)
+ av, err = mgr.AvailableFunds(ch)
require.NoError(t, err)
require.NotNil(t, av.Channel)
require.Nil(t, av.PendingWaitSentinel)
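The test changes above exercise the split between AvailableFundsByFromTo (usable before a channel address is known) and AvailableFunds (keyed by the channel address once it exists). A minimal caller-side sketch of that pattern, assuming a hypothetical fundsAPI interface and a trimmed-down availableFunds result (only the Channel field is modeled):

package paychsketch

import "github.com/filecoin-project/go-address"

// availableFunds stands in for api.ChannelAvailableFunds; only the field this
// sketch needs is included.
type availableFunds struct {
	Channel *address.Address // nil until the create message has landed on chain
}

// fundsAPI is a hypothetical subset of the paych manager surface used above.
type fundsAPI interface {
	AvailableFundsByFromTo(from, to address.Address) (*availableFunds, error)
	AvailableFunds(ch address.Address) (*availableFunds, error)
}

// queryFunds asks by from/to until the channel address is known, then switches
// to querying by channel address, mirroring TestPaychAvailableFunds above.
func queryFunds(api fundsAPI, from, to address.Address) (*availableFunds, error) {
	av, err := api.AvailableFundsByFromTo(from, to)
	if err != nil || av.Channel == nil {
		return av, err
	}
	return api.AvailableFunds(*av.Channel)
}

Until the create message lands, Channel stays nil and callers keep querying by from/to; afterwards they can address the channel directly.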
diff --git a/paychmgr/paychvoucherfunds_test.go b/paychmgr/paychvoucherfunds_test.go
new file mode 100644
index 000000000..dcbb4acc9
--- /dev/null
+++ b/paychmgr/paychvoucherfunds_test.go
@@ -0,0 +1,103 @@
+package paychmgr
+
+import (
+ "context"
+ "testing"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock"
+
+ "github.com/filecoin-project/lotus/chain/types"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ tutils "github.com/filecoin-project/specs-actors/support/testing"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ ds_sync "github.com/ipfs/go-datastore/sync"
+ "github.com/stretchr/testify/require"
+)
+
+// TestPaychAddVoucherAfterAddFunds tests adding a voucher to a channel with
+// insufficient funds, then adding funds to the channel, then adding the
+// voucher again
+func TestPaychAddVoucherAfterAddFunds(t *testing.T) {
+ ctx := context.Background()
+ store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+
+ fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
+ ch := tutils.NewIDAddr(t, 100)
+ from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic))
+ to := tutils.NewSECP256K1Addr(t, "secpTo")
+ fromAcct := tutils.NewActorAddr(t, "fromAct")
+ toAcct := tutils.NewActorAddr(t, "toAct")
+
+ mock := newMockManagerAPI()
+ defer mock.close()
+
+ // Add the from signing key to the wallet
+ mock.setAccountAddress(fromAcct, from)
+ mock.setAccountAddress(toAcct, to)
+ mock.addSigningKey(fromKeyPrivate)
+
+ mgr, err := newManager(store, mock)
+ require.NoError(t, err)
+
+ // Send create message for a channel with value 10
+ createAmt := big.NewInt(10)
+ _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt)
+ require.NoError(t, err)
+
+ // Send create channel response
+ response := testChannelResponse(t, ch)
+ mock.receiveMsgResponse(createMsgCid, response)
+
+ // Create an actor in state for the channel with the initial channel balance
+ act := &types.Actor{
+ Code: builtin.AccountActorCodeID,
+ Head: cid.Cid{},
+ Nonce: 0,
+ Balance: createAmt,
+ }
+ mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
+
+ // Wait for create response to be processed by manager
+ _, err = mgr.GetPaychWaitReady(ctx, createMsgCid)
+ require.NoError(t, err)
+
+ // Create a voucher with a value equal to the channel balance
+ voucher := paych.SignedVoucher{Amount: createAmt, Lane: 1}
+ res, err := mgr.CreateVoucher(ctx, ch, voucher)
+ require.NoError(t, err)
+ require.NotNil(t, res.Voucher)
+
+ // Create a voucher in a different lane with an amount that exceeds the
+ // channel balance
+ excessAmt := types.NewInt(5)
+ voucher = paych.SignedVoucher{Amount: excessAmt, Lane: 2}
+ res, err = mgr.CreateVoucher(ctx, ch, voucher)
+ require.NoError(t, err)
+ require.Nil(t, res.Voucher)
+ require.Equal(t, res.Shortfall, excessAmt)
+
+ // Add funds so as to cover the voucher shortfall
+ _, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, excessAmt)
+ require.NoError(t, err)
+
+ // Trigger add funds confirmation
+ mock.receiveMsgResponse(addFundsMsgCid, types.MessageReceipt{ExitCode: 0})
+
+ // Update actor test case balance to reflect added funds
+ act.Balance = types.BigAdd(createAmt, excessAmt)
+
+ // Wait for add funds confirmation to be processed by manager
+ _, err = mgr.GetPaychWaitReady(ctx, addFundsMsgCid)
+ require.NoError(t, err)
+
+ // Adding the same voucher, which previously exceeded the channel balance,
+ // should now succeed because the channel balance has been increased
+ res, err = mgr.CreateVoucher(ctx, ch, voucher)
+ require.NoError(t, err)
+ require.NotNil(t, res.Voucher)
+}
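The create → shortfall → add funds → create sequence the new test walks through can be wrapped into a small retry helper. This is an illustrative sketch only; voucherCreator and createResult are hypothetical stand-ins for the manager's CreateVoucher result, which reports either a Voucher or a Shortfall:

package paychsketch

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

// createResult mirrors the shape inspected by the test: either a voucher was
// created, or Shortfall reports how much channel balance is missing.
type createResult struct {
	Voucher   interface{} // placeholder for *paych.SignedVoucher
	Shortfall big.Int
}

// voucherCreator is a hypothetical stand-in for the paych manager.
type voucherCreator interface {
	CreateVoucher(amt big.Int) (createResult, error)
	AddFunds(amt big.Int) error
}

// createWithTopUp retries voucher creation once after covering any shortfall:
// the same create -> add funds -> create sequence the test walks through.
func createWithTopUp(c voucherCreator, amt big.Int) (createResult, error) {
	res, err := c.CreateVoucher(amt)
	if err != nil || res.Voucher != nil {
		return res, err
	}
	if err := c.AddFunds(res.Shortfall); err != nil {
		return createResult{}, fmt.Errorf("adding funds to cover shortfall: %w", err)
	}
	return c.CreateVoucher(amt)
}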
diff --git a/paychmgr/settle_test.go b/paychmgr/settle_test.go
index f922dcccb..f17f961e2 100644
--- a/paychmgr/settle_test.go
+++ b/paychmgr/settle_test.go
@@ -6,7 +6,7 @@ import (
"github.com/ipfs/go-cid"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
+ "github.com/filecoin-project/go-state-types/big"
tutils "github.com/filecoin-project/specs-actors/support/testing"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
diff --git a/paychmgr/settler/settler.go b/paychmgr/settler/settler.go
index d5f8bf54e..02fe9256e 100644
--- a/paychmgr/settler/settler.go
+++ b/paychmgr/settler/settler.go
@@ -12,12 +12,13 @@ import (
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl/full"
@@ -106,7 +107,7 @@ func (pcs *paymentChannelSettler) revertHandler(ctx context.Context, ts *types.T
func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matchOnce bool, matched bool, err error) {
// Check if this is a settle payment channel message
- if msg.Method != builtin.MethodsPaych.Settle {
+ if msg.Method != builtin0.MethodsPaych.Settle {
return false, false, nil
}
// Check if this payment channel is of concern to this node (i.e. tracked in payment channel store),
diff --git a/paychmgr/simple.go b/paychmgr/simple.go
index 88d94645e..253075604 100644
--- a/paychmgr/simple.go
+++ b/paychmgr/simple.go
@@ -6,22 +6,17 @@ import (
"fmt"
"sync"
- "github.com/filecoin-project/lotus/api"
-
- "golang.org/x/sync/errgroup"
-
- "github.com/filecoin-project/specs-actors/actors/abi/big"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/ipfs/go-cid"
+ "golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -187,11 +182,11 @@ func (ca *channelAccessor) enqueue(task *fundsReq) {
defer ca.lk.Unlock()
ca.fundsReqQueue = append(ca.fundsReqQueue, task)
- go ca.processQueue() // nolint: errcheck
+ go ca.processQueue("") // nolint: errcheck
}
// Run the operations in the queue
-func (ca *channelAccessor) processQueue() (*api.ChannelAvailableFunds, error) {
+func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailableFunds, error) {
ca.lk.Lock()
defer ca.lk.Unlock()
@@ -200,7 +195,7 @@ func (ca *channelAccessor) processQueue() (*api.ChannelAvailableFunds, error) {
// If there's nothing in the queue, bail out
if len(ca.fundsReqQueue) == 0 {
- return ca.currentAvailableFunds(types.NewInt(0))
+ return ca.currentAvailableFunds(channelID, types.NewInt(0))
}
// Merge all pending requests into one.
@@ -211,7 +206,7 @@ func (ca *channelAccessor) processQueue() (*api.ChannelAvailableFunds, error) {
if amt.IsZero() {
// Note: The amount can be zero if requests are cancelled as we're
// building the mergedFundsReq
- return ca.currentAvailableFunds(amt)
+ return ca.currentAvailableFunds(channelID, amt)
}
res := ca.processTask(merged.ctx, amt)
@@ -221,7 +216,7 @@ func (ca *channelAccessor) processQueue() (*api.ChannelAvailableFunds, error) {
if res == nil {
// Stop processing the fundsReqQueue and wait. When the event occurs it will
// call processQueue() again
- return ca.currentAvailableFunds(amt)
+ return ca.currentAvailableFunds(channelID, amt)
}
// Finished processing so clear the queue
@@ -230,7 +225,7 @@ func (ca *channelAccessor) processQueue() (*api.ChannelAvailableFunds, error) {
// Call the task callback with its results
merged.onComplete(res)
- return ca.currentAvailableFunds(types.NewInt(0))
+ return ca.currentAvailableFunds(channelID, types.NewInt(0))
}
// filterQueue filters cancelled requests out of the queue
@@ -283,25 +278,16 @@ func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) {
// The queue may have been waiting for msg completion to proceed, so
// process the next queue item
if len(ca.fundsReqQueue) > 0 {
- go ca.processQueue() // nolint: errcheck
+ go ca.processQueue("") // nolint: errcheck
}
}
-func (ca *channelAccessor) currentAvailableFunds(queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) {
- channelInfo, err := ca.store.OutboundActiveByFromTo(ca.from, ca.to)
- if err == ErrChannelNotTracked {
- // If the channel does not exist we still want to return an empty
- // ChannelAvailableFunds, so that clients can check for the existence
- // of a channel between from / to without getting an error.
- return &api.ChannelAvailableFunds{
- Channel: nil,
- ConfirmedAmt: types.NewInt(0),
- PendingAmt: types.NewInt(0),
- PendingWaitSentinel: nil,
- QueuedAmt: queuedAmt,
- VoucherReedeemedAmt: types.NewInt(0),
- }, nil
+func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) {
+ if len(channelID) == 0 {
+ return nil, nil
}
+
+ channelInfo, err := ca.store.ByChannelID(channelID)
if err != nil {
return nil, err
}
@@ -323,18 +309,24 @@ func (ca *channelAccessor) currentAvailableFunds(queuedAmt types.BigInt) (*api.C
return nil, err
}
- laneStates, err := ca.laneState(ca.chctx, pchState, ch)
+ laneStates, err := ca.laneState(pchState, ch)
if err != nil {
return nil, err
}
for _, ls := range laneStates {
- totalRedeemed = types.BigAdd(totalRedeemed, ls.Redeemed)
+ r, err := ls.Redeemed()
+ if err != nil {
+ return nil, err
+ }
+ totalRedeemed = types.BigAdd(totalRedeemed, r)
}
}
return &api.ChannelAvailableFunds{
Channel: channelInfo.Channel,
+ From: channelInfo.from(),
+ To: channelInfo.to(),
ConfirmedAmt: channelInfo.Amount,
PendingAmt: channelInfo.PendingAmount,
PendingWaitSentinel: waitSentinel,
@@ -392,25 +384,13 @@ func (ca *channelAccessor) processTask(ctx context.Context, amt types.BigInt) *p
// createPaych sends a message to create the channel and returns the message cid
func (ca *channelAccessor) createPaych(ctx context.Context, amt types.BigInt) (cid.Cid, error) {
- params, aerr := actors.SerializeParams(&paych.ConstructorParams{From: ca.from, To: ca.to})
- if aerr != nil {
- return cid.Undef, aerr
+ mb, err := ca.messageBuilder(ctx, ca.from)
+ if err != nil {
+ return cid.Undef, err
}
-
- enc, aerr := actors.SerializeParams(&init_.ExecParams{
- CodeCID: builtin.PaymentChannelActorCodeID,
- ConstructorParams: params,
- })
- if aerr != nil {
- return cid.Undef, aerr
- }
-
- msg := &types.Message{
- To: builtin.InitActorAddr,
- From: ca.from,
- Value: amt,
- Method: builtin.MethodsInit.Exec,
- Params: enc,
+ msg, err := mb.Create(ca.to, amt)
+ if err != nil {
+ return cid.Undef, err
}
smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)
@@ -462,7 +442,10 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er
return err
}
- var decodedReturn init_.ExecReturn
+ // TODO: ActorUpgrade abstract over this.
+ // This "works" because it hasn't changed from v0 to v2, but we still
+ // need an abstraction here.
+ var decodedReturn init2.ExecReturn
err = decodedReturn.UnmarshalCBOR(bytes.NewReader(mwait.Receipt.Return))
if err != nil {
log.Error(err)
@@ -713,6 +696,6 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on
return promise
}
-func (ca *channelAccessor) availableFunds() (*api.ChannelAvailableFunds, error) {
- return ca.processQueue()
+func (ca *channelAccessor) availableFunds(channelID string) (*api.ChannelAvailableFunds, error) {
+ return ca.processQueue(channelID)
}
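createPaych above no longer serializes init actor Exec params by hand; it asks a version-aware message builder for the channel-creation message and pushes it. A sketch of that caller shape, with paychMsgBuilder and messagePusher as hypothetical stand-ins for the real builder and MpoolPushMessage:

package paychsketch

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/types"
)

// paychMsgBuilder is a hypothetical stand-in for the actor-version-aware
// message builder used by createPaych above.
type paychMsgBuilder interface {
	Create(to address.Address, initialAmount big.Int) (*types.Message, error)
}

// messagePusher is a hypothetical stand-in for MpoolPushMessage.
type messagePusher interface {
	PushMessage(msg *types.Message) (cid.Cid, error)
}

// createChannelMsg builds and pushes the channel-creation message; no init
// actor Exec params are serialized here, the builder handles the encoding for
// whichever actor version is live.
func createChannelMsg(mb paychMsgBuilder, mp messagePusher, to address.Address, amt big.Int) (cid.Cid, error) {
	msg, err := mb.Create(to, amt)
	if err != nil {
		return cid.Undef, err
	}
	return mp.PushMessage(msg)
}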
diff --git a/paychmgr/state.go b/paychmgr/state.go
index 00fe2adce..65963d2a0 100644
--- a/paychmgr/state.go
+++ b/paychmgr/state.go
@@ -3,13 +3,9 @@ package paychmgr
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/account"
-
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -17,14 +13,8 @@ type stateAccessor struct {
sm stateManagerAPI
}
-func (ca *stateAccessor) loadPaychActorState(ctx context.Context, ch address.Address) (*types.Actor, *paych.State, error) {
- var pcast paych.State
- act, err := ca.sm.LoadActorState(ctx, ch, &pcast, nil)
- if err != nil {
- return nil, nil, err
- }
-
- return act, &pcast, nil
+func (ca *stateAccessor) loadPaychActorState(ctx context.Context, ch address.Address) (*types.Actor, paych.State, error) {
+ return ca.sm.GetPaychState(ctx, ch, nil)
}
func (ca *stateAccessor) loadStateChannelInfo(ctx context.Context, ch address.Address, dir uint64) (*ChannelInfo, error) {
@@ -33,17 +23,23 @@ func (ca *stateAccessor) loadStateChannelInfo(ctx context.Context, ch address.Ad
return nil, err
}
- var account account.State
- _, err = ca.sm.LoadActorState(ctx, st.From, &account, nil)
+ // Load channel "From" account actor state
+ f, err := st.From()
if err != nil {
return nil, err
}
- from := account.Address
- _, err = ca.sm.LoadActorState(ctx, st.To, &account, nil)
+ from, err := ca.sm.ResolveToKeyAddress(ctx, f, nil)
+ if err != nil {
+ return nil, err
+ }
+ t, err := st.To()
+ if err != nil {
+ return nil, err
+ }
+ to, err := ca.sm.ResolveToKeyAddress(ctx, t, nil)
if err != nil {
return nil, err
}
- to := account.Address
nextLane, err := ca.nextLaneFromState(ctx, st)
if err != nil {
@@ -67,25 +63,24 @@ func (ca *stateAccessor) loadStateChannelInfo(ctx context.Context, ch address.Ad
return ci, nil
}
-func (ca *stateAccessor) nextLaneFromState(ctx context.Context, st *paych.State) (uint64, error) {
- store := ca.sm.AdtStore(ctx)
- laneStates, err := adt.AsArray(store, st.LaneStates)
+func (ca *stateAccessor) nextLaneFromState(ctx context.Context, st paych.State) (uint64, error) {
+ laneCount, err := st.LaneCount()
if err != nil {
return 0, err
}
- if laneStates.Length() == 0 {
+ if laneCount == 0 {
return 0, nil
}
- maxID := int64(0)
- if err := laneStates.ForEach(nil, func(i int64) error {
- if i > maxID {
- maxID = i
+ maxID := uint64(0)
+ if err := st.ForEachLaneState(func(idx uint64, _ paych.LaneState) error {
+ if idx > maxID {
+ maxID = idx
}
return nil
}); err != nil {
return 0, err
}
- return uint64(maxID + 1), nil
+ return maxID + 1, nil
}
diff --git a/paychmgr/store.go b/paychmgr/store.go
index 4a5a4f49f..a17ad1fcd 100644
--- a/paychmgr/store.go
+++ b/paychmgr/store.go
@@ -12,7 +12,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
cborutil "github.com/filecoin-project/go-cbor-util"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
@@ -21,6 +20,7 @@ import (
"github.com/filecoin-project/go-address"
cborrpc "github.com/filecoin-project/go-cbor-util"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -49,7 +49,7 @@ const (
type VoucherInfo struct {
Voucher *paych.SignedVoucher
- Proof []byte
+ Proof []byte // ignored
Submitted bool
}
@@ -86,6 +86,20 @@ type ChannelInfo struct {
Settling bool
}
+func (ci *ChannelInfo) from() address.Address {
+ if ci.Direction == DirOutbound {
+ return ci.Control
+ }
+ return ci.Target
+}
+
+func (ci *ChannelInfo) to() address.Address {
+ if ci.Direction == DirOutbound {
+ return ci.Target
+ }
+ return ci.Control
+}
+
// infoForVoucher gets the VoucherInfo for the given voucher.
// returns nil if the channel doesn't have the voucher.
func (ci *ChannelInfo) infoForVoucher(sv *paych.SignedVoucher) (*VoucherInfo, error) {
diff --git a/paychmgr/store_test.go b/paychmgr/store_test.go
index 36ff7a5b0..1ec8895fa 100644
--- a/paychmgr/store_test.go
+++ b/paychmgr/store_test.go
@@ -53,8 +53,12 @@ func TestStore(t *testing.T) {
addrs, err = store.ListChannels()
require.NoError(t, err)
require.Len(t, addrs, 2)
- require.Contains(t, addrsStrings(addrs), "t0100")
- require.Contains(t, addrsStrings(addrs), "t0200")
+ t0100, err := address.NewIDAddress(100)
+ require.NoError(t, err)
+ t0200, err := address.NewIDAddress(200)
+ require.NoError(t, err)
+ require.Contains(t, addrs, t0100)
+ require.Contains(t, addrs, t0200)
// Request vouchers for channel
vouchers, err := store.VouchersForPaych(*ci.Channel)
@@ -79,11 +83,3 @@ func TestStore(t *testing.T) {
_, err = store.AllocateLane(tutils.NewIDAddr(t, 300))
require.Equal(t, err, ErrChannelNotTracked)
}
-
-func addrsStrings(addrs []address.Address) []string {
- str := make([]string, len(addrs))
- for i, a := range addrs {
- str[i] = a.String()
- }
- return str
-}
diff --git a/paychmgr/util.go b/paychmgr/util.go
index 0509f8a24..8e5dd4fab 100644
--- a/paychmgr/util.go
+++ b/paychmgr/util.go
@@ -4,7 +4,8 @@ import (
"context"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
)
type BestSpendableAPI interface {
diff --git a/scripts/lotus-miner.service b/scripts/lotus-miner.service
index c079f44a9..54c48d411 100644
--- a/scripts/lotus-miner.service
+++ b/scripts/lotus-miner.service
@@ -2,7 +2,7 @@
Description=Lotus Miner
After=network.target
After=lotus-daemon.service
-Requires=lotus-daemon.service
+Wants=lotus-daemon.service
[Service]
ExecStart=/usr/local/bin/lotus-miner run
diff --git a/storage/adapter_events.go b/storage/adapter_events.go
index 42622e855..ff69c1e51 100644
--- a/storage/adapter_events.go
+++ b/storage/adapter_events.go
@@ -3,7 +3,7 @@ package storage
import (
"context"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/types"
diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go
index 0963b07e6..380fb4471 100644
--- a/storage/adapter_storage_miner.go
+++ b/storage/adapter_storage_miner.go
@@ -4,23 +4,26 @@ import (
"bytes"
"context"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
@@ -82,7 +85,7 @@ func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr ad
return mi.Worker, nil
}
-func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.Deadline, error) {
+func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]api.Deadline, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
@@ -135,7 +138,7 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
}
- ccparams, err := actors.SerializeParams(&market.ComputeDataCommitmentParams{
+ ccparams, err := actors.SerializeParams(&market0.ComputeDataCommitmentParams{
DealIDs: deals,
SectorType: sectorType,
})
@@ -144,10 +147,10 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
}
ccmt := &types.Message{
- To: builtin.StorageMarketActorAddr,
+ To: market.Address,
From: maddr,
Value: types.NewInt(0),
- Method: builtin.MethodsMarket.ComputeDataCommitment,
+ Method: builtin0.MethodsMarket.ComputeDataCommitment,
Params: ccparams,
}
r, err := s.delegate.StateCall(ctx, ccmt, tsk)
@@ -177,32 +180,19 @@ func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr a
return nil, xerrors.Errorf("handleSealFailed(%d): temp error: %+v", sectorNumber, err)
}
- st, err := s.delegate.ChainReadObj(ctx, act.Head)
- if err != nil {
- return nil, xerrors.Errorf("handleSealFailed(%d): temp error: %+v", sectorNumber, err)
- }
-
- var state miner.State
- if err := state.UnmarshalCBOR(bytes.NewReader(st)); err != nil {
- return nil, xerrors.Errorf("handleSealFailed(%d): temp error: unmarshaling miner state: %+v", sectorNumber, err)
- }
stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(s.delegate))
- precommits, err := adt.AsMap(stor, state.PreCommittedSectors)
+
+ state, err := miner.Load(stor, act)
if err != nil {
- return nil, err
+ return nil, xerrors.Errorf("handleSealFailed(%d): temp error: loading miner state: %+v", sectorNumber, err)
}
- var pci miner.SectorPreCommitOnChainInfo
- ok, err := precommits.Get(adt.UIntKey(uint64(sectorNumber)), &pci)
+ pci, err := state.GetPrecommittedSector(sectorNumber)
if err != nil {
return nil, err
}
- if !ok {
- var allocated abi.BitField
- if err := stor.Get(ctx, state.AllocatedSectors, &allocated); err != nil {
- return nil, xerrors.Errorf("loading allocated sector bitfield: %w", err)
- }
- set, err := allocated.IsSet(uint64(sectorNumber))
+ if pci == nil {
+ set, err := state.IsAllocated(sectorNumber)
if err != nil {
return nil, xerrors.Errorf("checking if sector is allocated: %w", err)
}
@@ -213,7 +203,7 @@ func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr a
return nil, nil
}
- return &pci, nil
+ return pci, nil
}
func (s SealingAPIAdapter) StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) (*miner.SectorOnChainInfo, error) {
@@ -259,6 +249,15 @@ func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID ab
return deal.Proposal, nil
}
+func (s SealingAPIAdapter) StateNetworkVersion(ctx context.Context, tok sealing.TipSetToken) (network.Version, error) {
+ tsk, err := types.TipSetKeyFromBytes(tok)
+ if err != nil {
+ return network.VersionMax, err
+ }
+
+ return s.delegate.StateNetworkVersion(ctx, tsk)
+}
+
func (s SealingAPIAdapter) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) {
msg := types.Message{
To: to,
diff --git a/storage/addresses.go b/storage/addresses.go
index a1c05660f..f5640794e 100644
--- a/storage/addresses.go
+++ b/storage/addresses.go
@@ -3,12 +3,13 @@ package storage
import (
"context"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -27,7 +28,7 @@ type addrSelectApi interface {
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
}
-func AddressFor(ctx context.Context, a addrSelectApi, mi api.MinerInfo, use AddrUse, minFunds abi.TokenAmount) (address.Address, error) {
+func AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use AddrUse, minFunds abi.TokenAmount) (address.Address, error) {
switch use {
case PreCommitAddr, CommitAddr:
// always use worker, at least for now
@@ -60,7 +61,7 @@ func AddressFor(ctx context.Context, a addrSelectApi, mi api.MinerInfo, use Addr
return addr, nil
}
- log.Warnw("control address didn't have enough funds for PoSt message", "address", addr, "required", types.FIL(minFunds), "balance", types.FIL(b))
+ log.Warnw("control address didn't have enough funds for window post message", "address", addr, "required", types.FIL(minFunds), "balance", types.FIL(b))
}
// Try to use the owner account if we can, fallback to worker if we can't
diff --git a/storage/miner.go b/storage/miner.go
index eb548eb78..c1b50fe89 100644
--- a/storage/miner.go
+++ b/storage/miner.go
@@ -5,6 +5,15 @@ import (
"errors"
"time"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/go-state-types/dline"
+
+ "github.com/filecoin-project/go-bitfield"
+
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
@@ -12,19 +21,20 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -45,19 +55,30 @@ type Miner struct {
getSealConfig dtypes.GetSealingConfigFunc
sealing *sealing.Sealing
+
+ sealingEvtType journal.EventType
+}
+
+// SealingStateEvt is a journal event that records a sector state transition.
+type SealingStateEvt struct {
+ SectorNumber abi.SectorNumber
+ SectorType abi.RegisteredSealProof
+ From sealing.SectorState
+ After sealing.SectorState
+ Error string
}
type storageMinerApi interface {
// Call a read only method on actors (no interaction with the chain required)
StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error)
- StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) ([]*miner.Deadline, error)
- StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
- StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error)
+ StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
- StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error)
- StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
+ StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
+ StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
+ StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error)
+ StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error)
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error)
@@ -65,9 +86,10 @@ type storageMinerApi interface {
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error)
- StateMinerFaults(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
- StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
+ StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
+ StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
+ StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
@@ -98,9 +120,10 @@ func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, d
sc: sc,
verif: verif,
- maddr: maddr,
- worker: worker,
- getSealConfig: gsd,
+ maddr: maddr,
+ worker: worker,
+ getSealConfig: gsd,
+ sealingEvtType: journal.J.RegisterEventType("storage", "sealing_states"),
}
return m, nil
@@ -123,14 +146,27 @@ func (m *Miner) Run(ctx context.Context) error {
evts := events.NewEvents(ctx, m.api)
adaptedAPI := NewSealingAPIAdapter(m.api)
- pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, miner.MaxSectorExpirationExtension-(miner.WPoStProvingPeriod*2), md.PeriodStart%miner.WPoStProvingPeriod)
- m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig))
+ // TODO: Maybe we update this policy after actor upgrades?
+ pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, miner0.MaxSectorExpirationExtension-(miner0.WPoStProvingPeriod*2), md.PeriodStart%miner0.WPoStProvingPeriod)
+ m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications)
go m.sealing.Run(ctx) //nolint:errcheck // logged inside the function
return nil
}
+func (m *Miner) handleSealingNotifications(before, after sealing.SectorInfo) {
+ journal.J.RecordEvent(m.sealingEvtType, func() interface{} {
+ return SealingStateEvt{
+ SectorNumber: before.SectorNumber,
+ SectorType: before.SectorType,
+ From: before.State,
+ After: after.State,
+ Error: after.LastErr,
+ }
+ })
+}
+
func (m *Miner) Stop(ctx context.Context) error {
return m.sealing.Stop(ctx)
}
@@ -199,9 +235,9 @@ func (wpp *StorageWpp) GenerateCandidates(ctx context.Context, randomness abi.Po
return cds, nil
}
-func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []abi.SectorInfo, rand abi.PoStRandomness) ([]abi.PoStProof, error) {
+func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []proof0.SectorInfo, rand abi.PoStRandomness) ([]proof0.PoStProof, error) {
if build.InsecurePoStValidation {
- return []abi.PoStProof{{ProofBytes: []byte("valid proof")}}, nil
+ return []proof0.PoStProof{{ProofBytes: []byte("valid proof")}}, nil
}
log.Infof("Computing WinningPoSt ;%+v; %v", ssi, rand)
diff --git a/storage/mockstorage/preseal.go b/storage/mockstorage/preseal.go
index fd4d0d69b..8ca789ba6 100644
--- a/storage/mockstorage/preseal.go
+++ b/storage/mockstorage/preseal.go
@@ -5,11 +5,12 @@ import (
"github.com/filecoin-project/go-address"
commcid "github.com/filecoin-project/go-fil-commcid"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
@@ -48,7 +49,7 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis
r := mock.CommDR(d)
preseal.CommR, _ = commcid.ReplicaCommitmentV1ToCID(r[:])
preseal.SectorID = abi.SectorNumber(i + 1)
- preseal.Deal = market.DealProposal{
+ preseal.Deal = market0.DealProposal{
PieceCID: preseal.CommD,
PieceSize: abi.PaddedPieceSize(ssize),
Client: k.Address,
diff --git a/storage/sealing.go b/storage/sealing.go
index 7d7140b98..2cd454e5b 100644
--- a/storage/sealing.go
+++ b/storage/sealing.go
@@ -5,7 +5,7 @@ import (
"io"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)
diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go
index b88ebcbae..bc8456a1f 100644
--- a/storage/sectorblocks/blocks.go
+++ b/storage/sectorblocks/blocks.go
@@ -15,8 +15,8 @@ import (
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
+ "github.com/filecoin-project/go-state-types/abi"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/node/modules/dtypes"
diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go
new file mode 100644
index 000000000..285995757
--- /dev/null
+++ b/storage/wdpost_changehandler.go
@@ -0,0 +1,537 @@
+package storage
+
+import (
+ "context"
+ "sync"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+const SubmitConfidence = 4
+
+type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error)
+type CompleteSubmitPoSTCb func(err error)
+
+type changeHandlerAPI interface {
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
+ startGeneratePoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, onComplete CompleteGeneratePoSTCb) context.CancelFunc
+ startSubmitPoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, posts []miner.SubmitWindowedPoStParams, onComplete CompleteSubmitPoSTCb) context.CancelFunc
+ onAbort(ts *types.TipSet, deadline *dline.Info)
+ failPost(err error, ts *types.TipSet, deadline *dline.Info)
+}
+
+type changeHandler struct {
+ api changeHandlerAPI
+ actor address.Address
+ proveHdlr *proveHandler
+ submitHdlr *submitHandler
+}
+
+func newChangeHandler(api changeHandlerAPI, actor address.Address) *changeHandler {
+ posts := newPostsCache()
+ p := newProver(api, posts)
+ s := newSubmitter(api, posts)
+ return &changeHandler{api: api, actor: actor, proveHdlr: p, submitHdlr: s}
+}
+
+func (ch *changeHandler) start() {
+ go ch.proveHdlr.run()
+ go ch.submitHdlr.run()
+}
+
+func (ch *changeHandler) update(ctx context.Context, revert *types.TipSet, advance *types.TipSet) error {
+ // Get the current deadline period
+ di, err := ch.api.StateMinerProvingDeadline(ctx, ch.actor, advance.Key())
+ if err != nil {
+ return err
+ }
+
+ if !di.PeriodStarted() {
+ return nil // not proving anything yet
+ }
+
+ hc := &headChange{
+ ctx: ctx,
+ revert: revert,
+ advance: advance,
+ di: di,
+ }
+
+ select {
+ case ch.proveHdlr.hcs <- hc:
+ case <-ch.proveHdlr.shutdownCtx.Done():
+ case <-ctx.Done():
+ }
+
+ select {
+ case ch.submitHdlr.hcs <- hc:
+ case <-ch.submitHdlr.shutdownCtx.Done():
+ case <-ctx.Done():
+ }
+
+ return nil
+}
+
+func (ch *changeHandler) shutdown() {
+ ch.proveHdlr.shutdown()
+ ch.submitHdlr.shutdown()
+}
+
+func (ch *changeHandler) currentTSDI() (*types.TipSet, *dline.Info) {
+ return ch.submitHdlr.currentTSDI()
+}
+
+// postsCache keeps a cache of PoSTs for each proving window
+type postsCache struct {
+ added chan *postInfo
+ lk sync.RWMutex
+ cache map[abi.ChainEpoch][]miner.SubmitWindowedPoStParams
+}
+
+func newPostsCache() *postsCache {
+ return &postsCache{
+ added: make(chan *postInfo, 16),
+ cache: make(map[abi.ChainEpoch][]miner.SubmitWindowedPoStParams),
+ }
+}
+
+func (c *postsCache) add(di *dline.Info, posts []miner.SubmitWindowedPoStParams) {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+
+ // TODO: clear cache entries older than chain finality
+ c.cache[di.Open] = posts
+
+ c.added <- &postInfo{
+ di: di,
+ posts: posts,
+ }
+}
+
+func (c *postsCache) get(di *dline.Info) ([]miner.SubmitWindowedPoStParams, bool) {
+ c.lk.RLock()
+ defer c.lk.RUnlock()
+
+ posts, ok := c.cache[di.Open]
+ return posts, ok
+}
+
+type headChange struct {
+ ctx context.Context
+ revert *types.TipSet
+ advance *types.TipSet
+ di *dline.Info
+}
+
+type currentPost struct {
+ di *dline.Info
+ abort context.CancelFunc
+}
+
+type postResult struct {
+ ts *types.TipSet
+ currPost *currentPost
+ posts []miner.SubmitWindowedPoStParams
+ err error
+}
+
+// proveHandler generates proofs
+type proveHandler struct {
+ api changeHandlerAPI
+ posts *postsCache
+
+ postResults chan *postResult
+ hcs chan *headChange
+
+ current *currentPost
+
+ shutdownCtx context.Context
+ shutdown context.CancelFunc
+
+ // Used for testing
+ processedHeadChanges chan *headChange
+ processedPostResults chan *postResult
+}
+
+func newProver(
+ api changeHandlerAPI,
+ posts *postsCache,
+) *proveHandler {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &proveHandler{
+ api: api,
+ posts: posts,
+ postResults: make(chan *postResult),
+ hcs: make(chan *headChange),
+ shutdownCtx: ctx,
+ shutdown: cancel,
+ }
+}
+
+func (p *proveHandler) run() {
+ // Abort proving on shutdown
+ defer func() {
+ if p.current != nil {
+ p.current.abort()
+ }
+ }()
+
+ for p.shutdownCtx.Err() == nil {
+ select {
+ case <-p.shutdownCtx.Done():
+ return
+
+ case hc := <-p.hcs:
+ // Head changed
+ p.processHeadChange(hc.ctx, hc.advance, hc.di)
+ if p.processedHeadChanges != nil {
+ p.processedHeadChanges <- hc
+ }
+
+ case res := <-p.postResults:
+ // Proof generation complete
+ p.processPostResult(res)
+ if p.processedPostResults != nil {
+ p.processedPostResults <- res
+ }
+ }
+ }
+}
+
+func (p *proveHandler) processHeadChange(ctx context.Context, newTS *types.TipSet, di *dline.Info) {
+ // If the post window has expired, abort the current proof
+ if p.current != nil && newTS.Height() >= p.current.di.Close {
+ // Cancel the context on the current proof
+ p.current.abort()
+
+ // Clear out the reference to the proof so that we can immediately
+ // start generating a new proof, without having to worry about state
+ // getting clobbered when the abort completes
+ p.current = nil
+ }
+
+ // Only generate one proof at a time
+ if p.current != nil {
+ return
+ }
+
+ // If the proof for the current post window has been generated, check the
+ // next post window
+ _, complete := p.posts.get(di)
+ for complete {
+ di = nextDeadline(di)
+ _, complete = p.posts.get(di)
+ }
+
+ // Check if the chain is above the Challenge height for the post window
+ if newTS.Height() < di.Challenge {
+ return
+ }
+
+ p.current = &currentPost{di: di}
+ curr := p.current
+ p.current.abort = p.api.startGeneratePoST(ctx, newTS, di, func(posts []miner.SubmitWindowedPoStParams, err error) {
+ p.postResults <- &postResult{ts: newTS, currPost: curr, posts: posts, err: err}
+ })
+}
+
+func (p *proveHandler) processPostResult(res *postResult) {
+ di := res.currPost.di
+ if res.err != nil {
+ // Proving failed so inform the API
+ p.api.failPost(res.err, res.ts, di)
+ log.Warnf("Aborted window post Proving (Deadline: %+v)", di)
+ p.api.onAbort(res.ts, di)
+
+ // Check if the current post has already been aborted
+ if p.current == res.currPost {
+ // If the current post was not already aborted, setting it to nil
+ // marks it as complete so that a new post can be started
+ p.current = nil
+ }
+ return
+ }
+
+ // Completed processing this proving window
+ p.current = nil
+
+ // Add the proofs to the cache
+ p.posts.add(di, res.posts)
+}
+
+type submitResult struct {
+ pw *postWindow
+ err error
+}
+
+type SubmitState string
+
+const (
+ SubmitStateStart SubmitState = "SubmitStateStart"
+ SubmitStateSubmitting SubmitState = "SubmitStateSubmitting"
+ SubmitStateComplete SubmitState = "SubmitStateComplete"
+)
+
+type postWindow struct {
+ ts *types.TipSet
+ di *dline.Info
+ submitState SubmitState
+ abort context.CancelFunc
+}
+
+type postInfo struct {
+ di *dline.Info
+ posts []miner.SubmitWindowedPoStParams
+}
+
+// submitHandler submits proofs on-chain
+type submitHandler struct {
+ api changeHandlerAPI
+ posts *postsCache
+
+ submitResults chan *submitResult
+ hcs chan *headChange
+
+ postWindows map[abi.ChainEpoch]*postWindow
+ getPostWindowReqs chan *getPWReq
+
+ shutdownCtx context.Context
+ shutdown context.CancelFunc
+
+ currentCtx context.Context
+ currentTS *types.TipSet
+ currentDI *dline.Info
+ getTSDIReq chan chan *tsdi
+
+ // Used for testing
+ processedHeadChanges chan *headChange
+ processedSubmitResults chan *submitResult
+ processedPostReady chan *postInfo
+}
+
+func newSubmitter(
+ api changeHandlerAPI,
+ posts *postsCache,
+) *submitHandler {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &submitHandler{
+ api: api,
+ posts: posts,
+ submitResults: make(chan *submitResult),
+ hcs: make(chan *headChange),
+ postWindows: make(map[abi.ChainEpoch]*postWindow),
+ getPostWindowReqs: make(chan *getPWReq),
+ getTSDIReq: make(chan chan *tsdi),
+ shutdownCtx: ctx,
+ shutdown: cancel,
+ }
+}
+
+func (s *submitHandler) run() {
+ // On shutdown, abort in-progress submits
+ defer func() {
+ for _, pw := range s.postWindows {
+ if pw.abort != nil {
+ pw.abort()
+ }
+ }
+ }()
+
+ for s.shutdownCtx.Err() == nil {
+ select {
+ case <-s.shutdownCtx.Done():
+ return
+
+ case hc := <-s.hcs:
+ // Head change
+ s.processHeadChange(hc.ctx, hc.revert, hc.advance, hc.di)
+ if s.processedHeadChanges != nil {
+ s.processedHeadChanges <- hc
+ }
+
+ case pi := <-s.posts.added:
+ // Proof generated
+ s.processPostReady(pi)
+ if s.processedPostReady != nil {
+ s.processedPostReady <- pi
+ }
+
+ case res := <-s.submitResults:
+ // Submit complete
+ s.processSubmitResult(res)
+ if s.processedSubmitResults != nil {
+ s.processedSubmitResults <- res
+ }
+
+ case pwreq := <-s.getPostWindowReqs:
+ // used by getPostWindow() to sync with run loop
+ pwreq.out <- s.postWindows[pwreq.di.Open]
+
+ case out := <-s.getTSDIReq:
+ // used by currentTSDI() to sync with run loop
+ out <- &tsdi{ts: s.currentTS, di: s.currentDI}
+ }
+ }
+}
+
+// processHeadChange is called when the chain head changes
+func (s *submitHandler) processHeadChange(ctx context.Context, revert *types.TipSet, advance *types.TipSet, di *dline.Info) {
+ s.currentCtx = ctx
+ s.currentTS = advance
+ s.currentDI = di
+
+ // Start tracking the current post window if we're not already
+ // TODO: clear post windows older than chain finality
+ if _, ok := s.postWindows[di.Open]; !ok {
+ s.postWindows[di.Open] = &postWindow{
+ di: di,
+ ts: advance,
+ submitState: SubmitStateStart,
+ }
+ }
+
+ // Apply the change to all post windows
+ for _, pw := range s.postWindows {
+ s.processHeadChangeForPW(ctx, revert, advance, pw)
+ }
+}
+
+func (s *submitHandler) processHeadChangeForPW(ctx context.Context, revert *types.TipSet, advance *types.TipSet, pw *postWindow) {
+ revertedToPrevDL := revert != nil && revert.Height() < pw.di.Open
+ expired := advance.Height() >= pw.di.Close
+
+ // If the chain was reverted back to the previous deadline, or if the post
+ // window has expired, abort submit
+ if pw.submitState == SubmitStateSubmitting && (revertedToPrevDL || expired) {
+ // Replace the aborted postWindow with a new one so that we can
+ // submit again at any time without the state getting clobbered
+ // when the abort completes
+ abort := pw.abort
+ if abort != nil {
+ pw = &postWindow{
+ di: pw.di,
+ ts: advance,
+ submitState: SubmitStateStart,
+ }
+ s.postWindows[pw.di.Open] = pw
+
+ // Abort the current submit
+ abort()
+ }
+ } else if pw.submitState == SubmitStateComplete && revertedToPrevDL {
+ // If submit for this deadline has completed, but the chain was
+ // reverted back to the previous deadline, reset the submit state to the
+ // starting state, so that it can be resubmitted
+ pw.submitState = SubmitStateStart
+ }
+
+ // Submit the proof to chain if the proof has been generated and the chain
+ // height is above confidence
+ s.submitIfReady(ctx, advance, pw)
+}
+
+// processPostReady is called when a proof generation completes
+func (s *submitHandler) processPostReady(pi *postInfo) {
+ pw, ok := s.postWindows[pi.di.Open]
+ if ok {
+ s.submitIfReady(s.currentCtx, s.currentTS, pw)
+ }
+}
+
+// submitIfReady submits a proof if the chain is high enough and the proof
+// has been generated for this deadline
+func (s *submitHandler) submitIfReady(ctx context.Context, advance *types.TipSet, pw *postWindow) {
+ // If the window has expired, there's nothing more to do.
+ if advance.Height() >= pw.di.Close {
+ return
+ }
+
+ // Check if we're already submitting, or already completed submit
+ if pw.submitState != SubmitStateStart {
+ return
+ }
+
+ // Check if we've reached the confidence height to submit
+ if advance.Height() < pw.di.Open+SubmitConfidence {
+ return
+ }
+
+ // Check if the proofs have been generated for this deadline
+ posts, ok := s.posts.get(pw.di)
+ if !ok {
+ return
+ }
+
+ // If there was nothing to prove, move straight to the complete state
+ if len(posts) == 0 {
+ pw.submitState = SubmitStateComplete
+ return
+ }
+
+ // Start submitting post
+ pw.submitState = SubmitStateSubmitting
+ pw.abort = s.api.startSubmitPoST(ctx, advance, pw.di, posts, func(err error) {
+ s.submitResults <- &submitResult{pw: pw, err: err}
+ })
+}
+
+// processSubmitResult is called with the response to a submit
+func (s *submitHandler) processSubmitResult(res *submitResult) {
+ if res.err != nil {
+ // Submit failed so inform the API and go back to the start state
+ s.api.failPost(res.err, res.pw.ts, res.pw.di)
+ log.Warnf("Aborted window post Submitting (Deadline: %+v)", res.pw.di)
+ s.api.onAbort(res.pw.ts, res.pw.di)
+
+ res.pw.submitState = SubmitStateStart
+ return
+ }
+
+ // Submit succeeded so move to complete state
+ res.pw.submitState = SubmitStateComplete
+}
+
+type tsdi struct {
+ ts *types.TipSet
+ di *dline.Info
+}
+
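+// currentTSDI returns the tipset and deadline info most recently seen by the
+// run loop. The request is serviced by the run loop itself, so no locking is
+// required.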
+func (s *submitHandler) currentTSDI() (*types.TipSet, *dline.Info) {
+ out := make(chan *tsdi)
+ s.getTSDIReq <- out
+ res := <-out
+ return res.ts, res.di
+}
+
+type getPWReq struct {
+ di *dline.Info
+ out chan *postWindow
+}
+
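+// getPostWindow returns the postWindow being tracked for the given deadline,
+// or nil if there is none, synchronizing with the run loop via a channel.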
+func (s *submitHandler) getPostWindow(di *dline.Info) *postWindow {
+ out := make(chan *postWindow)
+ s.getPostWindowReqs <- &getPWReq{di: di, out: out}
+ return <-out
+}
+
+// nextDeadline gets deadline info for the subsequent deadline
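+// When the current deadline is the last one in the proving period, the next
+// deadline wraps around to index 0 with PeriodStart advanced by
+// WPoStProvingPeriod.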
+func nextDeadline(currentDeadline *dline.Info) *dline.Info {
+ periodStart := currentDeadline.PeriodStart
+ newDeadline := currentDeadline.Index + 1
+ if newDeadline == miner.WPoStPeriodDeadlines {
+ newDeadline = 0
+ periodStart = periodStart + miner.WPoStProvingPeriod
+ }
+
+ return NewDeadlineInfo(periodStart, newDeadline, currentDeadline.CurrentEpoch)
+}
+
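+// NewDeadlineInfo constructs a dline.Info for the deadline at index
+// deadlineIdx within the proving period starting at periodStart, as observed
+// at currEpoch, using the network's window PoSt parameters.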
+func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info {
+ return dline.NewInfo(periodStart, deadlineIdx, currEpoch, miner.WPoStPeriodDeadlines, miner.WPoStProvingPeriod, miner.WPoStChallengeWindow, miner.WPoStChallengeLookback, miner.FaultDeclarationCutoff)
+}
diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go
new file mode 100644
index 000000000..6479c0d7e
--- /dev/null
+++ b/storage/wdpost_changehandler_test.go
@@ -0,0 +1,1173 @@
+package storage
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ tutils "github.com/filecoin-project/specs-actors/support/testing"
+
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/ipfs/go-cid"
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var dummyCid cid.Cid
+
+func init() {
+ dummyCid, _ = cid.Parse("bafkqaaa")
+}
+
+type proveRes struct {
+ posts []miner.SubmitWindowedPoStParams
+ err error
+}
+
+type postStatus string
+
+const (
+ postStatusStart postStatus = "postStatusStart"
+ postStatusProving postStatus = "postStatusProving"
+ postStatusComplete postStatus = "postStatusComplete"
+)
+
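+// mockAPI is a test double for the scheduler-facing API used by the change
+// handler. Proof generation and submission results are driven through
+// channels so tests control when each step completes, and the resulting
+// post and submit states can be inspected.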
+type mockAPI struct {
+ ch *changeHandler
+ deadline *dline.Info
+ proveResult chan *proveRes
+ submitResult chan error
+ onStateChange chan struct{}
+
+ tsLock sync.RWMutex
+ ts map[types.TipSetKey]*types.TipSet
+
+ abortCalledLock sync.RWMutex
+ abortCalled bool
+
+ statesLk sync.RWMutex
+ postStates map[abi.ChainEpoch]postStatus
+}
+
+func newMockAPI() *mockAPI {
+ return &mockAPI{
+ proveResult: make(chan *proveRes),
+ onStateChange: make(chan struct{}),
+ submitResult: make(chan error),
+ postStates: make(map[abi.ChainEpoch]postStatus),
+ ts: make(map[types.TipSetKey]*types.TipSet),
+ }
+}
+
+func (m *mockAPI) makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet {
+ m.tsLock.Lock()
+ defer m.tsLock.Unlock()
+
+ ts := makeTs(t, h)
+ m.ts[ts.Key()] = ts
+ return ts
+}
+
+func (m *mockAPI) setDeadline(di *dline.Info) {
+ m.tsLock.Lock()
+ defer m.tsLock.Unlock()
+
+ m.deadline = di
+}
+
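+// getDeadline derives the deadline info for the given epoch, assuming
+// deadline 0 opens at epoch 0 and each deadline spans WPoStChallengeWindow
+// epochs.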
+func (m *mockAPI) getDeadline(currentEpoch abi.ChainEpoch) *dline.Info {
+ close := miner.WPoStChallengeWindow - 1
+ dlIdx := uint64(0)
+ for close < currentEpoch {
+ close += miner.WPoStChallengeWindow
+ dlIdx++
+ }
+ return NewDeadlineInfo(0, dlIdx, currentEpoch)
+}
+
+func (m *mockAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
+ m.tsLock.RLock()
+ defer m.tsLock.RUnlock()
+
+ ts, ok := m.ts[key]
+ if !ok {
+ panic(fmt.Sprintf("unexpected tipset key %s", key))
+ }
+
+ if m.deadline != nil {
+ m.deadline.CurrentEpoch = ts.Height()
+ return m.deadline, nil
+ }
+
+ return m.getDeadline(ts.Height()), nil
+}
+
+func (m *mockAPI) startGeneratePoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+ completeGeneratePoST CompleteGeneratePoSTCb,
+) context.CancelFunc {
+ ctx, cancel := context.WithCancel(ctx)
+
+ m.statesLk.Lock()
+ defer m.statesLk.Unlock()
+ m.postStates[deadline.Open] = postStatusProving
+
+ go func() {
+ defer cancel()
+
+ select {
+ case psRes := <-m.proveResult:
+ m.statesLk.Lock()
+ {
+ if psRes.err == nil {
+ m.postStates[deadline.Open] = postStatusComplete
+ } else {
+ m.postStates[deadline.Open] = postStatusStart
+ }
+ }
+ m.statesLk.Unlock()
+ completeGeneratePoST(psRes.posts, psRes.err)
+ case <-ctx.Done():
+ completeGeneratePoST(nil, ctx.Err())
+ }
+ }()
+
+ return cancel
+}
+
+func (m *mockAPI) getPostStatus(di *dline.Info) postStatus {
+ m.statesLk.RLock()
+ defer m.statesLk.RUnlock()
+
+ status, ok := m.postStates[di.Open]
+ if ok {
+ return status
+ }
+ return postStatusStart
+}
+
+func (m *mockAPI) startSubmitPoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+ posts []miner.SubmitWindowedPoStParams,
+ completeSubmitPoST CompleteSubmitPoSTCb,
+) context.CancelFunc {
+ ctx, cancel := context.WithCancel(ctx)
+
+ go func() {
+ defer cancel()
+
+ select {
+ case err := <-m.submitResult:
+ completeSubmitPoST(err)
+ case <-ctx.Done():
+ completeSubmitPoST(ctx.Err())
+ }
+ }()
+
+ return cancel
+}
+
+func (m *mockAPI) onAbort(ts *types.TipSet, deadline *dline.Info) {
+ m.abortCalledLock.Lock()
+ defer m.abortCalledLock.Unlock()
+ m.abortCalled = true
+}
+
+func (m *mockAPI) wasAbortCalled() bool {
+ m.abortCalledLock.RLock()
+ defer m.abortCalledLock.RUnlock()
+ return m.abortCalled
+}
+
+func (m *mockAPI) failPost(err error, ts *types.TipSet, deadline *dline.Info) {
+}
+
+func (m *mockAPI) setChangeHandler(ch *changeHandler) {
+ m.ch = ch
+}
+
+// TestChangeHandlerBasic verifies we can generate a proof and submit it
+func TestChangeHandlerBasic(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Move to the correct height to submit the proof
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Send a response to the submit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+}
+
+// TestChangeHandlerFromProvingToSubmittingNoHeadChange tests that when the
+// chain has already advanced past the confidence interval, we move from
+// proving to submitting without a further head change in between.
+func TestChangeHandlerFromProvingToSubmittingNoHeadChange(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Monitor submit handler's processing of incoming postInfo
+ s.ch.submitHdlr.processedPostReady = make(chan *postInfo)
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Trigger a head change that advances the chain beyond the submit
+ // confidence
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should be no change to state yet
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Should move directly to submitting state with no further head changes
+ <-s.ch.submitHdlr.processedPostReady
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+}
+
+// TestChangeHandlerFromProvingEmptyProofsToComplete tests that when no proofs
+// are generated, nothing is submitted to the chain, but the submit state
+// still moves to complete
+func TestChangeHandlerFromProvingEmptyProofsToComplete(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Monitor submit handler's processing of incoming postInfo
+ s.ch.submitHdlr.processedPostReady = make(chan *postInfo)
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Trigger a head change that advances the chain beyond the submit
+ // confidence
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should be no change to state yet
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs with an empty proofs array
+ posts := []miner.SubmitWindowedPoStParams{}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Should move directly to submitting complete state
+ <-s.ch.submitHdlr.processedPostReady
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+}
+
+// TestChangeHandlerDontStartUntilProvingPeriod tests that the handler
+// ignores updates until the proving period has been reached.
+func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ periodStart := miner.WPoStProvingPeriod
+ dlIdx := uint64(1)
+ currentEpoch := abi.ChainEpoch(10)
+ di := NewDeadlineInfo(periodStart, dlIdx, currentEpoch)
+ mock.setDeadline(di)
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Nothing should happen because the proving period has not started
+ select {
+ case <-s.ch.proveHdlr.processedHeadChanges:
+ require.Fail(t, "unexpected prove change")
+ case <-s.ch.submitHdlr.processedHeadChanges:
+ require.Fail(t, "unexpected submit change")
+ case <-time.After(10 * time.Millisecond):
+ }
+
+ // Advance the head to the next proving period's first epoch
+ currentEpoch = periodStart + miner.WPoStChallengeWindow
+ di = NewDeadlineInfo(periodStart, dlIdx, currentEpoch)
+ mock.setDeadline(di)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+}
+
+// TestChangeHandlerStartProvingNextDeadline verifies that the proof handler
+// starts proving the next deadline after the current one
+func TestChangeHandlerStartProvingNextDeadline(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Trigger a head change that advances the chain beyond the submit
+ // confidence
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should be no change to state yet
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Trigger head change that advances the chain to the Challenge epoch for
+ // the next deadline
+ go func() {
+ di = nextDeadline(di)
+ currentEpoch = di.Challenge
+ triggerHeadAdvance(t, s, currentEpoch)
+ }()
+
+ // Should start generating next window's proof
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+}
+
+// TestChangeHandlerProvingRounds verifies we can generate several rounds of
+// proofs as the chain head advances
+func TestChangeHandlerProvingRounds(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ completeProofIndex := abi.ChainEpoch(10)
+ for currentEpoch := abi.ChainEpoch(1); currentEpoch < miner.WPoStChallengeWindow*5; currentEpoch++ {
+ // Trigger a head change
+ di := mock.getDeadline(currentEpoch)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Wait for prover to process head change
+ <-s.ch.proveHdlr.processedHeadChanges
+
+ completeProofEpoch := di.Open + completeProofIndex
+ next := nextDeadline(di)
+ //fmt.Println("epoch", currentEpoch, s.mock.getPostStatus(di), "next", s.mock.getPostStatus(next))
+ if currentEpoch >= next.Challenge {
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+ // At the next deadline's challenge epoch, should start proving
+ // for that epoch
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(next))
+ } else if currentEpoch > completeProofEpoch {
+ // After proving for the round is complete, should be in complete state
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+ require.Equal(t, postStatusStart, s.mock.getPostStatus(next))
+ } else {
+ // Until proving completes, should be in the proving state
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+ require.Equal(t, postStatusStart, s.mock.getPostStatus(next))
+ }
+
+ // Wait for submitter to process head change
+ <-s.ch.submitHdlr.processedHeadChanges
+
+ completeSubmitEpoch := completeProofEpoch + 1
+ //fmt.Println("epoch", currentEpoch, s.submitState(di))
+ if currentEpoch > completeSubmitEpoch {
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+ } else if currentEpoch > completeProofEpoch {
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+ } else {
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+ }
+
+ if currentEpoch == completeProofEpoch {
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+ }
+
+ if currentEpoch == completeSubmitEpoch {
+ // Send a response to the submit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+ }
+ }
+}
+
+// TestChangeHandlerProvingErrorRecovery verifies that the proof handler
+// recovers correctly from an error
+func TestChangeHandlerProvingErrorRecovery(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Send an error response to the call to generate proofs
+ mock.proveResult <- &proveRes{err: fmt.Errorf("err")}
+
+ // Should abort and then move to start state
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusStart, s.mock.getPostStatus(di))
+
+ // Trigger a head change
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Send a success response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+}
+
+// TestChangeHandlerSubmitErrorRecovery verifies that the submit handler
+// recovers correctly from an error
+func TestChangeHandlerSubmitErrorRecovery(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Move to the correct height to submit the proof
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Read from prover incoming channel (so as not to block)
+ <-s.ch.proveHdlr.processedHeadChanges
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Send an error response to the call to submit
+ mock.submitResult <- fmt.Errorf("err")
+
+ // Should abort and then move back to the start state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+ require.True(t, mock.wasAbortCalled())
+
+ // Trigger another head change
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Read from prover incoming channel (so as not to block)
+ <-s.ch.proveHdlr.processedHeadChanges
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Send a response to the submit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+}
+
+// TestChangeHandlerProveExpiry verifies that the prove handler
+// behaves correctly on expiry
+func TestChangeHandlerProveExpiry(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Move to a height that expires the current proof
+ currentEpoch = miner.WPoStChallengeWindow
+ di = mock.getDeadline(currentEpoch)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should trigger an abort and start proving for the new deadline
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+ <-s.ch.proveHdlr.processedPostResults
+ require.True(t, mock.wasAbortCalled())
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+}
+
+// TestChangeHandlerSubmitExpiry verifies that the submit handler
+// behaves correctly on expiry
+func TestChangeHandlerSubmitExpiry(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Ignore prove handler head change processing for this test
+ s.ch.proveHdlr.processedHeadChanges = nil
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := abi.ChainEpoch(1)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Move to the correct height to submit the proof
+ currentEpoch = 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Move to a height that expires the submit
+ currentEpoch = miner.WPoStChallengeWindow
+ di = mock.getDeadline(currentEpoch)
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should trigger an abort and move back to start state
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.True(t, mock.wasAbortCalled())
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+ }()
+
+ wg.Wait()
+}
+
+// TestChangeHandlerProveRevert verifies that the prove handler
+// behaves correctly on revert
+func TestChangeHandlerProveRevert(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := miner.WPoStChallengeWindow
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should start proving
+ <-s.ch.proveHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Trigger a revert to the previous epoch
+ revertEpoch := di.Open - 5
+ go triggerHeadChange(t, s, revertEpoch, currentEpoch)
+
+ // Should be no change
+ <-s.ch.proveHdlr.processedHeadChanges
+ require.Equal(t, postStatusProving, s.mock.getPostStatus(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+ require.False(t, mock.wasAbortCalled())
+}
+
+// TestChangeHandlerSubmittingRevert verifies that the submit handler
+// behaves correctly when there's a revert from the submitting state
+func TestChangeHandlerSubmittingRevert(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Ignore prove handler head change processing for this test
+ s.ch.proveHdlr.processedHeadChanges = nil
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := miner.WPoStChallengeWindow
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Move to the correct height to submit the proof
+ currentEpoch = currentEpoch + 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Trigger a revert to the previous epoch
+ revertEpoch := di.Open - 5
+ go triggerHeadChange(t, s, revertEpoch, currentEpoch)
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ // Should trigger an abort
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.True(t, mock.wasAbortCalled())
+ }()
+
+ // Should resubmit current epoch
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+ }()
+
+ wg.Wait()
+
+ // Send a response to the resubmit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+}
+
+// TestChangeHandlerSubmitCompleteRevert verifies that the submit handler
+// behaves correctly when there's a revert from the submit complete state
+func TestChangeHandlerSubmitCompleteRevert(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Ignore prove handler head change processing for this test
+ s.ch.proveHdlr.processedHeadChanges = nil
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := miner.WPoStChallengeWindow
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ di := mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateStart, s.submitState(di))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(di))
+
+ // Move to the correct height to submit the proof
+ currentEpoch = currentEpoch + 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state
+ <-s.ch.submitHdlr.processedHeadChanges
+ di = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Send a response to the resubmit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+
+ // Trigger a revert to the previous epoch
+ revertEpoch := di.Open - 5
+ go triggerHeadChange(t, s, revertEpoch, currentEpoch)
+
+ // Should resubmit current epoch
+ <-s.ch.submitHdlr.processedHeadChanges
+ require.Equal(t, SubmitStateSubmitting, s.submitState(di))
+
+ // Send a response to the resubmit call
+ mock.submitResult <- nil
+
+ // Should move to the complete state
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(di))
+}
+
+// TestChangeHandlerSubmitRevertTwoEpochs verifies that the submit handler
+// behaves correctly when the revert is two epochs deep
+func TestChangeHandlerSubmitRevertTwoEpochs(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Ignore prove handler head change processing for this test
+ s.ch.proveHdlr.processedHeadChanges = nil
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := miner.WPoStChallengeWindow
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE1 := mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateStart, s.submitState(diE1))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: diE1.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE1))
+
+ // Move to the challenge epoch for the next deadline
+ diE2 := nextDeadline(diE1)
+ currentEpoch = diE2.Challenge
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state for epoch 1
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE1 = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE1))
+
+ // Send a response to the submit call for epoch 1
+ mock.submitResult <- nil
+
+ // Should move to the complete state for epoch 1
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(diE1))
+
+ // Should start proving epoch 2
+ // Send a response to the call to generate proofs
+ postsE2 := []miner.SubmitWindowedPoStParams{{Deadline: diE2.Index}}
+ mock.proveResult <- &proveRes{posts: postsE2}
+
+ // Should move to proving complete for epoch 2
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE2))
+
+ // Move to the correct height to submit the proof for epoch 2
+ currentEpoch = diE2.Open + 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state for epoch 2
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE2 = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE2))
+
+ // Trigger a revert through two epochs (from epoch 2 to epoch 0)
+ revertEpoch := diE1.Open - 5
+ go triggerHeadChange(t, s, revertEpoch, currentEpoch)
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ // Should trigger an abort
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.True(t, mock.wasAbortCalled())
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedHeadChanges
+
+ // Should reset epoch 1 (that is expired) to start state
+ require.Equal(t, SubmitStateStart, s.submitState(diE1))
+ // Should resubmit epoch 2
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE2))
+ }()
+
+ wg.Wait()
+
+ // Send a response to the resubmit call for epoch 2
+ mock.submitResult <- nil
+
+ // Should move to the complete state for epoch 2
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(diE2))
+}
+
+// TestChangeHandlerSubmitRevertAdvanceLess verifies that the submit handler
+// behaves correctly when the revert is two epochs deep and the advance is
+// to a lower height than before
+func TestChangeHandlerSubmitRevertAdvanceLess(t *testing.T) {
+ s := makeScaffolding(t)
+ mock := s.mock
+
+ // Ignore prove handler head change processing for this test
+ s.ch.proveHdlr.processedHeadChanges = nil
+
+ defer s.ch.shutdown()
+ s.ch.start()
+
+ // Trigger a head change
+ currentEpoch := miner.WPoStChallengeWindow
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Submitter doesn't have anything to do yet
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE1 := mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateStart, s.submitState(diE1))
+
+ // Send a response to the call to generate proofs
+ posts := []miner.SubmitWindowedPoStParams{{Deadline: diE1.Index}}
+ mock.proveResult <- &proveRes{posts: posts}
+
+ // Should move to proving complete
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE1))
+
+ // Move to the challenge epoch for the next deadline
+ diE2 := nextDeadline(diE1)
+ currentEpoch = diE2.Challenge
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state for epoch 1
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE1 = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE1))
+
+ // Send a response to the submit call for epoch 1
+ mock.submitResult <- nil
+
+ // Should move to the complete state for epoch 1
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(diE1))
+
+ // Should start proving epoch 2
+ // Send a response to the call to generate proofs
+ postsE2 := []miner.SubmitWindowedPoStParams{{Deadline: diE2.Index}}
+ mock.proveResult <- &proveRes{posts: postsE2}
+
+ // Should move to proving complete for epoch 2
+ <-s.ch.proveHdlr.processedPostResults
+ require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE2))
+
+ // Move to the correct height to submit the proof for epoch 2
+ currentEpoch = diE2.Open + 1 + SubmitConfidence
+ go triggerHeadAdvance(t, s, currentEpoch)
+
+ // Should move to submitting state for epoch 2
+ <-s.ch.submitHdlr.processedHeadChanges
+ diE2 = mock.getDeadline(currentEpoch)
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE2))
+
+ // Trigger a revert through two epochs (from epoch 2 to epoch 0)
+ // then advance to the previous epoch (to epoch 1)
+ revertEpoch := diE1.Open - 5
+ currentEpoch = diE2.Open - 1
+ go triggerHeadChange(t, s, revertEpoch, currentEpoch)
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ // Should trigger an abort
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.True(t, mock.wasAbortCalled())
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ <-s.ch.submitHdlr.processedHeadChanges
+
+ // Should resubmit epoch 1
+ require.Equal(t, SubmitStateSubmitting, s.submitState(diE1))
+ // Should reset epoch 2 to start state
+ require.Equal(t, SubmitStateStart, s.submitState(diE2))
+ }()
+
+ wg.Wait()
+
+ // Send a response to the resubmit call for epoch 1
+ mock.submitResult <- nil
+
+ // Should move to the complete state for epoch 1
+ <-s.ch.submitHdlr.processedSubmitResults
+ require.Equal(t, SubmitStateComplete, s.submitState(diE1))
+}
+
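+// smScaffolding bundles the mock API and the change handler under test so
+// that tests can drive head changes and inspect the resulting state.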
+type smScaffolding struct {
+ ctx context.Context
+ mock *mockAPI
+ ch *changeHandler
+}
+
+func makeScaffolding(t *testing.T) *smScaffolding {
+ ctx := context.Background()
+ actor := tutils.NewActorAddr(t, "actor")
+ mock := newMockAPI()
+ ch := newChangeHandler(mock, actor)
+ mock.setChangeHandler(ch)
+
+ ch.proveHdlr.processedHeadChanges = make(chan *headChange)
+ ch.proveHdlr.processedPostResults = make(chan *postResult)
+
+ ch.submitHdlr.processedHeadChanges = make(chan *headChange)
+ ch.submitHdlr.processedSubmitResults = make(chan *submitResult)
+
+ return &smScaffolding{
+ ctx: ctx,
+ mock: mock,
+ ch: ch,
+ }
+}
+
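+// triggerHeadAdvance simulates the chain head advancing to the given height
+// with no reverted tipset.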
+func triggerHeadAdvance(t *testing.T, s *smScaffolding, height abi.ChainEpoch) {
+ ts := s.mock.makeTs(t, height)
+ err := s.ch.update(s.ctx, nil, ts)
+ require.NoError(t, err)
+}
+
+func triggerHeadChange(t *testing.T, s *smScaffolding, revertHeight, advanceHeight abi.ChainEpoch) {
+ tsRev := s.mock.makeTs(t, revertHeight)
+ tsAdv := s.mock.makeTs(t, advanceHeight)
+ err := s.ch.update(s.ctx, tsRev, tsAdv)
+ require.NoError(t, err)
+}
+
+func (s *smScaffolding) submitState(di *dline.Info) SubmitState {
+ return s.ch.submitHdlr.getPostWindow(di).submitState
+}
+
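+// makeTs constructs a minimal two-block tipset at the given height, with
+// dummy CIDs and placeholder BLS signatures, for driving head changes in tests.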
+func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet {
+ var parents []cid.Cid
+ msgcid := dummyCid
+
+ a, _ := address.NewFromString("t00")
+ b, _ := address.NewFromString("t02")
+ var ts, err = types.NewTipSet([]*types.BlockHeader{
+ {
+ Height: h,
+ Miner: a,
+
+ Parents: parents,
+
+ Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}},
+
+ ParentStateRoot: dummyCid,
+ Messages: msgcid,
+ ParentMessageReceipts: dummyCid,
+
+ BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
+ BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
+ },
+ {
+ Height: h,
+ Miner: b,
+
+ Parents: parents,
+
+ Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}},
+
+ ParentStateRoot: dummyCid,
+ Messages: msgcid,
+ ParentMessageReceipts: dummyCid,
+
+ BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
+ BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
+ },
+ })
+
+ require.NoError(t, err)
+
+ return ts
+}
diff --git a/storage/wdpost_journal.go b/storage/wdpost_journal.go
new file mode 100644
index 000000000..48eb2f2b1
--- /dev/null
+++ b/storage/wdpost_journal.go
@@ -0,0 +1,75 @@
+package storage
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+
+ "github.com/ipfs/go-cid"
+)
+
+// SchedulerState defines the possible states in which the scheduler could be,
+// for the purposes of journalling.
+type SchedulerState string
+
+const (
+ // SchedulerStateStarted gets recorded when a WdPoSt cycle for an
+ // epoch begins.
+ SchedulerStateStarted = SchedulerState("started")
+ // SchedulerStateAborted gets recorded when a WdPoSt cycle for an
+ // epoch is aborted, normally because of a chain reorg or advancement.
+ SchedulerStateAborted = SchedulerState("aborted")
+ // SchedulerStateFaulted gets recorded when a WdPoSt cycle for an
+ // epoch terminates abnormally, in which case the error is also recorded.
+ SchedulerStateFaulted = SchedulerState("faulted")
+ // SchedulerStateSucceeded gets recorded when a WdPoSt cycle for an
+ // epoch ends successfully.
+ SchedulerStateSucceeded = SchedulerState("succeeded")
+)
+
+// Journal event types.
+const (
+ evtTypeWdPoStScheduler = iota
+ evtTypeWdPoStProofs
+ evtTypeWdPoStRecoveries
+ evtTypeWdPoStFaults
+)
+
+// evtCommon is a common set of attributes for Windowed PoSt journal events.
+type evtCommon struct {
+ Deadline *dline.Info
+ Height abi.ChainEpoch
+ TipSet []cid.Cid
+ Error error `json:",omitempty"`
+}
+
+// WdPoStSchedulerEvt is the journal event that gets recorded on scheduler
+// actions.
+type WdPoStSchedulerEvt struct {
+ evtCommon
+ State SchedulerState
+}
+
+// WdPoStProofsProcessedEvt is the journal event that gets recorded when
+// Windowed PoSt proofs have been processed.
+type WdPoStProofsProcessedEvt struct {
+ evtCommon
+ Partitions []miner.PoStPartition
+ MessageCID cid.Cid `json:",omitempty"`
+}
+
+// WdPoStRecoveriesProcessedEvt is the journal event that gets recorded when
+// Windowed PoSt recoveries have been processed.
+type WdPoStRecoveriesProcessedEvt struct {
+ evtCommon
+ Declarations []miner.RecoveryDeclaration
+ MessageCID cid.Cid `json:",omitempty"`
+}
+
+// WdPoStFaultsProcessedEvt is the journal event that gets recorded when
+// Windowed PoSt faults have been processed.
+type WdPoStFaultsProcessedEvt struct {
+ evtCommon
+ Declarations []miner.FaultDeclaration
+ MessageCID cid.Cid `json:",omitempty"`
+}
diff --git a/storage/wdpost_nextdl_test.go b/storage/wdpost_nextdl_test.go
new file mode 100644
index 000000000..4a23bad65
--- /dev/null
+++ b/storage/wdpost_nextdl_test.go
@@ -0,0 +1,38 @@
+package storage
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+)
+
+func TestNextDeadline(t *testing.T) {
+ periodStart := abi.ChainEpoch(0)
+ deadlineIdx := 0
+ currentEpoch := abi.ChainEpoch(10)
+
+ di := NewDeadlineInfo(periodStart, uint64(deadlineIdx), currentEpoch)
+ require.EqualValues(t, 0, di.Index)
+ require.EqualValues(t, 0, di.PeriodStart)
+ require.EqualValues(t, -20, di.Challenge)
+ require.EqualValues(t, 0, di.Open)
+ require.EqualValues(t, 60, di.Close)
+
+ for i := 1; i < 1+int(miner.WPoStPeriodDeadlines)*2; i++ {
+ di = nextDeadline(di)
+ deadlineIdx = i % int(miner.WPoStPeriodDeadlines)
+ expPeriodStart := int(miner.WPoStProvingPeriod) * (i / int(miner.WPoStPeriodDeadlines))
+ expOpen := expPeriodStart + deadlineIdx*int(miner.WPoStChallengeWindow)
+ expClose := expOpen + int(miner.WPoStChallengeWindow)
+ expChallenge := expOpen - int(miner.WPoStChallengeLookback)
+ //fmt.Printf("%d: %d@%d %d-%d (%d)\n", i, expPeriodStart, deadlineIdx, expOpen, expClose, expChallenge)
+ require.EqualValues(t, deadlineIdx, di.Index)
+ require.EqualValues(t, expPeriodStart, di.PeriodStart)
+ require.EqualValues(t, expOpen, di.Open)
+ require.EqualValues(t, expClose, di.Close)
+ require.EqualValues(t, expChallenge, di.Challenge)
+ }
+}
diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go
index 2e0ed1c84..6aa3d5188 100644
--- a/storage/wdpost_run.go
+++ b/storage/wdpost_run.go
@@ -3,30 +3,47 @@ package storage
import (
"bytes"
"context"
- "errors"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/abi/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- "github.com/filecoin-project/specs-actors/actors/crypto"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+
"go.opencensus.io/trace"
"golang.org/x/xerrors"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/journal"
)
-var errNoPartitions = errors.New("no partitions")
+func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dline.Info) {
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+ c := evtCommon{Error: err}
+ if ts != nil {
+ c.Deadline = deadline
+ c.Height = ts.Height()
+ c.TipSet = ts.Cids()
+ }
+ return WdPoStSchedulerEvt{
+ evtCommon: c,
+ State: SchedulerStateFaulted,
+ }
+ })
-func (s *WindowPoStScheduler) failPost(deadline *miner.DeadlineInfo) {
- log.Errorf("TODO")
+	log.Errorf("Got err %+v - TODO handle errors", err)
/*s.failLk.Lock()
if eps > s.failed {
s.failed = eps
@@ -34,37 +51,137 @@ func (s *WindowPoStScheduler) failPost(deadline *miner.DeadlineInfo) {
s.failLk.Unlock()*/
}
-func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *miner.DeadlineInfo, ts *types.TipSet) {
+// recordProofsEvent records a successful proofs_processed event in the
+// journal, even if it was a noop (no partitions).
+func (s *WindowPoStScheduler) recordProofsEvent(partitions []miner.PoStPartition, mcid cid.Cid) {
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStProofs], func() interface{} {
+ return &WdPoStProofsProcessedEvt{
+ evtCommon: s.getEvtCommon(nil),
+ Partitions: partitions,
+ MessageCID: mcid,
+ }
+ })
+}
+
+// startGeneratePoST kicks off the process of generating a PoST
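+// It records a "started" scheduler event and returns a CancelFunc that aborts
+// generation; the result is delivered via the completeGeneratePoST callback.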
+func (s *WindowPoStScheduler) startGeneratePoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+ completeGeneratePoST CompleteGeneratePoSTCb,
+) context.CancelFunc {
ctx, abort := context.WithCancel(ctx)
-
- s.abort = abort
- s.activeDeadline = deadline
-
go func() {
defer abort()
- ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.doPost")
- defer span.End()
-
- proof, err := s.runPost(ctx, *deadline, ts)
- switch err {
- case errNoPartitions:
- return
- case nil:
- if err := s.submitPost(ctx, proof); err != nil {
- log.Errorf("submitPost failed: %+v", err)
- s.failPost(deadline)
- return
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+ return WdPoStSchedulerEvt{
+ evtCommon: s.getEvtCommon(nil),
+ State: SchedulerStateStarted,
}
- default:
- log.Errorf("runPost failed: %+v", err)
- s.failPost(deadline)
- return
- }
+ })
+
+ posts, err := s.runGeneratePoST(ctx, ts, deadline)
+ completeGeneratePoST(posts, err)
}()
+
+ return abort
}
-func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check abi.BitField) (abi.BitField, error) {
+// runGeneratePoST generates the PoST
+func (s *WindowPoStScheduler) runGeneratePoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+) ([]miner.SubmitWindowedPoStParams, error) {
+ ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST")
+ defer span.End()
+
+ posts, err := s.runPost(ctx, *deadline, ts)
+ if err != nil {
+ log.Errorf("runPost failed: %+v", err)
+ return nil, err
+ }
+
+ if len(posts) == 0 {
+ s.recordProofsEvent(nil, cid.Undef)
+ }
+
+ return posts, nil
+}
+
+// startSubmitPoST kicks off the process of submitting PoST
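+// On success it records a "succeeded" scheduler event; the result is
+// delivered via the completeSubmitPoST callback, and the returned CancelFunc
+// aborts the submission.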
+func (s *WindowPoStScheduler) startSubmitPoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+ posts []miner.SubmitWindowedPoStParams,
+ completeSubmitPoST CompleteSubmitPoSTCb,
+) context.CancelFunc {
+
+ ctx, abort := context.WithCancel(ctx)
+ go func() {
+ defer abort()
+
+ err := s.runSubmitPoST(ctx, ts, deadline, posts)
+ if err == nil {
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+ return WdPoStSchedulerEvt{
+ evtCommon: s.getEvtCommon(nil),
+ State: SchedulerStateSucceeded,
+ }
+ })
+ }
+ completeSubmitPoST(err)
+ }()
+
+ return abort
+}
+
+// runSubmitPoST submits PoST
+func (s *WindowPoStScheduler) runSubmitPoST(
+ ctx context.Context,
+ ts *types.TipSet,
+ deadline *dline.Info,
+ posts []miner.SubmitWindowedPoStParams,
+) error {
+ if len(posts) == 0 {
+ return nil
+ }
+
+ ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.submitPoST")
+ defer span.End()
+
+ // Get randomness from tickets
+ commEpoch := deadline.Open
+ commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
+ if err != nil {
+ err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
+ log.Errorf("submitPost failed: %+v", err)
+
+ return err
+ }
+
+ var submitErr error
+ for i := range posts {
+ // Add randomness to PoST
+ post := &posts[i]
+ post.ChainCommitEpoch = commEpoch
+ post.ChainCommitRand = commRand
+
+ // Submit PoST
+		sm, err := s.submitPost(ctx, post)
+		if err != nil {
+			// Remember the error (without shadowing submitErr) so that it is
+			// returned after all submissions have been attempted.
+			submitErr = err
+			log.Errorf("submit window post failed: %+v", err)
+		} else {
+			s.recordProofsEvent(post.Partitions, sm.Cid())
+		}
+ }
+
+ return submitErr
+}
+
+func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) {
spt, err := s.proofType.RegisteredSealProof()
if err != nil {
return bitfield.BitField{}, xerrors.Errorf("getting seal proof type: %w", err)
@@ -109,25 +226,24 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check abi.BitFie
return sbf, nil
}
-func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
+func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
defer span.End()
+ faulty := uint64(0)
params := &miner.DeclareFaultsRecoveredParams{
Recoveries: []miner.RecoveryDeclaration{},
}
- faulty := uint64(0)
-
for partIdx, partition := range partitions {
- unrecovered, err := bitfield.SubtractBitField(partition.Faults, partition.Recoveries)
+ unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors)
if err != nil {
- return xerrors.Errorf("subtracting recovered set from fault set: %w", err)
+ return nil, nil, xerrors.Errorf("subtracting recovered set from fault set: %w", err)
}
uc, err := unrecovered.Count()
if err != nil {
- return xerrors.Errorf("counting unrecovered sectors: %w", err)
+ return nil, nil, xerrors.Errorf("counting unrecovered sectors: %w", err)
}
if uc == 0 {
@@ -138,13 +254,13 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
recovered, err := s.checkSectors(ctx, unrecovered)
if err != nil {
- return xerrors.Errorf("checking unrecovered sectors: %w", err)
+ return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err)
}
// if all sectors failed to recover, don't declare recoveries
recoveredCount, err := recovered.Count()
if err != nil {
- return xerrors.Errorf("counting recovered sectors: %w", err)
+ return nil, nil, xerrors.Errorf("counting recovered sectors: %w", err)
}
if recoveredCount == 0 {
@@ -158,23 +274,24 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
})
}
- if len(params.Recoveries) == 0 {
+ recoveries := params.Recoveries
+ if len(recoveries) == 0 {
if faulty != 0 {
log.Warnw("No recoveries to declare", "deadline", dlIdx, "faulty", faulty)
}
- return nil
+ return recoveries, nil, nil
}
enc, aerr := actors.SerializeParams(params)
if aerr != nil {
- return xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
+ return recoveries, nil, xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
}
msg := &types.Message{
To: s.actor,
From: s.worker,
- Method: builtin.MethodsMiner.DeclareFaultsRecovered,
+ Method: builtin0.MethodsMiner.DeclareFaultsRecovered,
Params: enc,
Value: types.NewInt(0),
}
@@ -183,52 +300,51 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)})
if err != nil {
- return xerrors.Errorf("pushing message to mpool: %w", err)
+ return recoveries, sm, xerrors.Errorf("pushing message to mpool: %w", err)
}
log.Warnw("declare faults recovered Message CID", "cid", sm.Cid())
rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
if err != nil {
- return xerrors.Errorf("declare faults recovered wait error: %w", err)
+ return recoveries, sm, xerrors.Errorf("declare faults recovered wait error: %w", err)
}
if rec.Receipt.ExitCode != 0 {
- return xerrors.Errorf("declare faults recovered wait non-0 exit code: %d", rec.Receipt.ExitCode)
+ return recoveries, sm, xerrors.Errorf("declare faults recovered wait non-0 exit code: %d", rec.Receipt.ExitCode)
}
- return nil
+ return recoveries, sm, nil
}
-func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
+func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
defer span.End()
+ bad := uint64(0)
params := &miner.DeclareFaultsParams{
Faults: []miner.FaultDeclaration{},
}
- bad := uint64(0)
-
for partIdx, partition := range partitions {
- toCheck, err := partition.ActiveSectors()
+ nonFaulty, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
if err != nil {
- return xerrors.Errorf("getting active sectors: %w", err)
+ return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
}
- good, err := s.checkSectors(ctx, toCheck)
+ good, err := s.checkSectors(ctx, nonFaulty)
if err != nil {
- return xerrors.Errorf("checking sectors: %w", err)
+ return nil, nil, xerrors.Errorf("checking sectors: %w", err)
}
- faulty, err := bitfield.SubtractBitField(toCheck, good)
+ newFaulty, err := bitfield.SubtractBitField(nonFaulty, good)
if err != nil {
- return xerrors.Errorf("calculating faulty sector set: %w", err)
+ return nil, nil, xerrors.Errorf("calculating faulty sector set: %w", err)
}
- c, err := faulty.Count()
+ c, err := newFaulty.Count()
if err != nil {
- return xerrors.Errorf("counting faulty sectors: %w", err)
+ return nil, nil, xerrors.Errorf("counting faulty sectors: %w", err)
}
if c == 0 {
@@ -240,25 +356,26 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
params.Faults = append(params.Faults, miner.FaultDeclaration{
Deadline: dlIdx,
Partition: uint64(partIdx),
- Sectors: faulty,
+ Sectors: newFaulty,
})
}
- if len(params.Faults) == 0 {
- return nil
+ faults := params.Faults
+ if len(faults) == 0 {
+ return faults, nil, nil
}
log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)
enc, aerr := actors.SerializeParams(params)
if aerr != nil {
- return xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
+ return faults, nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
}
msg := &types.Message{
To: s.actor,
From: s.worker,
- Method: builtin.MethodsMiner.DeclareFaults,
+ Method: builtin0.MethodsMiner.DeclareFaults,
Params: enc,
Value: types.NewInt(0), // TODO: Is there a fee?
}
@@ -267,24 +384,24 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
if err != nil {
- return xerrors.Errorf("pushing message to mpool: %w", err)
+ return faults, sm, xerrors.Errorf("pushing message to mpool: %w", err)
}
log.Warnw("declare faults Message CID", "cid", sm.Cid())
rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
if err != nil {
- return xerrors.Errorf("declare faults wait error: %w", err)
+ return faults, sm, xerrors.Errorf("declare faults wait error: %w", err)
}
if rec.Receipt.ExitCode != 0 {
- return xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
+ return faults, sm, xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
}
- return nil
+ return faults, sm, nil
}
-func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo, ts *types.TipSet) (*miner.SubmitWindowedPoStParams, error) {
+func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
ctx, span := trace.StartSpan(ctx, "storage.runPost")
defer span.End()
@@ -293,7 +410,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
// check faults / recoveries for the *next* deadline. It's already too
// late to declare them for this deadline
- declDeadline := (di.Index + 2) % miner.WPoStPeriodDeadlines
+ declDeadline := (di.Index + 2) % di.WPoStPeriodDeadlines
partitions, err := s.api.StateMinerPartitions(context.TODO(), s.actor, declDeadline, ts.Key())
if err != nil {
@@ -301,15 +418,53 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
return
}
- if err := s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
+ var (
+ sigmsg *types.SignedMessage
+ recoveries []miner.RecoveryDeclaration
+ faults []miner.FaultDeclaration
+
+		// optionalCid returns the CID of the message, or cid.Undef if the
+		// message is nil. We don't need the argument (we could capture the
+		// pointer), but passing it explicitly is clearer.
+ optionalCid = func(sigmsg *types.SignedMessage) cid.Cid {
+ if sigmsg == nil {
+ return cid.Undef
+ }
+ return sigmsg.Cid()
+ }
+ )
+
+ if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
log.Errorf("checking sector recoveries: %v", err)
}
- if err := s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
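+	// Record the outcome of the recovery declarations in the journal, together
+	// with the CID of the pushed message (if any).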
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStRecoveries], func() interface{} {
+ j := WdPoStRecoveriesProcessedEvt{
+ evtCommon: s.getEvtCommon(err),
+ Declarations: recoveries,
+ MessageCID: optionalCid(sigmsg),
+ }
+ j.Error = err
+ return j
+ })
+
+ if ts.Height() > build.UpgradeIgnitionHeight {
+ return // FORK: declaring faults after ignition upgrade makes no sense
+ }
+
+ if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
// TODO: This is also potentially really bad, but we try to post anyways
log.Errorf("checking sector faults: %v", err)
}
+
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStFaults], func() interface{} {
+ return WdPoStFaultsProcessedEvt{
+ evtCommon: s.getEvtCommon(err),
+ Declarations: faults,
+ MessageCID: optionalCid(sigmsg),
+ }
+ })
}()
buf := new(bytes.Buffer)
@@ -319,123 +474,200 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes())
if err != nil {
- return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
+ return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err)
}
+ // Get the partitions for the given deadline
partitions, err := s.api.StateMinerPartitions(ctx, s.actor, di.Index, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting partitions: %w", err)
}
- params := &miner.SubmitWindowedPoStParams{
- Deadline: di.Index,
- Partitions: make([]miner.PoStPartition, 0, len(partitions)),
- Proofs: nil,
- }
-
- var sinfos []abi.SectorInfo
- sidToPart := map[abi.SectorNumber]uint64{}
- skipCount := uint64(0)
-
- for partIdx, partition := range partitions {
- // TODO: Can do this in parallel
- toProve, err := partition.ActiveSectors()
- if err != nil {
- return nil, xerrors.Errorf("getting active sectors: %w", err)
- }
-
- toProve, err = bitfield.MergeBitFields(toProve, partition.Recoveries)
- if err != nil {
- return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
- }
-
- good, err := s.checkSectors(ctx, toProve)
- if err != nil {
- return nil, xerrors.Errorf("checking sectors to skip: %w", err)
- }
-
- skipped, err := bitfield.SubtractBitField(toProve, good)
- if err != nil {
- return nil, xerrors.Errorf("toProve - good: %w", err)
- }
-
- sc, err := skipped.Count()
- if err != nil {
- return nil, xerrors.Errorf("getting skipped sector count: %w", err)
- }
-
- skipCount += sc
-
- ssi, err := s.sectorsForProof(ctx, good, partition.Sectors, ts)
- if err != nil {
- return nil, xerrors.Errorf("getting sorted sector info: %w", err)
- }
-
- if len(ssi) == 0 {
- continue
- }
-
- sinfos = append(sinfos, ssi...)
- for _, si := range ssi {
- sidToPart[si.SectorNumber] = uint64(partIdx)
- }
-
- params.Partitions = append(params.Partitions, miner.PoStPartition{
- Index: uint64(partIdx),
- Skipped: skipped,
- })
- }
-
- if len(sinfos) == 0 {
- // nothing to prove..
- return nil, errNoPartitions
- }
-
- log.Infow("running windowPost",
- "chain-random", rand,
- "deadline", di,
- "height", ts.Height(),
- "skipped", skipCount)
-
- tsStart := build.Clock.Now()
-
- mid, err := address.IDFromAddress(s.actor)
+ // Split partitions into batches, so as not to exceed the number of sectors
+ // allowed in a single message
+ partitionBatches, err := s.batchPartitions(partitions)
if err != nil {
return nil, err
}
- postOut, postSkipped, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
- if err != nil {
- return nil, xerrors.Errorf("running post failed: %w", err)
+ // Generate proofs in batches
+ posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
+ for batchIdx, batch := range partitionBatches {
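+		// Compute the index of this batch's first partition within the whole
+		// deadline, so the PoStPartition indices below use the deadline-wide
+		// partition numbering rather than the batch-local one.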
+ batchPartitionStartIdx := 0
+ for _, batch := range partitionBatches[:batchIdx] {
+ batchPartitionStartIdx += len(batch)
+ }
+
+ params := miner.SubmitWindowedPoStParams{
+ Deadline: di.Index,
+ Partitions: make([]miner.PoStPartition, 0, len(batch)),
+ Proofs: nil,
+ }
+
+ skipCount := uint64(0)
+ postSkipped := bitfield.New()
+ var postOut []proof.PoStProof
+ somethingToProve := true
+
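+		// Retry proof generation for this batch up to 5 times; sectors the
+		// prover fails on are accumulated in postSkipped and excluded from the
+		// next attempt.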
+ for retries := 0; retries < 5; retries++ {
+ var partitions []miner.PoStPartition
+ var sinfos []proof.SectorInfo
+ for partIdx, partition := range batch {
+ // TODO: Can do this in parallel
+ toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
+ if err != nil {
+ return nil, xerrors.Errorf("removing faults from set of sectors to prove: %w", err)
+ }
+ toProve, err = bitfield.MergeBitFields(toProve, partition.RecoveringSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
+ }
+
+ good, err := s.checkSectors(ctx, toProve)
+ if err != nil {
+ return nil, xerrors.Errorf("checking sectors to skip: %w", err)
+ }
+
+ good, err = bitfield.SubtractBitField(good, postSkipped)
+ if err != nil {
+					return nil, xerrors.Errorf("good - postSkipped: %w", err)
+ }
+
+ skipped, err := bitfield.SubtractBitField(toProve, good)
+ if err != nil {
+ return nil, xerrors.Errorf("toProve - good: %w", err)
+ }
+
+ sc, err := skipped.Count()
+ if err != nil {
+ return nil, xerrors.Errorf("getting skipped sector count: %w", err)
+ }
+
+ skipCount += sc
+
+ ssi, err := s.sectorsForProof(ctx, good, partition.AllSectors, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("getting sorted sector info: %w", err)
+ }
+
+ if len(ssi) == 0 {
+ continue
+ }
+
+ sinfos = append(sinfos, ssi...)
+ partitions = append(partitions, miner.PoStPartition{
+ Index: uint64(batchPartitionStartIdx + partIdx),
+ Skipped: skipped,
+ })
+ }
+
+ if len(sinfos) == 0 {
+ // nothing to prove for this batch
+ somethingToProve = false
+ break
+ }
+
+ // Generate proof
+ log.Infow("running window post",
+ "chain-random", rand,
+ "deadline", di,
+ "height", ts.Height(),
+ "skipped", skipCount)
+
+ tsStart := build.Clock.Now()
+
+ mid, err := address.IDFromAddress(s.actor)
+ if err != nil {
+ return nil, err
+ }
+
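+			// ps receives the sectors the prover could not prove; on failure they
+			// are marked as skipped below and the proof is retried without them.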
+ var ps []abi.SectorID
+ postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
+ elapsed := time.Since(tsStart)
+
+ log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed)
+
+ if err == nil {
+ // Proof generation successful, stop retrying
+ params.Partitions = append(params.Partitions, partitions...)
+
+ break
+ }
+
+ // Proof generation failed, so retry
+
+ if len(ps) == 0 {
+ return nil, xerrors.Errorf("running window post failed: %w", err)
+ }
+
+ log.Warnw("generate window post skipped sectors", "sectors", ps, "error", err, "try", retries)
+
+ skipCount += uint64(len(ps))
+ for _, sector := range ps {
+ postSkipped.Set(uint64(sector.Number))
+ }
+ }
+
+ // Nothing to prove for this batch, try the next batch
+ if !somethingToProve {
+ continue
+ }
+
+ if len(postOut) == 0 {
+ return nil, xerrors.Errorf("received no proofs back from generate window post")
+ }
+
+ params.Proofs = postOut
+
+ posts = append(posts, params)
}
- if len(postOut) == 0 {
- return nil, xerrors.Errorf("received proofs back from generate window post")
- }
-
- params.Proofs = postOut
-
- for _, sector := range postSkipped {
- params.Partitions[sidToPart[sector.Number]].Skipped.Set(uint64(sector.Number))
- }
-
- elapsed := time.Since(tsStart)
-
- commEpoch := di.Open
- commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
- if err != nil {
- return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
- }
- params.ChainCommitEpoch = commEpoch
- params.ChainCommitRand = commRand
-
- log.Infow("submitting window PoSt", "elapsed", elapsed)
-
- return params, nil
+ return posts, nil
}
-func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors abi.BitField, ts *types.TipSet) ([]abi.SectorInfo, error) {
- sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, false, ts.Key())
+func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]api.Partition, error) {
+	// Get the number of sectors allowed in a partition, for this proof type
+ sectorsPerPartition, err := builtin0.PoStProofWindowPoStPartitionSectors(s.proofType)
+ if err != nil {
+ return nil, xerrors.Errorf("getting sectors per partition: %w", err)
+ }
+
+ // We don't want to exceed the number of sectors allowed in a message.
+ // So given the number of sectors in a partition, work out the number of
+ // partitions that can be in a message without exceeding sectors per
+ // message:
+ // floor(number of sectors allowed in a message / sectors per partition)
+ // eg:
+ // max sectors per message 7: ooooooo
+ // sectors per partition 3: ooo
+ // partitions per message 2: oooOOO
+ // <1><2> (3rd doesn't fit)
+	// TODO(NETUPGRADE): we're going to need some form of policy abstraction
+	// that can supply the policy for future network versions; unfortunately,
+	// we can't just get this from the state.
+ partitionsPerMsg := int(miner0.AddressedSectorsMax / sectorsPerPartition)
+
+ // The number of messages will be:
+ // ceiling(number of partitions / partitions per message)
+ batchCount := len(partitions) / partitionsPerMsg
+ if len(partitions)%partitionsPerMsg != 0 {
+ batchCount++
+ }
+
+ // Split the partitions into batches
+ batches := make([][]api.Partition, 0, batchCount)
+ for i := 0; i < len(partitions); i += partitionsPerMsg {
+ end := i + partitionsPerMsg
+ if end > len(partitions) {
+ end = len(partitions)
+ }
+ batches = append(batches, partitions[i:end])
+ }
+
+ return batches, nil
+}
+
+func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof.SectorInfo, error) {
+ sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key())
if err != nil {
return nil, err
}
@@ -444,22 +676,22 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors,
return nil, nil
}
- substitute := abi.SectorInfo{
- SectorNumber: sset[0].ID,
- SealedCID: sset[0].Info.SealedCID,
- SealProof: sset[0].Info.SealProof,
+ substitute := proof.SectorInfo{
+ SectorNumber: sset[0].SectorNumber,
+ SealedCID: sset[0].SealedCID,
+ SealProof: sset[0].SealProof,
}
- sectorByID := make(map[uint64]abi.SectorInfo, len(sset))
+ sectorByID := make(map[uint64]proof.SectorInfo, len(sset))
for _, sector := range sset {
- sectorByID[uint64(sector.ID)] = abi.SectorInfo{
- SectorNumber: sector.ID,
- SealedCID: sector.Info.SealedCID,
- SealProof: sector.Info.SealProof,
+ sectorByID[uint64(sector.SectorNumber)] = proof.SectorInfo{
+ SectorNumber: sector.SectorNumber,
+ SealedCID: sector.SealedCID,
+ SealProof: sector.SealProof,
}
}
- proofSectors := make([]abi.SectorInfo, 0, len(sset))
+ proofSectors := make([]proof.SectorInfo, 0, len(sset))
if err := allSectors.ForEach(func(sectorNo uint64) error {
if info, found := sectorByID[sectorNo]; found {
proofSectors = append(proofSectors, info)
@@ -474,29 +706,32 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors,
return proofSectors, nil
}
-func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) error {
+func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) {
ctx, span := trace.StartSpan(ctx, "storage.commitPost")
defer span.End()
+ var sm *types.SignedMessage
+
enc, aerr := actors.SerializeParams(proof)
if aerr != nil {
- return xerrors.Errorf("could not serialize submit post parameters: %w", aerr)
+ return nil, xerrors.Errorf("could not serialize submit window post parameters: %w", aerr)
}
msg := &types.Message{
To: s.actor,
From: s.worker,
- Method: builtin.MethodsMiner.SubmitWindowedPoSt,
+ Method: builtin0.MethodsMiner.SubmitWindowedPoSt,
Params: enc,
- Value: types.NewInt(1000), // currently hard-coded late fee in actor, returned if not late
+ Value: types.NewInt(0),
}
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
s.setSender(ctx, msg, spec)
// TODO: consider maybe caring about the output
sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
+
if err != nil {
- return xerrors.Errorf("pushing message to mpool: %w", err)
+ return nil, xerrors.Errorf("pushing message to mpool: %w", err)
}
log.Infof("Submitted window post: %s", sm.Cid())
@@ -515,7 +750,7 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
log.Errorf("Submitting window post %s failed: exit %d", sm.Cid(), rec.Receipt.ExitCode)
}()
- return nil
+ return sm, nil
}
func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) {
@@ -540,7 +775,7 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message,
pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds)
if err != nil {
- log.Errorw("error selecting address for post", "error", err)
+ log.Errorw("error selecting address for window post", "error", err)
msg.From = s.worker
return
}
diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go
new file mode 100644
index 000000000..09b9aee5c
--- /dev/null
+++ b/storage/wdpost_run_test.go
@@ -0,0 +1,349 @@
+package storage
+
+import (
+ "bytes"
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/go-state-types/network"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ tutils "github.com/filecoin-project/specs-actors/support/testing"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type mockStorageMinerAPI struct {
+ partitions []api.Partition
+ pushedMessages chan *types.Message
+}
+
+func newMockStorageMinerAPI() *mockStorageMinerAPI {
+ return &mockStorageMinerAPI{
+ pushedMessages: make(chan *types.Message),
+ }
+}
+
+func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Address, key types.TipSetKey) (miner.MinerInfo, error) {
+ return miner.MinerInfo{
+ Worker: tutils.NewIDAddr(nil, 101),
+ Owner: tutils.NewIDAddr(nil, 101),
+ }, nil
+}
+
+func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
+ return abi.Randomness("ticket rand"), nil
+}
+
+func (m *mockStorageMinerAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
+ return abi.Randomness("beacon rand"), nil
+}
+
+func (m *mockStorageMinerAPI) setPartitions(ps []api.Partition) {
+ m.partitions = append(m.partitions, ps...)
+}
+
+func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, a address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
+ return m.partitions, nil
+}
+
+func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, snos *bitfield.BitField, key types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ var sis []*miner.SectorOnChainInfo
+ if snos == nil {
+ panic("unsupported")
+ }
+ _ = snos.ForEach(func(i uint64) error {
+ sis = append(sis, &miner.SectorOnChainInfo{
+ SectorNumber: abi.SectorNumber(i),
+ })
+ return nil
+ })
+ return sis, nil
+}
+
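+// MpoolPushMessage forwards every pushed message to the pushedMessages channel
+// so the test can assert on the window PoSt messages that were submitted.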
+func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
+ m.pushedMessages <- message
+ return &types.SignedMessage{
+ Message: *message,
+ }, nil
+}
+
+func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) {
+ return &api.MsgLookup{
+ Receipt: types.MessageReceipt{
+ ExitCode: 0,
+ },
+ }, nil
+}
+
+type mockProver struct {
+}
+
+func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof0.SectorInfo, abi.PoStRandomness) ([]proof0.PoStProof, error) {
+ panic("implement me")
+}
+
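+// GenerateWindowPoSt returns a single canned proof and no skipped sectors, so
+// every batch of partitions is treated as successfully proven.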
+func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof0.SectorInfo, pr abi.PoStRandomness) ([]proof0.PoStProof, []abi.SectorID, error) {
+ return []proof0.PoStProof{
+ {
+ PoStProof: abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
+ ProofBytes: []byte("post-proof"),
+ },
+ }, nil, nil
+}
+
+type mockFaultTracker struct {
+}
+
+func (m mockFaultTracker) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
+	// CheckProvable returns the set of "bad" (unprovable) sectors, so returning
+	// nil here means all sectors are provable.
+ return nil, nil
+}
+
+// TestWDPostDoPost verifies that doPost sends the correct number of window
+// PoSt messages for a given number of partitions.
+func TestWDPostDoPost(t *testing.T) {
+ ctx := context.Background()
+ expectedMsgCount := 5
+
+ proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
+ postAct := tutils.NewIDAddr(t, 100)
+ workerAct := tutils.NewIDAddr(t, 101)
+
+ mockStgMinerAPI := newMockStorageMinerAPI()
+
+ // Get the number of sectors allowed in a partition for this proof type
+ sectorsPerPartition, err := builtin0.PoStProofWindowPoStPartitionSectors(proofType)
+ require.NoError(t, err)
+ // Work out the number of partitions that can be included in a message
+ // without exceeding the message sector limit
+ partitionsPerMsg := int(miner0.AddressedSectorsMax / sectorsPerPartition)
+
+ // Enough partitions to fill expectedMsgCount-1 messages
+ partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
+ // Add an extra partition that should be included in the last message
+ partitionCount++
+
+ var partitions []api.Partition
+ for p := 0; p < partitionCount; p++ {
+ sectors := bitfield.New()
+ for s := uint64(0); s < sectorsPerPartition; s++ {
+ sectors.Set(s)
+ }
+ partitions = append(partitions, api.Partition{
+ AllSectors: sectors,
+ FaultySectors: bitfield.New(),
+ RecoveringSectors: bitfield.New(),
+ LiveSectors: sectors,
+ ActiveSectors: sectors,
+ })
+ }
+ mockStgMinerAPI.setPartitions(partitions)
+
+ // Run window PoST
+ scheduler := &WindowPoStScheduler{
+ api: mockStgMinerAPI,
+ prover: &mockProver{},
+ faultTracker: &mockFaultTracker{},
+ proofType: proofType,
+ actor: postAct,
+ worker: workerAct,
+ }
+
+ di := &dline.Info{
+ WPoStPeriodDeadlines: miner0.WPoStPeriodDeadlines,
+ WPoStProvingPeriod: miner0.WPoStProvingPeriod,
+ WPoStChallengeWindow: miner0.WPoStChallengeWindow,
+ WPoStChallengeLookback: miner0.WPoStChallengeLookback,
+ FaultDeclarationCutoff: miner0.FaultDeclarationCutoff,
+ }
+ ts := mockTipSet(t)
+
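+	// Generate the proofs for the deadline; once generation completes, the
+	// callback kicks off submission of the resulting window PoSt messages.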
+ scheduler.startGeneratePoST(ctx, ts, di, func(posts []miner.SubmitWindowedPoStParams, err error) {
+ scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
+ })
+
+ // Read the window PoST messages
+ for i := 0; i < expectedMsgCount; i++ {
+ msg := <-mockStgMinerAPI.pushedMessages
+ require.Equal(t, builtin0.MethodsMiner.SubmitWindowedPoSt, msg.Method)
+ var params miner.SubmitWindowedPoStParams
+ err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
+ require.NoError(t, err)
+
+ if i == expectedMsgCount-1 {
+ // In the last message we only included a single partition (see above)
+ require.Len(t, params.Partitions, 1)
+ } else {
+ // All previous messages should include the full number of partitions
+ require.Len(t, params.Partitions, partitionsPerMsg)
+ }
+ }
+}
+
+func mockTipSet(t *testing.T) *types.TipSet {
+ minerAct := tutils.NewActorAddr(t, "miner")
+ c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
+ require.NoError(t, err)
+ blks := []*types.BlockHeader{
+ {
+ Miner: minerAct,
+ Height: abi.ChainEpoch(1),
+ ParentStateRoot: c,
+ ParentMessageReceipts: c,
+ Messages: c,
+ },
+ }
+ ts, err := types.NewTipSet(blks)
+ require.NoError(t, err)
+ return ts
+}
+
+//
+// All the mock methods below here are unused
+//
+
+func (m *mockStorageMinerAPI) StateCall(ctx context.Context, message *types.Message, key types.TipSetKey) (*api.InvocResult, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) ([]api.Deadline, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateSectorPreCommitInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateSectorGetInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
+ return &dline.Info{
+ CurrentEpoch: 0,
+ PeriodStart: 0,
+ Index: 0,
+ Open: 0,
+ Close: 0,
+ Challenge: 0,
+ FaultCutoff: 0,
+ WPoStPeriodDeadlines: miner0.WPoStPeriodDeadlines,
+ WPoStProvingPeriod: miner0.WPoStProvingPeriod,
+ WPoStChallengeWindow: miner0.WPoStChallengeWindow,
+ WPoStChallengeLookback: miner0.WPoStChallengeLookback,
+ FaultDeclarationCutoff: miner0.FaultDeclarationCutoff,
+ }, nil
+}
+
+func (m *mockStorageMinerAPI) StateMinerPreCommitDepositForPower(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMinerInitialPledgeCollateral(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, cid cid.Cid) (*api.MsgLookup, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
+ return &types.Actor{
+ Code: builtin0.StorageMinerActorCodeID,
+ }, nil
+}
+
+func (m *mockStorageMinerAPI) StateGetReceipt(ctx context.Context, cid cid.Cid, key types.TipSetKey) (*types.MessageReceipt, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMarketStorageDeal(ctx context.Context, id abi.DealID, key types.TipSetKey) (*api.MarketDeal, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMinerFaults(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateMinerRecoveries(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) StateAccountKey(ctx context.Context, address address.Address, key types.TipSetKey) (address.Address, error) {
+ return address, nil
+}
+
+func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx context.Context, message *types.Message, spec *api.MessageSendSpec, key types.TipSetKey) (*types.Message, error) {
+ msg := *message
+ msg.GasFeeCap = big.NewInt(1)
+ msg.GasPremium = big.NewInt(1)
+ msg.GasLimit = 2
+ return &msg, nil
+}
+
+func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainGetBlockMessages(ctx context.Context, cid cid.Cid) (*api.BlockMessages, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainReadObj(ctx context.Context, cid cid.Cid) ([]byte, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainHasObj(ctx context.Context, cid cid.Cid) (bool, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
+ panic("implement me")
+}
+
+func (m *mockStorageMinerAPI) WalletSign(ctx context.Context, address address.Address, bytes []byte) (*crypto.Signature, error) {
+ return nil, nil
+}
+
+func (m *mockStorageMinerAPI) WalletBalance(ctx context.Context, address address.Address) (types.BigInt, error) {
+ return big.NewInt(333), nil
+}
+
+func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Address) (bool, error) {
+ return true, nil
+}
+
+var _ storageMinerApi = &mockStorageMinerAPI{}
diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go
index 2645b3702..ee380fbaf 100644
--- a/storage/wdpost_sched.go
+++ b/storage/wdpost_sched.go
@@ -7,8 +7,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/specs-actors/actors/abi"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
@@ -16,13 +16,12 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
+ "github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/node/config"
"go.opencensus.io/trace"
)
-const StartConfidence = 4 // TODO: config
-
type WindowPoStScheduler struct {
api storageMinerApi
feeCfg config.MinerFeeConfig
@@ -30,18 +29,15 @@ type WindowPoStScheduler struct {
faultTracker sectorstorage.FaultTracker
proofType abi.RegisteredPoStProof
partitionSectors uint64
+ ch *changeHandler
actor address.Address
worker address.Address
- cur *types.TipSet
+ evtTypes [4]journal.EventType
- // if a post is in progress, this indicates for which ElectionPeriodStart
- activeDeadline *miner.DeadlineInfo
- abort context.CancelFunc
-
- //failed abi.ChainEpoch // eps
- //failLk sync.Mutex
+ // failed abi.ChainEpoch // eps
+ // failLk sync.Mutex
}
func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, actor address.Address, worker address.Address) (*WindowPoStScheduler, error) {
@@ -65,19 +61,26 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb
actor: actor,
worker: worker,
+ evtTypes: [...]journal.EventType{
+ evtTypeWdPoStScheduler: journal.J.RegisterEventType("wdpost", "scheduler"),
+ evtTypeWdPoStProofs: journal.J.RegisterEventType("wdpost", "proofs_processed"),
+ evtTypeWdPoStRecoveries: journal.J.RegisterEventType("wdpost", "recoveries_processed"),
+ evtTypeWdPoStFaults: journal.J.RegisterEventType("wdpost", "faults_processed"),
+ },
}, nil
}
-func deadlineEquals(a, b *miner.DeadlineInfo) bool {
- if a == nil || b == nil {
- return b == a
- }
-
- return a.PeriodStart == b.PeriodStart && a.Index == b.Index && a.Challenge == b.Challenge
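+// changeHandlerAPIImpl embeds both the miner API and the scheduler so that a
+// single value satisfies the interface expected by newChangeHandler.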
+type changeHandlerAPIImpl struct {
+ storageMinerApi
+ *WindowPoStScheduler
}
func (s *WindowPoStScheduler) Run(ctx context.Context) {
- defer s.abortActivePoSt()
+ // Initialize change handler
+ chImpl := &changeHandlerAPIImpl{storageMinerApi: s.api, WindowPoStScheduler: s}
+ s.ch = newChangeHandler(chImpl, s.actor)
+ defer s.ch.shutdown()
+ s.ch.start()
var notifs <-chan []*api.HeadChange
var err error
@@ -100,7 +103,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
select {
case changes, ok := <-notifs:
if !ok {
- log.Warn("WindowPoStScheduler notifs channel closed")
+ log.Warn("window post scheduler notifs channel closed")
notifs = nil
continue
}
@@ -110,22 +113,24 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
log.Errorf("expected first notif to have len = 1")
continue
}
- if changes[0].Type != store.HCCurrent {
+ chg := changes[0]
+ if chg.Type != store.HCCurrent {
log.Errorf("expected first notif to tell current ts")
continue
}
- if err := s.update(ctx, changes[0].Val); err != nil {
- log.Errorf("%+v", err)
- }
+ ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.headChange")
+ s.update(ctx, nil, chg.Val)
+
+ span.End()
gotCur = true
continue
}
ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.headChange")
- var lowest, highest *types.TipSet = s.cur, nil
+ var lowest, highest *types.TipSet = nil, nil
for _, change := range changes {
if change.Val == nil {
@@ -139,12 +144,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
}
}
- if err := s.revert(ctx, lowest); err != nil {
- log.Error("handling head reverts in windowPost sched: %+v", err)
- }
- if err := s.update(ctx, highest); err != nil {
- log.Error("handling head updates in windowPost sched: %+v", err)
- }
+ s.update(ctx, lowest, highest)
span.End()
case <-ctx.Done():
@@ -153,76 +153,40 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
}
}
-func (s *WindowPoStScheduler) revert(ctx context.Context, newLowest *types.TipSet) error {
- if s.cur == newLowest {
- return nil
+func (s *WindowPoStScheduler) update(ctx context.Context, revert, apply *types.TipSet) {
+ if apply == nil {
+		log.Error("no new tipset in WindowPoStScheduler.update")
+ return
}
- s.cur = newLowest
-
- newDeadline, err := s.api.StateMinerProvingDeadline(ctx, s.actor, newLowest.Key())
+ err := s.ch.update(ctx, revert, apply)
if err != nil {
- return err
+ log.Errorf("handling head updates in window post sched: %+v", err)
}
-
- if !deadlineEquals(s.activeDeadline, newDeadline) {
- s.abortActivePoSt()
- }
-
- return nil
}
-func (s *WindowPoStScheduler) update(ctx context.Context, new *types.TipSet) error {
- if new == nil {
- return xerrors.Errorf("no new tipset in WindowPoStScheduler.update")
- }
-
- di, err := s.api.StateMinerProvingDeadline(ctx, s.actor, new.Key())
- if err != nil {
- return err
- }
-
- if deadlineEquals(s.activeDeadline, di) {
- return nil // already working on this deadline
- }
-
- if !di.PeriodStarted() {
- return nil // not proving anything yet
- }
-
- s.abortActivePoSt()
-
- // TODO: wait for di.Challenge here, will give us ~10min more to compute windowpost
- // (Need to get correct deadline above, which is tricky)
-
- if di.Open+StartConfidence >= new.Height() {
- log.Info("not starting windowPost yet, waiting for startconfidence", di.Open, di.Open+StartConfidence, new.Height())
- return nil
- }
-
- /*s.failLk.Lock()
- if s.failed > 0 {
- s.failed = 0
- s.activeEPS = 0
- }
- s.failLk.Unlock()*/
- log.Infof("at %d, doPost for P %d, dd %d", new.Height(), di.PeriodStart, di.Index)
-
- s.doPost(ctx, di, new)
-
- return nil
+// onAbort is called when generating proofs or submitting proofs is aborted
+func (s *WindowPoStScheduler) onAbort(ts *types.TipSet, deadline *dline.Info) {
+ journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+ c := evtCommon{}
+ if ts != nil {
+ c.Deadline = deadline
+ c.Height = ts.Height()
+ c.TipSet = ts.Cids()
+ }
+ return WdPoStSchedulerEvt{
+ evtCommon: c,
+ State: SchedulerStateAborted,
+ }
+ })
}
-func (s *WindowPoStScheduler) abortActivePoSt() {
- if s.activeDeadline == nil {
- return // noop
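+// getEvtCommon populates the fields shared by all journal events: the error
+// (if any) plus the current tipset and deadline tracked by the change handler.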
+func (s *WindowPoStScheduler) getEvtCommon(err error) evtCommon {
+ c := evtCommon{Error: err}
+ currentTS, currentDeadline := s.ch.currentTSDI()
+ if currentTS != nil {
+ c.Deadline = currentDeadline
+ c.Height = currentTS.Height()
+ c.TipSet = currentTS.Cids()
}
-
- if s.abort != nil {
- s.abort()
- }
-
- log.Warnf("Aborting Window PoSt (Deadline: %+v)", s.activeDeadline)
-
- s.activeDeadline = nil
- s.abort = nil
+ return c
}
diff --git a/tools/dockers/docker-examples/README.md b/tools/dockers/docker-examples/README.md
index 28553653c..3b8c34480 100644
--- a/tools/dockers/docker-examples/README.md
+++ b/tools/dockers/docker-examples/README.md
@@ -11,7 +11,7 @@ In this `docker-examples/` directory are community-contributed Docker and Docker
- local node for a developer (`api-local-`)
- hosted endpoint for apps / multiple developers (`api-hosted-`)
- **For a local devnet or shared devnet**
- - basic local devnet (also see [lotus docs on setting up a local devnet](https://lotu.sh/en+setup-local-dev-net))
+ - basic local devnet (also see [lotus docs on setting up a local devnet](https://docs.filecoin.io/build/local-devnet/))
- shared devnet
diff --git a/tools/stats/collect.go b/tools/stats/collect.go
index 3d031a415..221dc37e2 100644
--- a/tools/stats/collect.go
+++ b/tools/stats/collect.go
@@ -4,8 +4,8 @@ import (
"context"
"time"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/specs-actors/actors/abi"
client "github.com/influxdata/influxdb1-client/v2"
)
diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go
index 39fecf47b..aee61b2aa 100644
--- a/tools/stats/metrics.go
+++ b/tools/stats/metrics.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "math"
"math/big"
"strings"
"time"
@@ -12,14 +13,13 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/power"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
- "golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
+ "golang.org/x/xerrors"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -131,12 +131,6 @@ func RecordTipsetPoints(ctx context.Context, api api.FullNode, pl *PointList, ti
p = NewPoint("chain.blocktime", tsTime.Unix())
pl.AddPoint(p)
- baseFeeBig := tipset.Blocks()[0].ParentBaseFee.Copy()
- baseFeeRat := new(big.Rat).SetFrac(baseFeeBig.Int, new(big.Int).SetUint64(build.FilecoinPrecision))
- baseFeeFloat, _ := baseFeeRat.Float64()
- p = NewPoint("chain.basefee", baseFeeFloat)
- pl.AddPoint(p)
-
totalGasLimit := int64(0)
totalUniqGasLimit := int64(0)
seen := make(map[cid.Cid]struct{})
@@ -178,19 +172,51 @@ func RecordTipsetPoints(ctx context.Context, api api.FullNode, pl *PointList, ti
p = NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit)
pl.AddPoint(p)
+ {
+ baseFeeIn := tipset.Blocks()[0].ParentBaseFee
+ newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height())
+
+ baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision))
+ baseFeeFloat, _ := baseFeeRat.Float64()
+ p = NewPoint("chain.basefee", baseFeeFloat)
+ pl.AddPoint(p)
+
+ baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int)
+ baseFeeChangeF, _ := baseFeeChange.Float64()
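+		// Express the change in units of the maximum per-epoch base fee step
+		// (a factor of 1.125, i.e. 12.5%).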
+ p = NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125))
+ pl.AddPoint(p)
+ }
+ {
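+		// Ratios of declared gas limits to the per-block gas target: fill counts
+		// every message, capacity counts only unique messages, and waste is the
+		// gas claimed by messages duplicated across the tipset's blocks.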
+ blks := int64(len(cids))
+ p = NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget))
+ pl.AddPoint(p)
+ p = NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget))
+ pl.AddPoint(p)
+ p = NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget))
+ pl.AddPoint(p)
+ }
+
return nil
}
-type apiIpldStore struct {
+type ApiIpldStore struct {
ctx context.Context
- api api.FullNode
+ api apiIpldStoreApi
}
-func (ht *apiIpldStore) Context() context.Context {
+type apiIpldStoreApi interface {
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+}
+
+func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi) *ApiIpldStore {
+ return &ApiIpldStore{ctx, api}
+}
+
+func (ht *ApiIpldStore) Context() context.Context {
return ht.ctx
}
-func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
+func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
raw, err := ht.api.ChainReadObj(ctx, c)
if err != nil {
return err
@@ -207,8 +233,8 @@ func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) err
return fmt.Errorf("Object does not implement CBORUnmarshaler")
}
-func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
- return cid.Undef, fmt.Errorf("Put is not implemented on apiIpldStore")
+func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
+ return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore")
}
func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointList, tipset *types.TipSet) error {
@@ -225,7 +251,7 @@ func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointLis
//p := NewPoint("chain.pledge_collateral", pcFilFloat)
//pl.AddPoint(p)
- netBal, err := api.WalletBalance(ctx, builtin.RewardActorAddr)
+ netBal, err := api.WalletBalance(ctx, reward.Address)
if err != nil {
return err
}
@@ -243,47 +269,22 @@ func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointLis
p = NewPoint("chain.power", totalPower.TotalPower.QualityAdjPower.Int64())
pl.AddPoint(p)
- powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, tipset.Key())
+ miners, err := api.StateListMiners(ctx, tipset.Key())
if err != nil {
return err
}
- powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
- if err != nil {
- return err
- }
-
- var powerActorState power.State
-
- if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
- return fmt.Errorf("failed to unmarshal power actor state: %w", err)
- }
-
- s := &apiIpldStore{ctx, api}
- mp, err := adt.AsMap(s, powerActorState.Claims)
- if err != nil {
- return err
- }
-
- var claim power.Claim
- err = mp.ForEach(&claim, func(key string) error {
- addr, err := address.NewFromBytes([]byte(key))
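+	// Query each miner's power through the API rather than decoding the power
+	// actor's claims table directly.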
+ for _, addr := range miners {
+ mp, err := api.StateMinerPower(ctx, addr, tipset.Key())
if err != nil {
return err
}
- if claim.QualityAdjPower.Int64() == 0 {
- return nil
+ if !mp.MinerPower.QualityAdjPower.IsZero() {
+ p = NewPoint("chain.miner_power", mp.MinerPower.QualityAdjPower.Int64())
+ p.AddTag("miner", addr.String())
+ pl.AddPoint(p)
}
-
- p = NewPoint("chain.miner_power", claim.QualityAdjPower.Int64())
- p.AddTag("miner", addr.String())
- pl.AddPoint(p)
-
- return nil
- })
- if err != nil {
- return err
}
return nil
diff --git a/tools/stats/rpc.go b/tools/stats/rpc.go
index 166769fed..b01c07a35 100644
--- a/tools/stats/rpc.go
+++ b/tools/stats/rpc.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/specs-actors/actors/abi"
+ "github.com/filecoin-project/go-state-types/abi"
manet "github.com/multiformats/go-multiaddr/net"
"golang.org/x/xerrors"
@@ -14,7 +14,6 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/repo"
@@ -72,7 +71,7 @@ sync_complete:
"target_height", w.Target.Height(),
"height", w.Height,
"error", w.Message,
- "stage", chain.SyncStageString(w.Stage),
+ "stage", w.Stage.String(),
)
} else {
log.Infow(
@@ -82,7 +81,7 @@ sync_complete:
"target", w.Target.Key(),
"target_height", w.Target.Height(),
"height", w.Height,
- "stage", chain.SyncStageString(w.Stage),
+ "stage", w.Stage.String(),
)
}